diff --git a/HACKING.rst b/HACKING.rst
index 0d30194b61..d4cc74b212 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -35,6 +35,8 @@ Nova Specific Commandments
self.flags(option=value) instead.
- [N321] Validate that LOG messages, except debug ones, have translations
- [N322] Method's default argument shouldn't be mutable
+- [N323] Ensure that the _() function is explicitly imported so translations work properly.
+- [N324] Ensure that jsonutils.%(fun)s is used instead of json.%(fun)s
Creating Unit Tests
-------------------
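
The two checks added above pair naturally. A minimal sketch of code that
satisfies both N323 and N324 (the nova.i18n module path follows the il8n.rst
change later in this patch; the helper itself is illustrative)::

    # N323: _() is imported explicitly rather than assumed to be a builtin.
    from nova.i18n import _
    # N324: the oslo jsonutils wrapper is used instead of the stdlib json.
    from nova.openstack.common import jsonutils

    def describe(resource):
        # jsonutils.dumps mirrors json.dumps, so call sites read the same.
        payload = jsonutils.dumps({"name": resource})
        return _("Serialized resource: %s") % payload
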
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 99229da36b..b8fac5f559 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -272,6 +272,14 @@
"namespace": "http://docs.openstack.org/compute/ext/evacuate/api/v2",
"updated": "2013-01-06T00:00:00Z"
},
+ {
+ "alias": "os-extended-evacuate-find-host",
+ "description": "Enables server evacuation without target host. Scheduler will select\n one to target.\n ",
+ "links": [],
+ "name": "ExtendedEvacuateFindHost",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2",
+ "updated": "2014-02-12T00:00:00Z"
+ },
{
"alias": "os-extended-floating-ips",
"description": "Adds optional fixed_address to the add floating IP command.",
@@ -288,6 +296,14 @@
"namespace": "http://docs.openstack.org/compute/ext/extended_hypervisors/api/v1.1",
"updated": "2014-01-04T00:00:00Z"
},
+ {
+ "alias": "os-extended-networks",
+ "description": "Adds additional fields to networks",
+ "links": [],
+ "name": "ExtendedNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_networks/api/v2",
+ "updated": "2014-05-09T00:00:00Z"
+ },
{
"alias": "os-extended-quotas",
"description": "Adds ability for admins to delete quota\n and optionally force the update Quota command.\n ",
@@ -304,6 +320,14 @@
"namespace": "http://docs.openstack.org/compute/ext/extended_rescue_with_image/api/v2",
"updated": "2014-01-04T00:00:00Z"
},
+ {
+ "alias": "os-hypervisor-status",
+ "description": "Show hypervisor status.",
+ "links": [],
+ "name": "HypervisorStatus",
+ "namespace": "http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1",
+ "updated": "2014-04-17T00:00:00Z"
+ },
{
"alias": "os-extended-services",
"description": "Extended services support.",
@@ -568,6 +592,14 @@
"namespace": "http://docs.openstack.org/compute/ext/servergroups/api/v2",
"updated": "2013-06-20T00:00:00Z"
},
+ {
+ "alias": "os-server-list-multi-status",
+ "description": "Allow to filter the servers by a set of status values.",
+ "links": [],
+ "name": "ServerListMultiStatus",
+ "namespace": "http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2",
+ "updated": "2014-05-11T00:00:00Z"
+ },
{
"alias": "os-server-password",
"description": "Server password support.",
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index eb4e6e32d3..fb56dd8f4b 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -118,12 +118,20 @@
Enables server evacuation.
+    <extension alias="os-extended-evacuate-find-host" updated="2014-02-12T00:00:00Z" namespace="http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2" name="ExtendedEvacuateFindHost">
+        <description>Enables server evacuation without target host. Scheduler will select
+            one to target.
+        </description>
+    </extension>
Adds optional fixed_address to the add floating IP command.
Extended hypervisors support.
+    <extension alias="os-extended-networks" updated="2014-05-09T00:00:00Z" namespace="http://docs.openstack.org/compute/ext/extended_networks/api/v2" name="ExtendedNetworks">
+        <description>Adds additional fields to networks</description>
+    </extension>
Adds ability for admins to delete quota
and optionally force the update Quota command.
@@ -138,6 +146,9 @@
Extended services deletion support.
+    <extension alias="os-hypervisor-status" updated="2014-04-17T00:00:00Z" namespace="http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1" name="HypervisorStatus">
+        <description>Show hypervisor status.</description>
+    </extension>
Extended Volumes support.
@@ -231,6 +242,9 @@
Server group support.
+    <extension alias="os-server-list-multi-status" updated="2014-05-11T00:00:00Z" namespace="http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2" name="ServerListMultiStatus">
+        <description>Allow to filter the servers by a set of status values.</description>
+    </extension>
Server password support.
diff --git a/doc/api_samples/os-agents/agent-post-req.json b/doc/api_samples/os-agents/agent-post-req.json
index 217993b17f..1913498547 100644
--- a/doc/api_samples/os-agents/agent-post-req.json
+++ b/doc/api_samples/os-agents/agent-post-req.json
@@ -5,6 +5,6 @@
"architecture": "x86",
"version": "8.0",
"md5hash": "add6bb58e139be103324d04d82d8f545",
- "url": "xxxxxxxxxxxx"
+ "url": "http://example.com/path/to/resource"
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-agents/agent-post-req.xml b/doc/api_samples/os-agents/agent-post-req.xml
index be93e97ce4..b7b7d036ba 100644
--- a/doc/api_samples/os-agents/agent-post-req.xml
+++ b/doc/api_samples/os-agents/agent-post-req.xml
@@ -5,5 +5,5 @@
     <architecture>x86</architecture>
     <version>8.0</version>
     <md5hash>add6bb58e139be103324d04d82d8f545</md5hash>
-    <url>xxxxxxxxxxxx</url>
-</agent>
\ No newline at end of file
+    <url>http://example.com/path/to/resource</url>
+</agent>
diff --git a/doc/api_samples/os-agents/agent-post-resp.json b/doc/api_samples/os-agents/agent-post-resp.json
index f6c760cc67..24ddede90b 100644
--- a/doc/api_samples/os-agents/agent-post-resp.json
+++ b/doc/api_samples/os-agents/agent-post-resp.json
@@ -5,7 +5,7 @@
"hypervisor": "hypervisor",
"md5hash": "add6bb58e139be103324d04d82d8f545",
"os": "os",
- "url": "xxxxxxxxxxxx",
+ "url": "http://example.com/path/to/resource",
"version": "8.0"
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-agents/agent-post-resp.xml b/doc/api_samples/os-agents/agent-post-resp.xml
index 79f62b7fb9..abfe15f909 100644
--- a/doc/api_samples/os-agents/agent-post-resp.xml
+++ b/doc/api_samples/os-agents/agent-post-resp.xml
@@ -1,10 +1,10 @@
 <?xml version='1.0' encoding='UTF-8'?>
 <agent>
-    <url>xxxxxxxxxxxx</url>
+    <url>http://example.com/path/to/resource</url>
     <hypervisor>hypervisor</hypervisor>
     <md5hash>add6bb58e139be103324d04d82d8f545</md5hash>
     <version>8.0</version>
     <architecture>x86</architecture>
     <os>os</os>
     <agent_id>1</agent_id>
-</agent>
\ No newline at end of file
+</agent>
diff --git a/doc/api_samples/os-agents/agent-update-put-req.json b/doc/api_samples/os-agents/agent-update-put-req.json
index e4eaf53525..f7398504d6 100644
--- a/doc/api_samples/os-agents/agent-update-put-req.json
+++ b/doc/api_samples/os-agents/agent-update-put-req.json
@@ -1,7 +1,7 @@
{
"para": {
- "url": "xxx://xxxx/xxx/xxx",
+ "url": "http://example.com/path/to/resource",
"md5hash": "add6bb58e139be103324d04d82d8f545",
"version": "7.0"
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-agents/agent-update-put-req.xml b/doc/api_samples/os-agents/agent-update-put-req.xml
index f759880c17..9a25cefdda 100644
--- a/doc/api_samples/os-agents/agent-update-put-req.xml
+++ b/doc/api_samples/os-agents/agent-update-put-req.xml
@@ -1,6 +1,6 @@
 <?xml version='1.0' encoding='UTF-8'?>
 <para>
     <version>7.0</version>
-    <url>xxx://xxxx/xxx/xxx</url>
+    <url>http://example.com/path/to/resource</url>
     <md5hash>add6bb58e139be103324d04d82d8f545</md5hash>
-</para>
\ No newline at end of file
+</para>
diff --git a/doc/api_samples/os-agents/agent-update-put-resp.json b/doc/api_samples/os-agents/agent-update-put-resp.json
index 6b67222c8c..2919d21388 100644
--- a/doc/api_samples/os-agents/agent-update-put-resp.json
+++ b/doc/api_samples/os-agents/agent-update-put-resp.json
@@ -2,7 +2,7 @@
"agent": {
"agent_id": "1",
"md5hash": "add6bb58e139be103324d04d82d8f545",
- "url": "xxx://xxxx/xxx/xxx",
+ "url": "http://example.com/path/to/resource",
"version": "7.0"
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-agents/agent-update-put-resp.xml b/doc/api_samples/os-agents/agent-update-put-resp.xml
index badf2750ea..ce62db3868 100644
--- a/doc/api_samples/os-agents/agent-update-put-resp.xml
+++ b/doc/api_samples/os-agents/agent-update-put-resp.xml
@@ -1,7 +1,7 @@
 <?xml version='1.0' encoding='UTF-8'?>
 <agent>
-    <url>xxx://xxxx/xxx/xxx</url>
+    <url>http://example.com/path/to/resource</url>
     <version>7.0</version>
     <agent_id>1</agent_id>
     <md5hash>add6bb58e139be103324d04d82d8f545</md5hash>
-</agent>
\ No newline at end of file
+</agent>
diff --git a/doc/api_samples/os-agents/agents-get-resp.json b/doc/api_samples/os-agents/agents-get-resp.json
index 73ba45c240..92e14e1dc5 100644
--- a/doc/api_samples/os-agents/agents-get-resp.json
+++ b/doc/api_samples/os-agents/agents-get-resp.json
@@ -6,8 +6,8 @@
"hypervisor": "hypervisor",
"md5hash": "add6bb58e139be103324d04d82d8f545",
"os": "os",
- "url": "xxxxxxxxxxxx",
+ "url": "http://example.com/path/to/resource",
"version": "8.0"
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-agents/agents-get-resp.xml b/doc/api_samples/os-agents/agents-get-resp.xml
index 4194f62c96..d804245305 100644
--- a/doc/api_samples/os-agents/agents-get-resp.xml
+++ b/doc/api_samples/os-agents/agents-get-resp.xml
@@ -1,4 +1,4 @@
 <?xml version='1.0' encoding='UTF-8'?>
 <agents>
-    <agent url="xxxxxxxxxxxx" hypervisor="hypervisor" md5hash="add6bb58e139be103324d04d82d8f545" version="8.0" architecture="x86" os="os" agent_id="1"/>
-</agents>
\ No newline at end of file
+    <agent url="http://example.com/path/to/resource" hypervisor="hypervisor" md5hash="add6bb58e139be103324d04d82d8f545" version="8.0" architecture="x86" os="os" agent_id="1"/>
+</agents>
diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json
new file mode 100644
index 0000000000..e9ee83481c
--- /dev/null
+++ b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json
@@ -0,0 +1,6 @@
+{
+ "evacuate": {
+ "adminPass": "MySecretPass",
+ "onSharedStorage": "False"
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml
new file mode 100644
index 0000000000..4faf14a785
--- /dev/null
+++ b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<evacuate xmlns="http://docs.openstack.org/compute/api/v2"
+    adminPass="MySecretPass"
+    onSharedStorage="False"/>
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json
new file mode 100644
index 0000000000..6cd942395f
--- /dev/null
+++ b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json
@@ -0,0 +1,3 @@
+{
+ "adminPass": "MySecretPass"
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml
new file mode 100644
index 0000000000..5823886702
--- /dev/null
+++ b/doc/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml
@@ -0,0 +1,2 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<adminPass>MySecretPass</adminPass>
\ No newline at end of file
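
The point of the samples above is that the ``evacuate`` body carries no
``host`` key, so the scheduler picks the target itself. A hedged sketch of
driving the action over HTTP (the endpoint, server id, and token are
placeholders, not values defined by this patch)::

    import requests

    action = {"evacuate": {"adminPass": "MySecretPass",
                           "onSharedStorage": "False"}}
    resp = requests.post(
        "http://openstack.example.com/v2/openstack/servers/"
        "1c650ba2-6a76-41d1-805c-64f4e312200e/action",
        json=action,
        headers={"X-Auth-Token": "ADMIN_TOKEN"},
    )
    # The response mirrors server-evacuate-find-host-resp.json above.
    print(resp.json()["adminPass"])
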
diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-post-req.json b/doc/api_samples/os-extended-evacuate-find-host/server-post-req.json
new file mode 100644
index 0000000000..d88eb41222
--- /dev/null
+++ b/doc/api_samples/os-extended-evacuate-find-host/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-post-req.xml b/doc/api_samples/os-extended-evacuate-find-host/server-post-req.xml
new file mode 100644
index 0000000000..0a3c8bb530
--- /dev/null
+++ b/doc/api_samples/os-extended-evacuate-find-host/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+    <metadata>
+        <meta key="My Server Name">Apache1</meta>
+    </metadata>
+    <personality>
+        <file path="/etc/banner.txt">
+            ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+            dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+            IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+            c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+            QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+            ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+            dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+            c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+            b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+        </file>
+    </personality>
+</server>
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.json b/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.json
new file mode 100644
index 0000000000..e07dceaeaa
--- /dev/null
+++ b/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "y6hsKno56L6R",
+ "id": "1c650ba2-6a76-41d1-805c-64f4e312200e",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/1c650ba2-6a76-41d1-805c-64f4e312200e",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/1c650ba2-6a76-41d1-805c-64f4e312200e",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.xml b/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.xml
new file mode 100644
index 0000000000..ad40d9e731
--- /dev/null
+++ b/doc/api_samples/os-extended-evacuate-find-host/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="1c650ba2-6a76-41d1-805c-64f4e312200e" adminPass="y6hsKno56L6R">
+    <metadata/>
+    <atom:link href="http://openstack.example.com/v2/openstack/servers/1c650ba2-6a76-41d1-805c-64f4e312200e" rel="self"/>
+    <atom:link href="http://openstack.example.com/openstack/servers/1c650ba2-6a76-41d1-805c-64f4e312200e" rel="bookmark"/>
+</server>
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json b/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json
index 01b6428446..bb20f50afd 100644
--- a/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json
+++ b/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json
@@ -8,7 +8,7 @@
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
diff --git a/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml b/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml
index 244e899969..4fd6ea8f9e 100644
--- a/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml
+++ b/doc/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml
@@ -1,4 +1,4 @@
-
+
diff --git a/doc/api_samples/os-extended-networks/network-create-req.json b/doc/api_samples/os-extended-networks/network-create-req.json
new file mode 100644
index 0000000000..18515bd6c4
--- /dev/null
+++ b/doc/api_samples/os-extended-networks/network-create-req.json
@@ -0,0 +1,12 @@
+{
+ "network": {
+ "label": "new net 111",
+ "cidr": "10.20.105.0/24",
+ "mtu": 9000,
+ "dhcp_server": "10.20.105.2",
+ "enable_dhcp": false,
+ "share_address": true,
+ "allowed_start": "10.20.105.10",
+ "allowed_end": "10.20.105.200"
+ }
+}
diff --git a/doc/api_samples/os-extended-networks/network-create-req.xml b/doc/api_samples/os-extended-networks/network-create-req.xml
new file mode 100644
index 0000000000..3cc79bd837
--- /dev/null
+++ b/doc/api_samples/os-extended-networks/network-create-req.xml
@@ -0,0 +1,10 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<network label="new net 111">
+    <cidr>10.20.105.0/24</cidr>
+    <mtu>9000</mtu>
+    <dhcp_server>10.20.105.2</dhcp_server>
+    <enable_dhcp>False</enable_dhcp>
+    <share_address>True</share_address>
+    <allowed_start>10.20.105.10</allowed_start>
+    <allowed_end>10.20.105.200</allowed_end>
+</network>
diff --git a/doc/api_samples/os-extended-networks/network-create-resp.json b/doc/api_samples/os-extended-networks/network-create-resp.json
new file mode 100644
index 0000000000..4364e50b2d
--- /dev/null
+++ b/doc/api_samples/os-extended-networks/network-create-resp.json
@@ -0,0 +1,36 @@
+{
+ "network": {
+ "bridge": null,
+ "bridge_interface": null,
+ "broadcast": "10.20.105.255",
+ "cidr": "10.20.105.0/24",
+ "cidr_v6": null,
+ "created_at": null,
+ "deleted": null,
+ "deleted_at": null,
+ "dhcp_server": "10.20.105.2",
+ "dhcp_start": "10.20.105.2",
+ "dns1": null,
+ "dns2": null,
+ "enable_dhcp": false,
+ "gateway": "10.20.105.1",
+ "gateway_v6": null,
+ "host": null,
+ "id": "d7a17c0c-457e-4ab4-a99c-4fa1762f5359",
+ "injected": null,
+ "label": "new net 111",
+ "mtu": 9000,
+ "multi_host": null,
+ "netmask": "255.255.255.0",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": null,
+ "rxtx_base": null,
+ "share_address": true,
+ "updated_at": null,
+ "vlan": null,
+ "vpn_private_address": null,
+ "vpn_public_address": null,
+ "vpn_public_port": null
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-networks/network-create-resp.xml b/doc/api_samples/os-extended-networks/network-create-resp.xml
new file mode 100644
index 0000000000..9f16171ed7
--- /dev/null
+++ b/doc/api_samples/os-extended-networks/network-create-resp.xml
@@ -0,0 +1,35 @@
+
+
+ None
+ None
+ 10.20.105.2
+ None
+ True
+ None
+ a931ead3-4c5c-4b85-a90e-b248ffa71134
+ None
+ None
+ 10.20.105.1
+ None
+
+ None
+ None
+ None
+ False
+ None
+ 10.20.105.255
+ 255.255.255.0
+ None
+ 10.20.105.0/24
+ None
+ None
+ False
+ None
+ None
+ None
+ 9000
+ None
+ None
+ 10.20.105.2
+ None
+
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-networks/network-show-resp.json b/doc/api_samples/os-extended-networks/network-show-resp.json
new file mode 100644
index 0000000000..9741395c63
--- /dev/null
+++ b/doc/api_samples/os-extended-networks/network-show-resp.json
@@ -0,0 +1,36 @@
+{
+ "network": {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "10.0.0.7",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "2011-08-15T06:19:19.387525",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_server": "10.0.0.1",
+ "dhcp_start": "10.0.0.3",
+ "dns1": null,
+ "dns2": null,
+ "enable_dhcp": true,
+ "gateway": "10.0.0.1",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "20c8acc0-f747-4d71-a389-46d078ebf047",
+ "injected": false,
+ "label": "mynet_0",
+ "mtu": null,
+ "multi_host": false,
+ "netmask": "255.255.255.248",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "share_address": false,
+ "updated_at": "2011-08-16T09:26:13.048257",
+ "vlan": 100,
+ "vpn_private_address": "10.0.0.2",
+ "vpn_public_address": "127.0.0.1",
+ "vpn_public_port": 1000
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-networks/network-show-resp.xml b/doc/api_samples/os-extended-networks/network-show-resp.xml
new file mode 100644
index 0000000000..2f3176fbc3
--- /dev/null
+++ b/doc/api_samples/os-extended-networks/network-show-resp.xml
@@ -0,0 +1,35 @@
+
+
+ br100
+ 1000
+ 10.0.0.3
+ eth0
+ False
+ 2011-08-16 09:26:13.048257
+ 20c8acc0-f747-4d71-a389-46d078ebf047
+ None
+ None
+ 10.0.0.1
+ None
+
+ None
+ 1234
+ 10.0.0.2
+ False
+ 100
+ 10.0.0.7
+ 255.255.255.248
+ False
+ 10.0.0.0/29
+ 127.0.0.1
+ False
+ True
+ None
+ 2011-08-15 06:19:19.387525
+ nsokolov-desktop
+ None
+ None
+ None
+ 10.0.0.1
+ None
+
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-networks/networks-list-resp.json b/doc/api_samples/os-extended-networks/networks-list-resp.json
new file mode 100644
index 0000000000..49bdad5826
--- /dev/null
+++ b/doc/api_samples/os-extended-networks/networks-list-resp.json
@@ -0,0 +1,72 @@
+{
+ "networks": [
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "10.0.0.7",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "2011-08-15T06:19:19.387525",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_server": "10.0.0.1",
+ "dhcp_start": "10.0.0.3",
+ "dns1": null,
+ "dns2": null,
+ "enable_dhcp": true,
+ "gateway": "10.0.0.1",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "20c8acc0-f747-4d71-a389-46d078ebf047",
+ "injected": false,
+ "label": "mynet_0",
+ "mtu": null,
+ "multi_host": false,
+ "netmask": "255.255.255.248",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "share_address": false,
+ "updated_at": "2011-08-16T09:26:13.048257",
+ "vlan": 100,
+ "vpn_private_address": "10.0.0.2",
+ "vpn_public_address": "127.0.0.1",
+ "vpn_public_port": 1000
+ },
+ {
+ "bridge": "br101",
+ "bridge_interface": "eth0",
+ "broadcast": "10.0.0.15",
+ "cidr": "10.0.0.10/29",
+ "cidr_v6": null,
+ "created_at": "2011-08-15T06:19:19.885495",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_server": "10.0.0.9",
+ "dhcp_start": "10.0.0.11",
+ "dns1": null,
+ "dns2": null,
+ "enable_dhcp": true,
+ "gateway": "10.0.0.9",
+ "gateway_v6": null,
+ "host": null,
+ "id": "20c8acc0-f747-4d71-a389-46d078ebf000",
+ "injected": false,
+ "label": "mynet_1",
+ "mtu": null,
+ "multi_host": false,
+ "netmask": "255.255.255.248",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": null,
+ "rxtx_base": null,
+ "share_address": false,
+ "updated_at": null,
+ "vlan": 101,
+ "vpn_private_address": "10.0.0.10",
+ "vpn_public_address": null,
+ "vpn_public_port": 1001
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-extended-networks/networks-list-resp.xml b/doc/api_samples/os-extended-networks/networks-list-resp.xml
new file mode 100644
index 0000000000..b3b6e8885f
--- /dev/null
+++ b/doc/api_samples/os-extended-networks/networks-list-resp.xml
@@ -0,0 +1,71 @@
+
+
+
+ br100
+ 1000
+ 10.0.0.3
+ eth0
+ False
+ 2011-08-16 09:26:13.048257
+ 20c8acc0-f747-4d71-a389-46d078ebf047
+ None
+ None
+ 10.0.0.1
+ None
+
+ None
+ 1234
+ 10.0.0.2
+ False
+ 100
+ 10.0.0.7
+ 255.255.255.248
+ False
+ 10.0.0.0/29
+ 127.0.0.1
+ False
+ True
+ None
+ 2011-08-15 06:19:19.387525
+ nsokolov-desktop
+ None
+ None
+ None
+ 10.0.0.1
+ None
+
+
+ br101
+ 1001
+ 10.0.0.11
+ eth0
+ False
+ None
+ 20c8acc0-f747-4d71-a389-46d078ebf000
+ None
+ None
+ 10.0.0.9
+ None
+
+ None
+ None
+ 10.0.0.10
+ False
+ 101
+ 10.0.0.15
+ 255.255.255.248
+ False
+ 10.0.0.10/29
+ None
+ False
+ True
+ None
+ 2011-08-15 06:19:19.885495
+ None
+ None
+ None
+ None
+ 10.0.0.9
+ None
+
+
\ No newline at end of file
diff --git a/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json b/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json
new file mode 100644
index 0000000000..44af92c433
--- /dev/null
+++ b/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json
@@ -0,0 +1,27 @@
+{
+ "hypervisor": {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": 1,
+ "status": "enabled",
+ "state": "up",
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "service": {
+ "host": "5641188ab2964f88a21042b493585ff8",
+ "id": 2,
+ "disabled_reason": null
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+}
diff --git a/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml b/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml
new file mode 100644
index 0000000000..dbfec700ce
--- /dev/null
+++ b/doc/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json
index b124901ea8..8ee96284d1 100644
--- a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json
+++ b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.json
@@ -8,7 +8,7 @@
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
@@ -23,4 +23,4 @@
"vcpus_used": 0
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml
index 709f4fcd6d..6904c089c8 100644
--- a/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml
+++ b/doc/api_samples/os-hypervisors/hypervisors-detail-resp.xml
@@ -1,6 +1,6 @@
-
+
-
\ No newline at end of file
+
diff --git a/doc/api_samples/os-hypervisors/hypervisors-show-resp.json b/doc/api_samples/os-hypervisors/hypervisors-show-resp.json
index 59ac652331..02945469ab 100644
--- a/doc/api_samples/os-hypervisors/hypervisors-show-resp.json
+++ b/doc/api_samples/os-hypervisors/hypervisors-show-resp.json
@@ -7,7 +7,7 @@
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
diff --git a/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml b/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml
index 3b21782c07..471709fb24 100644
--- a/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml
+++ b/doc/api_samples/os-hypervisors/hypervisors-show-resp.xml
@@ -1,4 +1,4 @@
-
+
diff --git a/doc/api_samples/os-server-list-multi-status/server-post-req.json b/doc/api_samples/os-server-list-multi-status/server-post-req.json
new file mode 100644
index 0000000000..2269848f46
--- /dev/null
+++ b/doc/api_samples/os-server-list-multi-status/server-post-req.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "flavorRef": "http://openstack.example.com/openstack/flavors/1",
+ "imageRef": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "personality": [
+ {
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==",
+ "path": "/etc/banner.txt"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-server-list-multi-status/server-post-req.xml b/doc/api_samples/os-server-list-multi-status/server-post-req.xml
new file mode 100644
index 0000000000..2dbbb4438d
--- /dev/null
+++ b/doc/api_samples/os-server-list-multi-status/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+    <metadata>
+        <meta key="My Server Name">Apache1</meta>
+    </metadata>
+    <personality>
+        <file path="/etc/banner.txt">
+            ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+            dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+            IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+            c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+            QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+            ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+            dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+            c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+            b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+        </file>
+    </personality>
+</server>
diff --git a/doc/api_samples/os-server-list-multi-status/server-post-resp.json b/doc/api_samples/os-server-list-multi-status/server-post-resp.json
new file mode 100644
index 0000000000..29ce137179
--- /dev/null
+++ b/doc/api_samples/os-server-list-multi-status/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "MVk5HPrazHcG",
+ "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-server-list-multi-status/server-post-resp.xml b/doc/api_samples/os-server-list-multi-status/server-post-resp.xml
new file mode 100644
index 0000000000..9725f33bf4
--- /dev/null
+++ b/doc/api_samples/os-server-list-multi-status/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" adminPass="MVk5HPrazHcG">
+    <metadata/>
+    <atom:link href="http://openstack.example.com/v2/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" rel="self"/>
+    <atom:link href="http://openstack.example.com/openstack/servers/5bbcc3c4-1da2-4437-a48a-66f15b1b13f9" rel="bookmark"/>
+</server>
\ No newline at end of file
diff --git a/doc/api_samples/os-server-list-multi-status/servers-list-resp.json b/doc/api_samples/os-server-list-multi-status/servers-list-resp.json
new file mode 100644
index 0000000000..2cc75eef19
--- /dev/null
+++ b/doc/api_samples/os-server-list-multi-status/servers-list-resp.json
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/616fb98f-46ca-475e-917e-2563e5a8cd19",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/616fb98f-46ca-475e-917e-2563e5a8cd19",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-server-list-multi-status/servers-list-resp.xml b/doc/api_samples/os-server-list-multi-status/servers-list-resp.xml
new file mode 100644
index 0000000000..cbd7892e73
--- /dev/null
+++ b/doc/api_samples/os-server-list-multi-status/servers-list-resp.xml
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+    <server id="616fb98f-46ca-475e-917e-2563e5a8cd19" name="new-server-test">
+        <atom:link href="http://openstack.example.com/v2/openstack/servers/616fb98f-46ca-475e-917e-2563e5a8cd19" rel="self"/>
+        <atom:link href="http://openstack.example.com/openstack/servers/616fb98f-46ca-475e-917e-2563e5a8cd19" rel="bookmark"/>
+    </server>
+</servers>
\ No newline at end of file
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 83723a24ab..66e94be986 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -55,8 +55,8 @@
# The encoding of source files.
#source_encoding = 'utf-8'
-# The master toctree document.
-master_doc = 'index'
+# The main toctree document.
+master_doc = 'index'
# General information about the project.
project = u'nova'
@@ -194,7 +194,7 @@
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
diff --git a/doc/source/devref/api.rst b/doc/source/devref/api.rst
deleted file mode 100644
index 8827b8f17e..0000000000
--- a/doc/source/devref/api.rst
+++ /dev/null
@@ -1,270 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-API Endpoint
-============
-
-Nova has a system for managing multiple APIs on different subdomains.
-Currently there is support for the OpenStack API, as well as the Amazon EC2
-API.
-
-Common Components
------------------
-
-The :mod:`nova.api` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. automodule:: nova.api
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`nova.api.cloud` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.api.cloud
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-OpenStack API
--------------
-
-The :mod:`openstack` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. automodule:: nova.api.openstack
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`auth` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. automodule:: nova.api.openstack.auth
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`backup_schedules` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. automodule:: nova.api.openstack.backup_schedules
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`faults` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. automodule:: nova.api.openstack.faults
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`flavors` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. automodule:: nova.api.openstack.flavors
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`images` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. automodule:: nova.api.openstack.images
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`servers` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. automodule:: nova.api.openstack.servers
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`sharedipgroups` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.. automodule:: nova.api.openstack.sharedipgroups
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-EC2 API
--------
-
-The :mod:`nova.api.ec2` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.api.ec2
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`apirequest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.api.ec2.apirequest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`cloud` Module
-~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.api.ec2.cloud
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`images` Module
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.api.ec2.images
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`metadatarequesthandler` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.api.ec2.metadatarequesthandler
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-Tests
------
-
-The :mod:`api_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`api_integration` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api_integration
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`cloud_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.cloud_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`api.fakes` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api.fakes
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`api.test_wsgi` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api.test_wsgi
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`test_api` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api.openstack.test_api
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`test_auth` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api.openstack.test_auth
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`test_faults` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api.openstack.test_faults
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`test_flavors` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api.openstack.test_flavors
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`test_images` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api.openstack.test_images
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`test_servers` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api.openstack.test_servers
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`test_sharedipgroups` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.api.openstack.test_sharedipgroups
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/doc/source/devref/compute.rst b/doc/source/devref/compute.rst
deleted file mode 100644
index 00da777e80..0000000000
--- a/doc/source/devref/compute.rst
+++ /dev/null
@@ -1,140 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-
-Virtualization
-==============
-
-
-Compute
--------
-
-Documentation for the compute manager and related files. For reading about
-a specific virtualization backend, read Drivers_.
-
-
-The :mod:`nova.compute.manager` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.compute.manager
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`nova.virt.connection` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.virt.connection
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`nova.compute.disk` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.compute.disk
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`nova.virt.images` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.virt.images
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.compute.flavors` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.compute.flavors
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.compute.power_state` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.compute.power_state
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Drivers
--------
-
-
-The :mod:`nova.virt.libvirt_conn` Driver
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.virt.libvirt_conn
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.virt.xenapi` Driver
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.virt.xenapi
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.virt.fake` Driver
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.virt.fake
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Tests
------
-
-The :mod:`compute_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.compute_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`virt_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.virt_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/devref/database.rst b/doc/source/devref/database.rst
deleted file mode 100644
index a26e487057..0000000000
--- a/doc/source/devref/database.rst
+++ /dev/null
@@ -1,63 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-The Database Layer
-==================
-
-The :mod:`nova.db.api` Module
------------------------------
-
-.. automodule:: nova.db.api
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The Sqlalchemy Driver
----------------------
-
-The :mod:`nova.db.sqlalchemy.api` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.db.sqlalchemy.api
- :noindex:
-
-The :mod:`nova.db.sqlalchemy.models` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.db.sqlalchemy.models
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`nova.db.sqlalchemy.session` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.db.sqlalchemy.session
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Tests
------
-
-Tests are lacking for the db api layer and for the sqlalchemy driver.
-Failures in the drivers would be detected in other test cases, though.
diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst
index 788387925f..652e71a208 100644
--- a/doc/source/devref/development.environment.rst
+++ b/doc/source/devref/development.environment.rst
@@ -40,7 +40,7 @@ environments with venv are also available with the source code.
The easiest way to build a fully functional development environment is
with DevStack. Create a machine (such as a VM or Vagrant box) running a
distribution supported by DevStack and install DevStack there. For
-example, there is a Vagrant script for DevStack at https://github.com/jogo/DevstackUp.
+example, there is a Vagrant script for DevStack at http://git.openstack.org/cgit/openstack-dev/devstack-vagrant/.
.. note::
@@ -60,15 +60,19 @@ Install the prerequisite packages.
On Ubuntu::
- sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev
+ sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev libvirt-dev graphviz
On Ubuntu Precise (12.04) you may also need to add the following packages::
sudo apt-get build-dep python-mysqldb
+ # enable cloud-archive to get the latest libvirt
+ sudo apt-get install python-software-properties
+ sudo add-apt-repository cloud-archive:icehouse
+ sudo apt-get install libvirt-dev
On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux)::
- sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel
+ sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel libvirt-devel graphviz
sudo pip-python install tox
@@ -123,7 +127,7 @@ Using fake computes for tests
-----------------------------
The number of instances supported by fake computes is not limited by physical
-constraints. It allows to perform stress tests on a deployment with few
+constraints. It allows you to perform stress tests on a deployment with few
resources (typically a laptop). But you must avoid using scheduler filters
limiting the number of instances per compute (like RamFilter, DiskFilter,
AggregateCoreFilter), otherwise they will limit the number of instances per
diff --git a/doc/source/devref/fakes.rst b/doc/source/devref/fakes.rst
deleted file mode 100644
index d98154871f..0000000000
--- a/doc/source/devref/fakes.rst
+++ /dev/null
@@ -1,65 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Fake Drivers
-============
-
-.. todo:: document general info about fakes
-
-When the real thing isn't available and you have some development to do these
-fake implementations of various drivers let you get on with your day.
-
-
-The :mod:`nova.virt.fake` Module
---------------------------------
-
-.. automodule:: nova.virt.fake
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.auth.fakeldap` Module
-------------------------------------
-
-.. automodule:: nova.auth.fakeldap
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :class:`nova.tests.service_unittest.FakeManager` Class
-----------------------------------------------------------
-
-.. autoclass:: nova.tests.service_unittest.FakeManager
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.tests.api.openstack.fakes` Module
-------------------------------------------------
-
-.. automodule:: nova.tests.api.openstack.fakes
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
diff --git a/doc/source/devref/filter_scheduler.rst b/doc/source/devref/filter_scheduler.rst
index a5e20d83b7..479324fef1 100644
--- a/doc/source/devref/filter_scheduler.rst
+++ b/doc/source/devref/filter_scheduler.rst
@@ -94,6 +94,11 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
   ``disk_allocation_ratio`` setting. It's the virtual disk to physical disk
   allocation ratio and it's 1.0 by default. The total allowed allocated disk size will
   be the physical disk size multiplied by this ratio.
+* |AggregateDiskFilter| - filters hosts by disk allocation with per-aggregate
+ ``disk_allocation_ratio`` setting. If no per-aggregate value is found, it will
+ fall back to the global default ``disk_allocation_ratio``. If more than one value
+ is found for a host (meaning the host is in two or more different aggregates with
+ different ratio settings), the minimum value will be used.
* |NumInstancesFilter| - filters hosts by number of running instances on it.
hosts with too many instances will be filtered.
``max_instances_per_host`` setting. Maximum number of instances allowed to run on
@@ -104,6 +109,12 @@ There are some standard filter classes to use (:mod:`nova.scheduler.filters`):
``max_io_ops_per_host`` setting. Maximum number of I/O intensive instances allowed to
run on this host, the host will be ignored by scheduler if more than ``max_io_ops_per_host``
instances such as build/resize/snapshot etc are running on it.
+* |AggregateIoOpsFilter| - filters hosts by I/O operations with per-aggregate
+ ``max_io_ops_per_host`` setting. If no per-aggregate value is found, it will
+ fall back to the global default ``max_io_ops_per_host``. If more than
+ one value is found for a host (meaning the host is in two or more different
+  aggregates with different max I/O operations settings), the minimum value
+ will be used.
* |PciPassthroughFilter| - Filter that schedules instances on a host if the host
has devices to meet the device requests in the 'extra_specs' for the flavor.
* |SimpleCIDRAffinityFilter| - allows to put a new instance on a host within
@@ -354,8 +365,10 @@ in :mod:``nova.tests.scheduler``.
.. |RamFilter| replace:: :class:`RamFilter `
.. |AggregateRamFilter| replace:: :class:`AggregateRamFilter `
.. |DiskFilter| replace:: :class:`DiskFilter `
+.. |AggregateDiskFilter| replace:: :class:`AggregateDiskFilter `
.. |NumInstancesFilter| replace:: :class:`NumInstancesFilter `
.. |IoOpsFilter| replace:: :class:`IoOpsFilter `
+.. |AggregateIoOpsFilter| replace:: :class:`AggregateIoOpsFilter `
.. |PciPassthroughFilter| replace:: :class:`PciPassthroughFilter `
.. |SimpleCIDRAffinityFilter| replace:: :class:`SimpleCIDRAffinityFilter `
.. |GroupAntiAffinityFilter| replace:: :class:`GroupAntiAffinityFilter `
@@ -366,8 +379,11 @@ in :mod:``nova.tests.scheduler``.
.. |TrustedFilter| replace:: :class:`TrustedFilter `
.. |TypeAffinityFilter| replace:: :class:`TypeAffinityFilter `
.. |AggregateTypeAffinityFilter| replace:: :class:`AggregateTypeAffinityFilter `
+.. |ServerGroupAntiAffinityFilter| replace:: :class:`ServerGroupAntiAffinityFilter `
+.. |ServerGroupAffinityFilter| replace:: :class:`ServerGroupAffinityFilter `
.. |AggregateInstanceExtraSpecsFilter| replace:: :class:`AggregateInstanceExtraSpecsFilter `
.. |AggregateMultiTenancyIsolation| replace:: :class:`AggregateMultiTenancyIsolation `
.. |RamWeigher| replace:: :class:`RamWeigher `
.. |AggregateImagePropertiesIsolation| replace:: :class:`AggregateImagePropertiesIsolation `
.. |MetricsFilter| replace:: :class:`MetricsFilter `
+.. |MetricsWeigher| replace:: :class:`MetricsWeigher `
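
Both per-aggregate filters added above share one lookup rule, which is worth
seeing concretely. An illustrative sketch (not the nova implementation; the
helper name and metadata layout are assumptions)::

    # Gather per-aggregate overrides for a key; fall back to the global
    # default when none is set; take the minimum when aggregates disagree.
    def aggregate_value_or_default(aggregates, key, default):
        values = []
        for agg in aggregates:
            raw = agg.get("metadata", {}).get(key)
            if raw is not None:
                values.append(float(raw))
        return min(values) if values else default

    # A host in two aggregates with max_io_ops_per_host of 8 and 10 is
    # limited to 8.
    aggs = [{"metadata": {"max_io_ops_per_host": "8"}},
            {"metadata": {"max_io_ops_per_host": "10"}}]
    assert aggregate_value_or_default(aggs, "max_io_ops_per_host", 8.0) == 8.0
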
diff --git a/doc/source/devref/il8n.rst b/doc/source/devref/il8n.rst
index 0073950551..3898e5302c 100644
--- a/doc/source/devref/il8n.rst
+++ b/doc/source/devref/il8n.rst
@@ -21,4 +21,4 @@ in nova/tests/test_localization.py.
The ``_()`` function is found by doing::
- from nova.openstack.common.gettextutils import _
+ from nova.i18n import _
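
A short sketch of the import in use (the message and helper are illustrative)::

    from nova.i18n import _

    def missing_host_message(host):
        # Interpolate outside _() so the extracted msgid stays stable.
        return _("Compute host %s could not be found.") % host
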
diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index b9117ff192..10148394eb 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -63,28 +63,12 @@ Other Resources
gerrit
jenkins
-API Reference
--------------
-.. toctree::
- :maxdepth: 3
-
- ../api/autoindex
-
Module Reference
----------------
.. toctree::
:maxdepth: 3
services
- database
- compute
- network
- api
- scheduler
- fakes
- nova
- objectstore
- glance
Indices and tables
diff --git a/doc/source/devref/network.rst b/doc/source/devref/network.rst
deleted file mode 100644
index 56e9682ebd..0000000000
--- a/doc/source/devref/network.rst
+++ /dev/null
@@ -1,49 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Networking
-==========
-
-The :mod:`nova.network.manager` Module
---------------------------------------
-
-.. automodule:: nova.network.manager
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-The :mod:`nova.network.linux_net` Driver
-----------------------------------------
-
-.. automodule:: nova.network.linux_net
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-Tests
------
-
-The :mod:`network_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.network_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/devref/nova.rst b/doc/source/devref/nova.rst
deleted file mode 100644
index beca99ecd5..0000000000
--- a/doc/source/devref/nova.rst
+++ /dev/null
@@ -1,215 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Common and Misc Libraries
-=========================
-
-Libraries common throughout Nova or just ones that haven't been categorized
-very well yet.
-
-
-The :mod:`nova.adminclient` Module
-----------------------------------
-
-.. automodule:: nova.adminclient
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.context` Module
-------------------------------
-
-.. automodule:: nova.context
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.exception` Module
---------------------------------
-
-.. automodule:: nova.exception
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.flags` Module
-----------------------------
-
-.. automodule:: nova.flags
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.process` Module
-------------------------------
-
-.. automodule:: nova.process
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.rpc` Module
---------------------------
-
-.. automodule:: nova.rpc
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.server` Module
------------------------------
-
-.. automodule:: nova.server
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.test` Module
----------------------------
-
-.. automodule:: nova.test
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.utils` Module
-----------------------------
-
-.. automodule:: nova.utils
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.validate` Module
--------------------------------
-
-.. automodule:: nova.validate
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.wsgi` Module
----------------------------
-
-.. automodule:: nova.wsgi
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Tests
------
-
-The :mod:`declare_flags` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.declare_flags
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`fake_flags` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.fake_flags
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`flags_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.flags_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`process_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.process_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`real_flags` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.real_flags
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`rpc_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.rpc_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`runtime_flags` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.runtime_flags
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`validator_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.validator_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/devref/objectstore.rst b/doc/source/devref/objectstore.rst
deleted file mode 100644
index f140e85e99..0000000000
--- a/doc/source/devref/objectstore.rst
+++ /dev/null
@@ -1,71 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Objectstore - File Storage Service
-==================================
-
-The :mod:`nova.objectstore.handler` Module
-------------------------------------------
-
-.. automodule:: nova.objectstore.handler
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.objectstore.bucket` Module
------------------------------------------
-
-.. automodule:: nova.objectstore.bucket
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.objectstore.stored` Module
------------------------------------------
-
-.. automodule:: nova.objectstore.stored
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.objecstore.image` Module
-----------------------------------------
-
-.. automodule:: nova.objectstore.image
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Tests
------
-
-The :mod:`objectstore_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.objectstore_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/devref/policies.rst b/doc/source/devref/policies.rst
index 28777bc6a9..df5286bbe6 100644
--- a/doc/source/devref/policies.rst
+++ b/doc/source/devref/policies.rst
@@ -18,7 +18,7 @@ Out Of Tree Support
===================
While nova has many entrypoints and other places in the code that allow for
-wiring in out of tree code. Upstream doesn't actively make any guarantees
+wiring in out of tree code, upstream doesn't actively make any guarantees
about these extensibility points; we don't support them, make any guarantees
about compatibility, stability, etc.
@@ -34,3 +34,13 @@ this wrong.
This policy is in place to prevent us from making backwards incompatible
changes to APIs.
+
+Patches and Reviews
+===================
+
+Merging a patch requires a non-trivial amount of reviewer resources.
+As a patch author, you should try to offset the reviewer resources
+spent on your patch by reviewing other patches. If no one does this, the review
+team (cores and otherwise) becomes spread too thin.
+
+For review guidelines see: https://wiki.openstack.org/wiki/ReviewChecklist
diff --git a/doc/source/devref/scheduler.rst b/doc/source/devref/scheduler.rst
deleted file mode 100644
index 6f0b8edf56..0000000000
--- a/doc/source/devref/scheduler.rst
+++ /dev/null
@@ -1,61 +0,0 @@
-..
- Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Scheduler
-=========
-
-The :mod:`nova.scheduler.manager` Module
-----------------------------------------
-
-.. automodule:: nova.scheduler.manager
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.scheduler.driver` Module
----------------------------------------
-
-.. automodule:: nova.scheduler.driver
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-The :mod:`nova.scheduler.chance` Driver
----------------------------------------
-
-.. automodule:: nova.scheduler.chance
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
-
-
-Tests
------
-
-The :mod:`scheduler_unittest` Module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. automodule:: nova.tests.scheduler_unittest
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/source/devref/vmstates.rst b/doc/source/devref/vmstates.rst
index 80075124fd..4ab800ec69 100644
--- a/doc/source/devref/vmstates.rst
+++ b/doc/source/devref/vmstates.rst
@@ -88,6 +88,7 @@ task states for various commands issued by the user:
rescue -> error
active -> rescue
stopped -> rescue
+ error -> rescue
unrescue [shape="rectangle"]
unrescue -> active
@@ -139,7 +140,9 @@ task states for various commands issued by the user:
reboot -> error
active -> reboot
stopped -> reboot
- rescued -> reboot
+ paused -> reboot
+ suspended -> reboot
+ error -> reboot
live_migrate [shape="rectangle"]
live_migrate -> active
@@ -159,4 +162,4 @@ The following diagram shows the sequence of VM states, task states, and
power states when a new VM instance is created.
-.. image:: /images/run_instance_walkthrough.png
\ No newline at end of file
+.. image:: /images/run_instance_walkthrough.png
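
Taken together, the vmstates edits above permit rescue from ``error`` and
reboot from ``paused``, ``suspended``, and ``error``, and drop reboot from
``rescued``. As a reading aid only, a minimal Python sketch (the dict below
is distilled from the diagram's edges and is hypothetical, not Nova's actual
state machine; the authoritative checks live elsewhere in the compute API)::

    # Hypothetical distillation of the diagram edges touched above.
    ALLOWED = {
        "rescue": {"active", "stopped", "error"},
        "reboot": {"active", "stopped", "paused", "suspended", "error"},
    }

    def can_request(action, vm_state):
        """Return True if the diagram allows `action` from `vm_state`."""
        return vm_state in ALLOWED.get(action, set())

    assert can_request("rescue", "error")        # newly permitted
    assert can_request("reboot", "suspended")    # newly permitted
    assert not can_request("reboot", "rescued")  # edge removed above
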
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 077fc2920d..123506fb09 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -47,6 +47,8 @@ Developer Docs
:maxdepth: 1
devref/index
+ man/index
+ api/autoindex
API Extensions
==============
diff --git a/doc/source/devref/glance.rst b/doc/source/man/index.rst
similarity index 56%
rename from doc/source/devref/glance.rst
rename to doc/source/man/index.rst
index 9a1c14d58b..af0e4b83c7 100644
--- a/doc/source/devref/glance.rst
+++ b/doc/source/man/index.rst
@@ -1,6 +1,6 @@
..
Copyright 2010-2011 United States Government as represented by the
- Administrator of the National Aeronautics and Space Administration.
+ Administrator of the National Aeronautics and Space Administration.
All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,14 +15,35 @@
License for the specific language governing permissions and limitations
under the License.
-Glance Integration - The Future of File Storage
-===============================================
+Command-line Utilities
+======================
-The :mod:`nova.image.service` Module
-------------------------------------
+This section documents Nova's command-line utilities.
+
+Reference
+---------
+.. toctree::
+ :maxdepth: 3
+
+ nova-all
+ nova-api-ec2
+ nova-api-metadata
+ nova-api-os-compute
+ nova-api
+ nova-baremetal-deploy-helper
+ nova-baremetal-manage
+ nova-cert
+ nova-compute
+ nova-conductor
+ nova-console
+ nova-consoleauth
+ nova-dhcpbridge
+ nova-manage
+ nova-network
+ nova-novncproxy
+ nova-objectstore
+ nova-rootwrap
+ nova-scheduler
+ nova-spicehtml5proxy
+ nova-xvpvncproxy
-.. automodule:: nova.image.service
- :noindex:
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/v3/api_samples/all_extensions/server-get-resp.json b/doc/v3/api_samples/all_extensions/server-get-resp.json
index 8826f360dc..803d5c37c1 100644
--- a/doc/v3/api_samples/all_extensions/server-get-resp.json
+++ b/doc/v3/api_samples/all_extensions/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "9cc36101a27c2a69c1a18241f6228454d9d7f466bd90c62db8e8b856",
+ "hostId": "9cc36101a27c2a69c1a18241f6228454d9d7f466bd90c62db8e8b856",
"id": "f474386b-4fb6-4e1f-b1d5-d6bf4437f7d5",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
@@ -48,15 +48,15 @@
"name": "new-server-test",
"os-access-ips:access_ip_v4": "",
"os-access-ips:access_ip_v6": "",
- "os-config-drive:config_drive": "",
+ "config_drive": "",
"os-extended-availability-zone:availability_zone": "nova",
- "os-extended-server-attributes:host": "b8b357f7100d4391828f2177c922ef93",
- "os-extended-server-attributes:hypervisor_hostname": "fake-mini",
- "os-extended-server-attributes:instance_name": "instance-00000001",
- "os-extended-status:locked_by": null,
- "os-extended-status:power_state": 1,
- "os-extended-status:task_state": null,
- "os-extended-status:vm_state": "active",
+ "OS-EXT-SRV-ATTR:host": "b8b357f7100d4391828f2177c922ef93",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-STS:locked_by": null,
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
"os-extended-volumes:volumes_attached": [],
"os-pci:pci_devices": [
{
diff --git a/doc/v3/api_samples/all_extensions/server-post-req.json b/doc/v3/api_samples/all_extensions/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/all_extensions/server-post-req.json
+++ b/doc/v3/api_samples/all_extensions/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/all_extensions/server-post-resp.json b/doc/v3/api_samples/all_extensions/server-post-resp.json
index 495c2a9c7d..1557202de0 100644
--- a/doc/v3/api_samples/all_extensions/server-post-resp.json
+++ b/doc/v3/api_samples/all_extensions/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "zPnp2GseTqG4",
+ "adminPass": "zPnp2GseTqG4",
"id": "8195065c-fea4-4d57-b93f-5c5c63fe90e8",
"links": [
{
diff --git a/doc/v3/api_samples/all_extensions/servers-details-resp.json b/doc/v3/api_samples/all_extensions/servers-details-resp.json
index 9467fb4a40..bf846017b6 100644
--- a/doc/v3/api_samples/all_extensions/servers-details-resp.json
+++ b/doc/v3/api_samples/all_extensions/servers-details-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "f1e160ad2bf07084f3d3e0dfdd0795d80da18a60825322c15775c0dd",
+ "hostId": "f1e160ad2bf07084f3d3e0dfdd0795d80da18a60825322c15775c0dd",
"id": "9cbefc35-d372-40c5-88e2-9fda1b6ea12c",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
@@ -49,15 +49,15 @@
"name": "new-server-test",
"os-access-ips:access_ip_v4": "",
"os-access-ips:access_ip_v6": "",
- "os-config-drive:config_drive": "",
+ "config_drive": "",
"os-extended-availability-zone:availability_zone": "nova",
- "os-extended-server-attributes:host": "c3f14e9812ad496baf92ccfb3c61e15f",
- "os-extended-server-attributes:hypervisor_hostname": "fake-mini",
- "os-extended-server-attributes:instance_name": "instance-00000001",
- "os-extended-status:locked_by": null,
- "os-extended-status:power_state": 1,
- "os-extended-status:task_state": null,
- "os-extended-status:vm_state": "active",
+ "OS-EXT-SRV-ATTR:host": "c3f14e9812ad496baf92ccfb3c61e15f",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-STS:locked_by": null,
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
"os-extended-volumes:volumes_attached": [],
"os-pci:pci_devices": [
{
diff --git a/doc/v3/api_samples/consoles/server-post-req.json b/doc/v3/api_samples/consoles/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/consoles/server-post-req.json
+++ b/doc/v3/api_samples/consoles/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/consoles/server-post-resp.json b/doc/v3/api_samples/consoles/server-post-resp.json
index a83ab07f7d..0dfaf9148e 100644
--- a/doc/v3/api_samples/consoles/server-post-resp.json
+++ b/doc/v3/api_samples/consoles/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "C3r5xKS73Y7S",
+ "adminPass": "C3r5xKS73Y7S",
"id": "3f19c120-f64a-4faf-848e-33900b752f83",
"links": [
{
diff --git a/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json b/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json
index 3d92616716..5c0013842f 100644
--- a/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json
+++ b/doc/v3/api_samples/os-access-ips/server-action-rebuild-resp.json
@@ -10,7 +10,7 @@
}
]
},
- "admin_password": "99WHAxN8gpvg",
+ "adminPass": "99WHAxN8gpvg",
"created": "2013-11-06T07:51:09Z",
"flavor": {
"id": "1",
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "5c8072dbcda8ce3f26deb6662bd7718e1a6d349bdf2296911d1be4ac",
+ "hostId": "5c8072dbcda8ce3f26deb6662bd7718e1a6d349bdf2296911d1be4ac",
"id": "53a63a19-c145-47f8-9ae5-b39d6bff33ec",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-access-ips/server-action-rebuild.json b/doc/v3/api_samples/os-access-ips/server-action-rebuild.json
index 678bd647ba..9285071478 100644
--- a/doc/v3/api_samples/os-access-ips/server-action-rebuild.json
+++ b/doc/v3/api_samples/os-access-ips/server-action-rebuild.json
@@ -2,7 +2,7 @@
"rebuild" : {
"os-access-ips:access_ip_v4": "4.3.2.1",
"os-access-ips:access_ip_v6": "80fe::",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"name" : "new-server-test",
"metadata" : {
"meta_var" : "meta_val"
diff --git a/doc/v3/api_samples/os-access-ips/server-get-resp.json b/doc/v3/api_samples/os-access-ips/server-get-resp.json
index 5810f469ad..e0ce062060 100644
--- a/doc/v3/api_samples/os-access-ips/server-get-resp.json
+++ b/doc/v3/api_samples/os-access-ips/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "b3a6fd97c027e18d6d9c7506eea8a236cf2ceca420cfdfe0239a64a8",
+ "hostId": "b3a6fd97c027e18d6d9c7506eea8a236cf2ceca420cfdfe0239a64a8",
"id": "5eedbf0c-c303-4ed3-933a-a4d3732cfa0a",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-access-ips/server-post-req.json b/doc/v3/api_samples/os-access-ips/server-post-req.json
index b0f0e90e53..5bd3781f3e 100644
--- a/doc/v3/api_samples/os-access-ips/server-post-req.json
+++ b/doc/v3/api_samples/os-access-ips/server-post-req.json
@@ -3,8 +3,8 @@
"os-access-ips:access_ip_v4": "1.2.3.4",
"os-access-ips:access_ip_v6": "fe80::",
"name" : "new-server-test",
- "image_ref" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/openstack/flavors/1",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/doc/v3/api_samples/os-access-ips/server-post-resp.json b/doc/v3/api_samples/os-access-ips/server-post-resp.json
index d3edc5eb41..a756cde26e 100644
--- a/doc/v3/api_samples/os-access-ips/server-post-resp.json
+++ b/doc/v3/api_samples/os-access-ips/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "n7JGBda664QG",
+ "adminPass": "n7JGBda664QG",
"id": "934760e1-2b0b-4f9e-a916-eac1e69839dc",
"links": [
{
diff --git a/doc/v3/api_samples/os-access-ips/server-put-resp.json b/doc/v3/api_samples/os-access-ips/server-put-resp.json
index 620574c5c6..f8f0cf3d9b 100644
--- a/doc/v3/api_samples/os-access-ips/server-put-resp.json
+++ b/doc/v3/api_samples/os-access-ips/server-put-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "ea0fd522e5bc2fea872429b331304a6f930f2d9aa2a5dc95b3c6061a",
+ "hostId": "ea0fd522e5bc2fea872429b331304a6f930f2d9aa2a5dc95b3c6061a",
"id": "fea9595c-ce6e-4565-987e-2d301fe056ac",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-access-ips/servers-details-resp.json b/doc/v3/api_samples/os-access-ips/servers-details-resp.json
index c757662332..bce7408599 100644
--- a/doc/v3/api_samples/os-access-ips/servers-details-resp.json
+++ b/doc/v3/api_samples/os-access-ips/servers-details-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "9896cb12c9845becf1b9b06c8ff5b131d20300f83e2cdffc92e3f4a4",
+ "hostId": "9896cb12c9845becf1b9b06c8ff5b131d20300f83e2cdffc92e3f4a4",
"id": "934760e1-2b0b-4f9e-a916-eac1e69839dc",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-admin-actions/server-post-req.json b/doc/v3/api_samples/os-admin-actions/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-admin-actions/server-post-req.json
+++ b/doc/v3/api_samples/os-admin-actions/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-admin-actions/server-post-resp.json b/doc/v3/api_samples/os-admin-actions/server-post-resp.json
index 270cb84634..353517739f 100644
--- a/doc/v3/api_samples/os-admin-actions/server-post-resp.json
+++ b/doc/v3/api_samples/os-admin-actions/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "DM3QzjhGTzLB",
+ "adminPass": "DM3QzjhGTzLB",
"id": "bebeec79-497e-4711-a311-d0d2e3dfc73b",
"links": [
{
diff --git a/doc/v3/api_samples/os-admin-password/admin-password-change-password.json b/doc/v3/api_samples/os-admin-password/admin-password-change-password.json
index 6fbfbea80f..94855a4e8c 100644
--- a/doc/v3/api_samples/os-admin-password/admin-password-change-password.json
+++ b/doc/v3/api_samples/os-admin-password/admin-password-change-password.json
@@ -1,5 +1,5 @@
{
- "change_password" : {
- "admin_password" : "foo"
+ "changePassword" : {
+ "adminPass" : "foo"
}
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-admin-password/server-post-req.json b/doc/v3/api_samples/os-admin-password/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-admin-password/server-post-req.json
+++ b/doc/v3/api_samples/os-admin-password/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-admin-password/server-post-resp.json b/doc/v3/api_samples/os-admin-password/server-post-resp.json
index b67cb859bd..e81b2aab7c 100644
--- a/doc/v3/api_samples/os-admin-password/server-post-resp.json
+++ b/doc/v3/api_samples/os-admin-password/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "oCYTjD2KnRtB",
+ "adminPass": "oCYTjD2KnRtB",
"id": "aa3130b9-dffc-417b-aa03-93d5394a2fa6",
"links": [
{
diff --git a/doc/v3/api_samples/os-agents/agent-post-req.json b/doc/v3/api_samples/os-agents/agent-post-req.json
index 217993b17f..1913498547 100644
--- a/doc/v3/api_samples/os-agents/agent-post-req.json
+++ b/doc/v3/api_samples/os-agents/agent-post-req.json
@@ -5,6 +5,6 @@
"architecture": "x86",
"version": "8.0",
"md5hash": "add6bb58e139be103324d04d82d8f545",
- "url": "xxxxxxxxxxxx"
+ "url": "http://example.com/path/to/resource"
}
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-agents/agent-post-resp.json b/doc/v3/api_samples/os-agents/agent-post-resp.json
index f6c760cc67..24ddede90b 100644
--- a/doc/v3/api_samples/os-agents/agent-post-resp.json
+++ b/doc/v3/api_samples/os-agents/agent-post-resp.json
@@ -5,7 +5,7 @@
"hypervisor": "hypervisor",
"md5hash": "add6bb58e139be103324d04d82d8f545",
"os": "os",
- "url": "xxxxxxxxxxxx",
+ "url": "http://example.com/path/to/resource",
"version": "8.0"
}
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-agents/agent-update-put-req.json b/doc/v3/api_samples/os-agents/agent-update-put-req.json
index e166abf9ee..89cbcaba39 100644
--- a/doc/v3/api_samples/os-agents/agent-update-put-req.json
+++ b/doc/v3/api_samples/os-agents/agent-update-put-req.json
@@ -1,6 +1,6 @@
{
"agent": {
- "url": "xxx://xxxx/xxx/xxx",
+ "url": "http://example.com/path/to/resource",
"md5hash": "add6bb58e139be103324d04d82d8f545",
"version": "7.0"
}
diff --git a/doc/v3/api_samples/os-agents/agent-update-put-resp.json b/doc/v3/api_samples/os-agents/agent-update-put-resp.json
index 866994e4c9..2964c0f894 100644
--- a/doc/v3/api_samples/os-agents/agent-update-put-resp.json
+++ b/doc/v3/api_samples/os-agents/agent-update-put-resp.json
@@ -2,7 +2,7 @@
"agent": {
"agent_id": 1,
"md5hash": "add6bb58e139be103324d04d82d8f545",
- "url": "xxx://xxxx/xxx/xxx",
+ "url": "http://example.com/path/to/resource",
"version": "7.0"
}
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-agents/agents-get-resp.json b/doc/v3/api_samples/os-agents/agents-get-resp.json
index 73ba45c240..92e14e1dc5 100644
--- a/doc/v3/api_samples/os-agents/agents-get-resp.json
+++ b/doc/v3/api_samples/os-agents/agents-get-resp.json
@@ -6,8 +6,8 @@
"hypervisor": "hypervisor",
"md5hash": "add6bb58e139be103324d04d82d8f545",
"os": "os",
- "url": "xxxxxxxxxxxx",
+ "url": "http://example.com/path/to/resource",
"version": "8.0"
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json
index af7bdd7af4..d14e791404 100644
--- a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json
+++ b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json
@@ -1,5 +1,5 @@
{
- "interface_attachment": {
+ "interfaceAttachment": {
"port_id": "ce531f90-199f-48c0-816c-13e38010b442"
}
}
\ No newline at end of file
diff --git a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json
index 93b68d9c69..9dff234366 100644
--- a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json
+++ b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json
@@ -1,5 +1,5 @@
{
- "interface_attachment": {
+ "interfaceAttachment": {
"fixed_ips": [
{
"ip_address": "192.168.1.3",
diff --git a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json
index 9d977378b7..192f9a6487 100644
--- a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json
+++ b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json
@@ -1,5 +1,5 @@
{
- "interface_attachments": [
+ "interfaceAttachments": [
{
"fixed_ips": [
{
diff --git a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json
index 93b68d9c69..9dff234366 100644
--- a/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json
+++ b/doc/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json
@@ -1,5 +1,5 @@
{
- "interface_attachment": {
+ "interfaceAttachment": {
"fixed_ips": [
{
"ip_address": "192.168.1.3",
diff --git a/doc/v3/api_samples/os-attach-interfaces/server-post-req.json b/doc/v3/api_samples/os-attach-interfaces/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-attach-interfaces/server-post-req.json
+++ b/doc/v3/api_samples/os-attach-interfaces/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-attach-interfaces/server-post-resp.json b/doc/v3/api_samples/os-attach-interfaces/server-post-resp.json
index cc809d3075..4ef70a0a90 100644
--- a/doc/v3/api_samples/os-attach-interfaces/server-post-resp.json
+++ b/doc/v3/api_samples/os-attach-interfaces/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "fjPxt8d8YcSR",
+ "adminPass": "fjPxt8d8YcSR",
"id": "c937be78-c423-495b-a99a-e590ab6f30ba",
"links": [
{
diff --git a/doc/v3/api_samples/os-availability-zone/server-post-req.json b/doc/v3/api_samples/os-availability-zone/server-post-req.json
index 0377284764..83df44d977 100644
--- a/doc/v3/api_samples/os-availability-zone/server-post-req.json
+++ b/doc/v3/api_samples/os-availability-zone/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/openstack/flavors/1",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
"os-availability-zone:availability_zone" : "test"
"metadata" : {
"My Server Name" : "Apache1"
diff --git a/doc/v3/api_samples/os-availability-zone/server-post-resp.json b/doc/v3/api_samples/os-availability-zone/server-post-resp.json
index df24e0d2b9..7cd8d888b5 100644
--- a/doc/v3/api_samples/os-availability-zone/server-post-resp.json
+++ b/doc/v3/api_samples/os-availability-zone/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "a2apKsfp7Rom",
+ "adminPass": "a2apKsfp7Rom",
"id": "e88c3898-e971-42e5-8325-b7ff921efb15",
"links": [
{
diff --git a/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json b/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json
index d31c241f1a..f2938cee7f 100644
--- a/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json
+++ b/doc/v3/api_samples/os-config-drive/server-config-drive-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "1642bbdbd61a0f1c513b4bb6e418326103172698104bfa278eca106b",
+ "hostId": "1642bbdbd61a0f1c513b4bb6e418326103172698104bfa278eca106b",
"id": "7838ff1b-b71f-48b9-91e9-7c08de20b249",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
@@ -46,11 +46,11 @@
"My Server Name": "Apache1"
},
"name": "new-server-test",
- "os-config-drive:config_drive": "",
+ "config_drive": "",
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
"updated": "2013-09-22T02:33:25Z",
"user_id": "fake"
}
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-config-drive/server-post-req.json b/doc/v3/api_samples/os-config-drive/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-config-drive/server-post-req.json
+++ b/doc/v3/api_samples/os-config-drive/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-config-drive/server-post-resp.json b/doc/v3/api_samples/os-config-drive/server-post-resp.json
index cdc2c592f3..88388573a7 100644
--- a/doc/v3/api_samples/os-config-drive/server-post-resp.json
+++ b/doc/v3/api_samples/os-config-drive/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "h2cx3Lm47BJc",
+ "adminPass": "h2cx3Lm47BJc",
"id": "f0318e69-11eb-4aed-9840-59b6c72beee8",
"links": [
{
diff --git a/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json b/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json
index 97b96e7a7d..ae4e5f7b19 100644
--- a/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json
+++ b/doc/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "1ed067c90341cd9d94bbe5da960922b56f107262cdc75719a0d97b78",
+ "hostId": "1ed067c90341cd9d94bbe5da960922b56f107262cdc75719a0d97b78",
"id": "f0318e69-11eb-4aed-9840-59b6c72beee8",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
@@ -47,7 +47,7 @@
"My Server Name": "Apache1"
},
"name": "new-server-test",
- "os-config-drive:config_drive": "",
+ "config_drive": "",
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
@@ -55,4 +55,4 @@
"user_id": "fake"
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-console-auth-tokens/server-post-req.json b/doc/v3/api_samples/os-console-auth-tokens/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-console-auth-tokens/server-post-req.json
+++ b/doc/v3/api_samples/os-console-auth-tokens/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-console-auth-tokens/server-post-resp.json b/doc/v3/api_samples/os-console-auth-tokens/server-post-resp.json
index 3d22d59aa6..e48a97813e 100644
--- a/doc/v3/api_samples/os-console-auth-tokens/server-post-resp.json
+++ b/doc/v3/api_samples/os-console-auth-tokens/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "Kwg5tff6KiUU",
+ "adminPass": "Kwg5tff6KiUU",
"id": "8619225c-67c8-424f-9b46-cec5bad137a2",
"links": [
{
diff --git a/doc/v3/api_samples/os-console-output/server-post-req.json b/doc/v3/api_samples/os-console-output/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-console-output/server-post-req.json
+++ b/doc/v3/api_samples/os-console-output/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-console-output/server-post-resp.json b/doc/v3/api_samples/os-console-output/server-post-resp.json
index 08ca48799e..66e933a74c 100644
--- a/doc/v3/api_samples/os-console-output/server-post-resp.json
+++ b/doc/v3/api_samples/os-console-output/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "8mQaAgHHFsDp",
+ "adminPass": "8mQaAgHHFsDp",
"id": "71e8cf04-0486-46ae-9d18-e51f4978fa13",
"links": [
{
diff --git a/doc/v3/api_samples/os-create-backup/server-post-req.json b/doc/v3/api_samples/os-create-backup/server-post-req.json
index 30851df41a..1c45fbb32f 100644
--- a/doc/v3/api_samples/os-create-backup/server-post-req.json
+++ b/doc/v3/api_samples/os-create-backup/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/doc/v3/api_samples/os-create-backup/server-post-resp.json b/doc/v3/api_samples/os-create-backup/server-post-resp.json
index 270cb84634..353517739f 100644
--- a/doc/v3/api_samples/os-create-backup/server-post-resp.json
+++ b/doc/v3/api_samples/os-create-backup/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "DM3QzjhGTzLB",
+ "adminPass": "DM3QzjhGTzLB",
"id": "bebeec79-497e-4711-a311-d0d2e3dfc73b",
"links": [
{
diff --git a/doc/v3/api_samples/os-deferred-delete/server-post-req.json b/doc/v3/api_samples/os-deferred-delete/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-deferred-delete/server-post-req.json
+++ b/doc/v3/api_samples/os-deferred-delete/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-deferred-delete/server-post-resp.json b/doc/v3/api_samples/os-deferred-delete/server-post-resp.json
index af2d33b2f4..871dfcd0b0 100644
--- a/doc/v3/api_samples/os-deferred-delete/server-post-resp.json
+++ b/doc/v3/api_samples/os-deferred-delete/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "hqgU8QzT54wF",
+ "adminPass": "hqgU8QzT54wF",
"id": "4a3bde9b-fa37-408d-b167-68e1724c923e",
"links": [
{
diff --git a/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json b/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json
new file mode 100644
index 0000000000..a8a2162381
--- /dev/null
+++ b/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json
@@ -0,0 +1,6 @@
+{
+ "evacuate": {
+ "admin_password": "MySecretPass",
+ "on_shared_storage": "False"
+ }
+}
diff --git a/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json b/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json
new file mode 100644
index 0000000000..fcd865c043
--- /dev/null
+++ b/doc/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json
@@ -0,0 +1,3 @@
+{
+ "admin_password": "MySecretPass"
+}
\ No newline at end of file
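
The two new samples above document the find-host variant of evacuate: the
request body simply omits the target host, leaving selection to the
scheduler. A hedged sketch of issuing that action (the endpoint layout and
token handling are assumptions for illustration, not part of this patch)::

    import json
    import urllib.request

    def evacuate_without_host(base_url, token, server_id):
        # Same body as server-evacuate-find-host-req.json: no "host" key,
        # so the scheduler picks the target.
        body = {"evacuate": {"admin_password": "MySecretPass",
                             "on_shared_storage": "False"}}
        req = urllib.request.Request(
            "%s/servers/%s/action" % (base_url, server_id),
            data=json.dumps(body).encode("utf-8"),
            headers={"Content-Type": "application/json",
                     "X-Auth-Token": token})
        with urllib.request.urlopen(req) as resp:
            return json.load(resp)
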
diff --git a/doc/v3/api_samples/os-evacuate/server-post-req.json b/doc/v3/api_samples/os-evacuate/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-evacuate/server-post-req.json
+++ b/doc/v3/api_samples/os-evacuate/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-evacuate/server-post-resp.json b/doc/v3/api_samples/os-evacuate/server-post-resp.json
index 19e0537fd6..a15d69b508 100644
--- a/doc/v3/api_samples/os-evacuate/server-post-resp.json
+++ b/doc/v3/api_samples/os-evacuate/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "xCQm2Cs2vSFx",
+ "adminPass": "xCQm2Cs2vSFx",
"id": "5f1fbc62-29ed-4e4a-9f15-8affc5e0a796",
"links": [
{
diff --git a/doc/v3/api_samples/os-extended-availability-zone/server-get-resp.json b/doc/v3/api_samples/os-extended-availability-zone/server-get-resp.json
index 940f125cf9..2f4ca3724d 100644
--- a/doc/v3/api_samples/os-extended-availability-zone/server-get-resp.json
+++ b/doc/v3/api_samples/os-extended-availability-zone/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "b75d6736650f9b272223ceb48f4cde001de1856e381613a922117ab7",
+ "hostId": "b75d6736650f9b272223ceb48f4cde001de1856e381613a922117ab7",
"id": "f22e4521-d03a-4e9f-9fd3-016b9e227219",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-extended-availability-zone/server-post-req.json b/doc/v3/api_samples/os-extended-availability-zone/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-extended-availability-zone/server-post-req.json
+++ b/doc/v3/api_samples/os-extended-availability-zone/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-extended-availability-zone/server-post-resp.json b/doc/v3/api_samples/os-extended-availability-zone/server-post-resp.json
index 6a51c12d7b..8498a90291 100644
--- a/doc/v3/api_samples/os-extended-availability-zone/server-post-resp.json
+++ b/doc/v3/api_samples/os-extended-availability-zone/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "87taVVreqh6R",
+ "adminPass": "87taVVreqh6R",
"id": "f22e4521-d03a-4e9f-9fd3-016b9e227219",
"links": [
{
diff --git a/doc/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json b/doc/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json
index 9563a0acde..de70fcee6e 100644
--- a/doc/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json
+++ b/doc/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "cf5540800371e53064a60b36ff9d6d1d6a8719ffc870c63a270c6bee",
+ "hostId": "cf5540800371e53064a60b36ff9d6d1d6a8719ffc870c63a270c6bee",
"id": "55f43fa2-dc7c-4c0b-b21a-76f9abe516f9",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json b/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json
index 24c54d9c5d..ffc3a76f32 100644
--- a/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json
+++ b/doc/v3/api_samples/os-extended-server-attributes/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "3bf189131c61d0e71b0a8686a897a0f50d1693b48c47b721fe77155b",
+ "hostId": "3bf189131c61d0e71b0a8686a897a0f50d1693b48c47b721fe77155b",
"id": "c278163e-36f9-4cf2-b1ac-80db4c63f7a8",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
@@ -46,9 +46,9 @@
"My Server Name": "Apache1"
},
"name": "new-server-test",
- "os-extended-server-attributes:host": "c5f474bf81474f9dbbc404d5b2e4e9b3",
- "os-extended-server-attributes:hypervisor_hostname": "fake-mini",
- "os-extended-server-attributes:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:host": "c5f474bf81474f9dbbc404d5b2e4e9b3",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
diff --git a/doc/v3/api_samples/os-extended-server-attributes/server-post-req.json b/doc/v3/api_samples/os-extended-server-attributes/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-extended-server-attributes/server-post-req.json
+++ b/doc/v3/api_samples/os-extended-server-attributes/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-extended-server-attributes/server-post-resp.json b/doc/v3/api_samples/os-extended-server-attributes/server-post-resp.json
index 80b3a2b5b6..9b72d53c9e 100644
--- a/doc/v3/api_samples/os-extended-server-attributes/server-post-resp.json
+++ b/doc/v3/api_samples/os-extended-server-attributes/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "UCvmH8nHXm66",
+ "adminPass": "UCvmH8nHXm66",
"id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46",
"links": [
{
diff --git a/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json b/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json
index 7236315943..9b037bd187 100644
--- a/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json
+++ b/doc/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "63cf07a9fd82e1d2294926ec5c0d2e1e0ca449224246df75e16f23dc",
+ "hostId": "63cf07a9fd82e1d2294926ec5c0d2e1e0ca449224246df75e16f23dc",
"id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
@@ -47,9 +47,9 @@
"My Server Name": "Apache1"
},
"name": "new-server-test",
- "os-extended-server-attributes:host": "bc8efe4fdb7148a4bb921a2b03d17de6",
- "os-extended-server-attributes:hypervisor_hostname": "fake-mini",
- "os-extended-server-attributes:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:host": "bc8efe4fdb7148a4bb921a2b03d17de6",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
diff --git a/doc/v3/api_samples/os-extended-status/server-get-resp.json b/doc/v3/api_samples/os-extended-status/server-get-resp.json
index 6402094949..706d455bd4 100644
--- a/doc/v3/api_samples/os-extended-status/server-get-resp.json
+++ b/doc/v3/api_samples/os-extended-status/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "46d2aa2d637bd55606304b611a1928627ee1278c149aef2206268d6e",
+ "hostId": "46d2aa2d637bd55606304b611a1928627ee1278c149aef2206268d6e",
"id": "a868cb5e-c794-47bf-9cd8-e302b72bb94b",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
@@ -46,10 +46,10 @@
"My Server Name": "Apache1"
},
"name": "new-server-test",
- "os-extended-status:locked_by": null,
- "os-extended-status:power_state": 1,
- "os-extended-status:task_state": null,
- "os-extended-status:vm_state": "active",
+ "OS-EXT-STS:locked_by": null,
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
diff --git a/doc/v3/api_samples/os-extended-status/server-post-req.json b/doc/v3/api_samples/os-extended-status/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-extended-status/server-post-req.json
+++ b/doc/v3/api_samples/os-extended-status/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-extended-status/server-post-resp.json b/doc/v3/api_samples/os-extended-status/server-post-resp.json
index 9d953d9b94..08cf336e8a 100644
--- a/doc/v3/api_samples/os-extended-status/server-post-resp.json
+++ b/doc/v3/api_samples/os-extended-status/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "EugbD2jAD2V2",
+ "adminPass": "EugbD2jAD2V2",
"id": "a868cb5e-c794-47bf-9cd8-e302b72bb94b",
"links": [
{
diff --git a/doc/v3/api_samples/os-extended-status/servers-detail-resp.json b/doc/v3/api_samples/os-extended-status/servers-detail-resp.json
index 77fd2afa72..1c1bffc110 100644
--- a/doc/v3/api_samples/os-extended-status/servers-detail-resp.json
+++ b/doc/v3/api_samples/os-extended-status/servers-detail-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "a275e77473e464558c4aba0d68e1914d1164e7ee2f69affde7aaae2b",
+ "hostId": "a275e77473e464558c4aba0d68e1914d1164e7ee2f69affde7aaae2b",
"id": "6c8b5385-e74c-4fd5-add6-2fcf42d74a98",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
@@ -47,10 +47,10 @@
"My Server Name": "Apache1"
},
"name": "new-server-test",
- "os-extended-status:locked_by": null,
- "os-extended-status:power_state": 1,
- "os-extended-status:task_state": null,
- "os-extended-status:vm_state": "active",
+ "OS-EXT-STS:locked_by": null,
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
diff --git a/doc/v3/api_samples/os-extended-volumes/server-get-resp.json b/doc/v3/api_samples/os-extended-volumes/server-get-resp.json
index 9f972cb8d1..973c4d40bc 100644
--- a/doc/v3/api_samples/os-extended-volumes/server-get-resp.json
+++ b/doc/v3/api_samples/os-extended-volumes/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "8feef92e2152b9970b51dbdade024afbec7f8f03daf7cb335a3c1cb9",
+ "hostId": "8feef92e2152b9970b51dbdade024afbec7f8f03daf7cb335a3c1cb9",
"id": "7d62983e-23df-4320-bc89-bbc77f2a2e40",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-extended-volumes/server-post-req.json b/doc/v3/api_samples/os-extended-volumes/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-extended-volumes/server-post-req.json
+++ b/doc/v3/api_samples/os-extended-volumes/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-extended-volumes/server-post-resp.json b/doc/v3/api_samples/os-extended-volumes/server-post-resp.json
index 25a567d74c..f49035cbc1 100644
--- a/doc/v3/api_samples/os-extended-volumes/server-post-resp.json
+++ b/doc/v3/api_samples/os-extended-volumes/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "uNqGTziMK3px",
+ "adminPass": "uNqGTziMK3px",
"id": "7d62983e-23df-4320-bc89-bbc77f2a2e40",
"links": [
{
diff --git a/doc/v3/api_samples/os-extended-volumes/servers-detail-resp.json b/doc/v3/api_samples/os-extended-volumes/servers-detail-resp.json
index e03394fcd3..762de50de1 100644
--- a/doc/v3/api_samples/os-extended-volumes/servers-detail-resp.json
+++ b/doc/v3/api_samples/os-extended-volumes/servers-detail-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "f9622ec1b5ab6e3785661ea1c1e0294f95aecbcf27ac4cb60b06bd02",
+ "hostId": "f9622ec1b5ab6e3785661ea1c1e0294f95aecbcf27ac4cb60b06bd02",
"id": "8e479732-7701-48cd-af7a-04d84f51b742",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-hide-server-addresses/server-get-resp.json b/doc/v3/api_samples/os-hide-server-addresses/server-get-resp.json
index 15c89f5499..7ee75f9dc6 100644
--- a/doc/v3/api_samples/os-hide-server-addresses/server-get-resp.json
+++ b/doc/v3/api_samples/os-hide-server-addresses/server-get-resp.json
@@ -11,13 +11,13 @@
}
]
},
- "host_id": "d0635823e9162b22b90ff103f0c30f129bacf6ffb72f4d6fde87e738",
+ "hostId": "d0635823e9162b22b90ff103f0c30f129bacf6ffb72f4d6fde87e738",
"id": "4bdee8c7-507f-40f2-8429-d301edd3791b",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-hide-server-addresses/server-post-req.json b/doc/v3/api_samples/os-hide-server-addresses/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-hide-server-addresses/server-post-req.json
+++ b/doc/v3/api_samples/os-hide-server-addresses/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-hide-server-addresses/server-post-resp.json b/doc/v3/api_samples/os-hide-server-addresses/server-post-resp.json
index b858f1c655..7ba0133976 100644
--- a/doc/v3/api_samples/os-hide-server-addresses/server-post-resp.json
+++ b/doc/v3/api_samples/os-hide-server-addresses/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "5bjyggD6SaSB",
+ "adminPass": "5bjyggD6SaSB",
"id": "3d8bedd4-003d-417a-8cd7-a94cb181185d",
"links": [
{
diff --git a/doc/v3/api_samples/os-hide-server-addresses/servers-details-resp.json b/doc/v3/api_samples/os-hide-server-addresses/servers-details-resp.json
index 6a911ccd04..135a34860b 100644
--- a/doc/v3/api_samples/os-hide-server-addresses/servers-details-resp.json
+++ b/doc/v3/api_samples/os-hide-server-addresses/servers-details-resp.json
@@ -12,13 +12,13 @@
}
]
},
- "host_id": "a4fa72ae8741e5e18fb062c15657b8f689b8da2837b734c61fc9eedd",
+ "hostId": "a4fa72ae8741e5e18fb062c15657b8f689b8da2837b734c61fc9eedd",
"id": "a747eac1-e3ed-446c-935a-c2a2853f919c",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json
index 5fa4493ef6..8694c135a0 100644
--- a/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json
+++ b/doc/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json
@@ -3,13 +3,15 @@
{
"cpu_info": "?",
"current_workload": 0,
+ "status": "enabled",
+ "state": "up",
"disk_available_least": 0,
"host_ip": "1.1.1.1",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
@@ -18,7 +20,8 @@
"running_vms": 0,
"service": {
"host": "e6a37ee802d74863ab8b91ade8f12a67",
- "id": 2
+ "id": 2,
+ "disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-list-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-list-resp.json
index 8d94021274..375627499d 100644
--- a/doc/v3/api_samples/os-hypervisors/hypervisors-list-resp.json
+++ b/doc/v3/api_samples/os-hypervisors/hypervisors-list-resp.json
@@ -2,7 +2,9 @@
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
- "id": 1
+ "id": 1,
+ "state": "up",
+ "status": "enabled"
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-search-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-search-resp.json
index 8d94021274..375627499d 100644
--- a/doc/v3/api_samples/os-hypervisors/hypervisors-search-resp.json
+++ b/doc/v3/api_samples/os-hypervisors/hypervisors-search-resp.json
@@ -2,7 +2,9 @@
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
- "id": 1
+ "id": 1,
+ "state": "up",
+ "status": "enabled"
}
]
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json
index 934ef0c02d..710b05b930 100644
--- a/doc/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json
+++ b/doc/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json
@@ -2,6 +2,8 @@
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": 1,
+ "state": "up",
+ "status": "enabled",
"servers": []
}
-}
\ No newline at end of file
+}
diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json
index 1ab1b99be8..3d2c972ce3 100644
--- a/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json
+++ b/doc/v3/api_samples/os-hypervisors/hypervisors-show-resp.json
@@ -1,6 +1,8 @@
{
"hypervisor": {
"cpu_info": "?",
+ "state": "up",
+ "status": "enabled",
"current_workload": 0,
"disk_available_least": 0,
"host_ip": "1.1.1.1",
@@ -8,7 +10,7 @@
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
@@ -17,7 +19,8 @@
"running_vms": 0,
"service": {
"host": "043b3cacf6f34c90a7245151fc8ebcda",
- "id": 2
+ "id": 2,
+ "disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
diff --git a/doc/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json b/doc/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json
index f5f9d19e7c..78521b3731 100644
--- a/doc/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json
+++ b/doc/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json
@@ -2,6 +2,8 @@
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": 1,
+ "state": "up",
+ "status": "enabled",
"uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
}
-}
\ No newline at end of file
+}
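
Each of the os-hypervisors samples above gains ``state`` and ``status``
fields (plus ``disabled_reason`` on the embedded service). A small sketch of
filtering on them, assuming a parsed listing shaped like
hypervisors-list-resp.json::

    def usable_hypervisors(hypervisors):
        """Keep hypervisors that are administratively enabled and up."""
        return [h for h in hypervisors
                if h.get("status") == "enabled" and h.get("state") == "up"]
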
diff --git a/doc/v3/api_samples/os-lock-server/server-post-req.json b/doc/v3/api_samples/os-lock-server/server-post-req.json
index 30851df41a..1c45fbb32f 100644
--- a/doc/v3/api_samples/os-lock-server/server-post-req.json
+++ b/doc/v3/api_samples/os-lock-server/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/doc/v3/api_samples/os-lock-server/server-post-resp.json b/doc/v3/api_samples/os-lock-server/server-post-resp.json
index 270cb84634..353517739f 100644
--- a/doc/v3/api_samples/os-lock-server/server-post-resp.json
+++ b/doc/v3/api_samples/os-lock-server/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "DM3QzjhGTzLB",
+ "adminPass": "DM3QzjhGTzLB",
"id": "bebeec79-497e-4711-a311-d0d2e3dfc73b",
"links": [
{
diff --git a/doc/v3/api_samples/os-migrate-server/server-post-req.json b/doc/v3/api_samples/os-migrate-server/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-migrate-server/server-post-req.json
+++ b/doc/v3/api_samples/os-migrate-server/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-migrate-server/server-post-resp.json b/doc/v3/api_samples/os-migrate-server/server-post-resp.json
index 270cb84634..353517739f 100644
--- a/doc/v3/api_samples/os-migrate-server/server-post-resp.json
+++ b/doc/v3/api_samples/os-migrate-server/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "DM3QzjhGTzLB",
+ "adminPass": "DM3QzjhGTzLB",
"id": "bebeec79-497e-4711-a311-d0d2e3dfc73b",
"links": [
{
diff --git a/doc/v3/api_samples/os-multinic/server-post-req.json b/doc/v3/api_samples/os-multinic/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-multinic/server-post-req.json
+++ b/doc/v3/api_samples/os-multinic/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-multinic/server-post-resp.json b/doc/v3/api_samples/os-multinic/server-post-resp.json
index 7a88d2e911..a7e72d6a7f 100644
--- a/doc/v3/api_samples/os-multinic/server-post-resp.json
+++ b/doc/v3/api_samples/os-multinic/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "5Y9rR4XaM8Qg",
+ "adminPass": "5Y9rR4XaM8Qg",
"id": "bbe8d469-e8cb-49b1-96d8-f93b68c82355",
"links": [
{
diff --git a/doc/v3/api_samples/os-pause-server/server-post-req.json b/doc/v3/api_samples/os-pause-server/server-post-req.json
index 30851df41a..1c45fbb32f 100644
--- a/doc/v3/api_samples/os-pause-server/server-post-req.json
+++ b/doc/v3/api_samples/os-pause-server/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/doc/v3/api_samples/os-pause-server/server-post-resp.json b/doc/v3/api_samples/os-pause-server/server-post-resp.json
index 270cb84634..353517739f 100644
--- a/doc/v3/api_samples/os-pause-server/server-post-resp.json
+++ b/doc/v3/api_samples/os-pause-server/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "DM3QzjhGTzLB",
+ "adminPass": "DM3QzjhGTzLB",
"id": "bebeec79-497e-4711-a311-d0d2e3dfc73b",
"links": [
{
diff --git a/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json b/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json
index aa0e92efb3..f6f7363ef4 100644
--- a/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json
+++ b/doc/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json
@@ -2,6 +2,8 @@
"hypervisors": [
{
"cpu_info": "?",
+ "state": "up",
+ "status": "enabled",
"current_workload": 0,
"disk_available_least": 0,
"host_ip": "1.1.1.1",
@@ -9,7 +11,7 @@
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
@@ -30,7 +32,8 @@
"running_vms": 0,
"service": {
"host": "043b3cacf6f34c90a7245151fc8ebcda",
- "id": 2
+ "id": 2,
+ "disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
diff --git a/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json b/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json
index 1750501621..f2fa988f83 100644
--- a/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json
+++ b/doc/v3/api_samples/os-pci/hypervisors-pci-show-resp.json
@@ -4,11 +4,13 @@
"current_workload": 0,
"disk_available_least": 0,
"host_ip": "1.1.1.1",
+ "state": "up",
+ "status": "enabled",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
@@ -29,7 +31,8 @@
"running_vms": 0,
"service": {
"host": "043b3cacf6f34c90a7245151fc8ebcda",
- "id": 2
+ "id": 2,
+ "disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
diff --git a/doc/v3/api_samples/os-pci/server-get-resp.json b/doc/v3/api_samples/os-pci/server-get-resp.json
index a58574e628..f517aefbb4 100644
--- a/doc/v3/api_samples/os-pci/server-get-resp.json
+++ b/doc/v3/api_samples/os-pci/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "b7e88944272df30c113572778bcf5527f02e9c2a745221214536c1a2",
+ "hostId": "b7e88944272df30c113572778bcf5527f02e9c2a745221214536c1a2",
"id": "9dafa6bc-7a9f-45b2-8177-11800ceb7224",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-pci/server-post-req.json b/doc/v3/api_samples/os-pci/server-post-req.json
index 30851df41a..1c45fbb32f 100644
--- a/doc/v3/api_samples/os-pci/server-post-req.json
+++ b/doc/v3/api_samples/os-pci/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/doc/v3/api_samples/os-pci/server-post-resp.json b/doc/v3/api_samples/os-pci/server-post-resp.json
index 6b9ad18047..deb7cbc1f7 100644
--- a/doc/v3/api_samples/os-pci/server-post-resp.json
+++ b/doc/v3/api_samples/os-pci/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "8C5KEgw2cQxu",
+ "adminPass": "8C5KEgw2cQxu",
"id": "fb947804-6a43-499d-9526-3eac8adf7271",
"links": [
{
diff --git a/doc/v3/api_samples/os-pci/servers-detail-resp.json b/doc/v3/api_samples/os-pci/servers-detail-resp.json
index f38922f3d1..872a5335f0 100644
--- a/doc/v3/api_samples/os-pci/servers-detail-resp.json
+++ b/doc/v3/api_samples/os-pci/servers-detail-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "416f83c758ea0f9271018b278a9dcedb91b1190deaa598704b87219b",
+ "hostId": "416f83c758ea0f9271018b278a9dcedb91b1190deaa598704b87219b",
"id": "ef440f98-04e8-46ea-ae74-e24d437040ea",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-remote-consoles/server-post-req.json b/doc/v3/api_samples/os-remote-consoles/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-remote-consoles/server-post-req.json
+++ b/doc/v3/api_samples/os-remote-consoles/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-remote-consoles/server-post-resp.json b/doc/v3/api_samples/os-remote-consoles/server-post-resp.json
index 3d22d59aa6..e48a97813e 100644
--- a/doc/v3/api_samples/os-remote-consoles/server-post-resp.json
+++ b/doc/v3/api_samples/os-remote-consoles/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "Kwg5tff6KiUU",
+ "adminPass": "Kwg5tff6KiUU",
"id": "8619225c-67c8-424f-9b46-cec5bad137a2",
"links": [
{
diff --git a/doc/v3/api_samples/os-rescue/server-get-resp-rescue.json b/doc/v3/api_samples/os-rescue/server-get-resp-rescue.json
index 1fa15ecfc9..4df7a7cb89 100644
--- a/doc/v3/api_samples/os-rescue/server-get-resp-rescue.json
+++ b/doc/v3/api_samples/os-rescue/server-get-resp-rescue.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "f04994c5b4aac1cacbb83b09c2506e457d97dd54f620961624574690",
+ "hostId": "f04994c5b4aac1cacbb83b09c2506e457d97dd54f620961624574690",
"id": "2fd0c66b-50af-41d2-9253-9fa41e7e8dd8",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-rescue/server-get-resp-unrescue.json b/doc/v3/api_samples/os-rescue/server-get-resp-unrescue.json
index ebed3eeb8d..79ea58d3ce 100644
--- a/doc/v3/api_samples/os-rescue/server-get-resp-unrescue.json
+++ b/doc/v3/api_samples/os-rescue/server-get-resp-unrescue.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "53cd4520a6cc639eeabcae4a0512b93e4675d431002e0b60e2dcfc04",
+ "hostId": "53cd4520a6cc639eeabcae4a0512b93e4675d431002e0b60e2dcfc04",
"id": "edfc3905-1f3c-4819-8fc3-a7d8131cfa22",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-rescue/server-post-req.json b/doc/v3/api_samples/os-rescue/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-rescue/server-post-req.json
+++ b/doc/v3/api_samples/os-rescue/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-rescue/server-post-resp.json b/doc/v3/api_samples/os-rescue/server-post-resp.json
index 2a19467660..19534dcc00 100644
--- a/doc/v3/api_samples/os-rescue/server-post-resp.json
+++ b/doc/v3/api_samples/os-rescue/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "8RK85ufqhJVq",
+ "adminPass": "8RK85ufqhJVq",
"id": "edfc3905-1f3c-4819-8fc3-a7d8131cfa22",
"links": [
{
diff --git a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json
index 0c5c998e42..0466cecb18 100644
--- a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json
+++ b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json
@@ -1,10 +1,10 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/openstack/flavors/1",
- "os-scheduler-hints:scheduler_hints": {
- "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e"
- }
+ "imageRef" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1"
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e"
}
}
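
Note on the hunk above: the scheduler hints are not merely renamed from
"os-scheduler-hints:scheduler_hints" to "OS-SCH-HNT:scheduler_hints"; they
also move out of the "server" object and become a top-level sibling key. A
minimal Python sketch of the resulting request body, using only values from
the sample itself:

    # Create-server payload after this change; the hints block sits
    # beside "server", not inside it.
    body = {
        "server": {
            "name": "new-server-test",
            "imageRef": "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
            "flavorRef": "http://openstack.example.com/openstack/flavors/1"
        },
        "OS-SCH-HNT:scheduler_hints": {
            "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e"
        }
    }
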
diff --git a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json
index 6a0e1c1e8d..a06736dd2b 100644
--- a/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json
+++ b/doc/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "erQXgJ8NBDD4",
+ "adminPass": "erQXgJ8NBDD4",
"id": "4c8b1df3-46f7-4555-98d8-cdb869aaf9ad",
"links": [
{
diff --git a/doc/v3/api_samples/os-security-groups/server-get-resp.json b/doc/v3/api_samples/os-security-groups/server-get-resp.json
index c25a140d2a..9993ae9b78 100644
--- a/doc/v3/api_samples/os-security-groups/server-get-resp.json
+++ b/doc/v3/api_samples/os-security-groups/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "0e312d6763795d572ccd716973fd078290d9ec446517b222d3395660",
+ "hostId": "0e312d6763795d572ccd716973fd078290d9ec446517b222d3395660",
"id": "f6961f7a-0133-4f27-94cd-901dca4ba426",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-security-groups/server-post-req.json b/doc/v3/api_samples/os-security-groups/server-post-req.json
index 428217cfc3..365dde78e0 100644
--- a/doc/v3/api_samples/os-security-groups/server-post-req.json
+++ b/doc/v3/api_samples/os-security-groups/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/openstack/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/doc/v3/api_samples/os-security-groups/server-post-resp.json b/doc/v3/api_samples/os-security-groups/server-post-resp.json
index 4f1d6b752c..df29afea9d 100644
--- a/doc/v3/api_samples/os-security-groups/server-post-resp.json
+++ b/doc/v3/api_samples/os-security-groups/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "ki8cbWeZdxH6",
+ "adminPass": "ki8cbWeZdxH6",
"id": "2dabdd93-ced7-4607-a542-2516de84e0e5",
"links": [
{
diff --git a/doc/v3/api_samples/os-security-groups/servers-detail-resp.json b/doc/v3/api_samples/os-security-groups/servers-detail-resp.json
index 49463446ef..435bbd145d 100644
--- a/doc/v3/api_samples/os-security-groups/servers-detail-resp.json
+++ b/doc/v3/api_samples/os-security-groups/servers-detail-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "afeeb125d4d37d0a2123e3144a20a6672fda5d4b6cb85ec193430d82",
+ "hostId": "afeeb125d4d37d0a2123e3144a20a6672fda5d4b6cb85ec193430d82",
"id": "1b94e3fc-1b1c-431a-a077-6b280fb720ce",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-server-diagnostics/server-post-req.json b/doc/v3/api_samples/os-server-diagnostics/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-server-diagnostics/server-post-req.json
+++ b/doc/v3/api_samples/os-server-diagnostics/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-server-diagnostics/server-post-resp.json b/doc/v3/api_samples/os-server-diagnostics/server-post-resp.json
index 03db3eab87..cb16c18038 100644
--- a/doc/v3/api_samples/os-server-diagnostics/server-post-resp.json
+++ b/doc/v3/api_samples/os-server-diagnostics/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "H83mnjinc5jy",
+ "adminPass": "H83mnjinc5jy",
"id": "b2bbf280-a78d-4724-90ba-b00dd5659097",
"links": [
{
diff --git a/doc/v3/api_samples/os-server-external-events/server-post-req.json b/doc/v3/api_samples/os-server-external-events/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-server-external-events/server-post-req.json
+++ b/doc/v3/api_samples/os-server-external-events/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-server-external-events/server-post-resp.json b/doc/v3/api_samples/os-server-external-events/server-post-resp.json
index 270cb84634..353517739f 100644
--- a/doc/v3/api_samples/os-server-external-events/server-post-resp.json
+++ b/doc/v3/api_samples/os-server-external-events/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "DM3QzjhGTzLB",
+ "adminPass": "DM3QzjhGTzLB",
"id": "bebeec79-497e-4711-a311-d0d2e3dfc73b",
"links": [
{
diff --git a/doc/v3/api_samples/os-server-usage/server-get-resp.json b/doc/v3/api_samples/os-server-usage/server-get-resp.json
index 06f977d3c7..5f12283cc7 100644
--- a/doc/v3/api_samples/os-server-usage/server-get-resp.json
+++ b/doc/v3/api_samples/os-server-usage/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "73cf3a40601b63f5992894be2daa3712dd599d1c919984951e21edda",
+ "hostId": "73cf3a40601b63f5992894be2daa3712dd599d1c919984951e21edda",
"id": "cee6d136-e378-4cfc-9eec-71797f025991",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-server-usage/server-post-req.json b/doc/v3/api_samples/os-server-usage/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-server-usage/server-post-req.json
+++ b/doc/v3/api_samples/os-server-usage/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-server-usage/server-post-resp.json b/doc/v3/api_samples/os-server-usage/server-post-resp.json
index 3394fde4b1..b725ed5d81 100644
--- a/doc/v3/api_samples/os-server-usage/server-post-resp.json
+++ b/doc/v3/api_samples/os-server-usage/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "kmspFLBzL75q",
+ "adminPass": "kmspFLBzL75q",
"id": "f8eeb5ba-19b7-49be-a1a9-10250dda5b14",
"links": [
{
diff --git a/doc/v3/api_samples/os-server-usage/servers-detail-resp.json b/doc/v3/api_samples/os-server-usage/servers-detail-resp.json
index ad4008e90b..d917fdb3f2 100644
--- a/doc/v3/api_samples/os-server-usage/servers-detail-resp.json
+++ b/doc/v3/api_samples/os-server-usage/servers-detail-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "117535ce0eda7ee02ebffe2c976173629385481ae3f2bded5e14a66b",
+ "hostId": "117535ce0eda7ee02ebffe2c976173629385481ae3f2bded5e14a66b",
"id": "ae114799-9164-48f5-a036-6ef9310acbc4",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/os-shelve/server-post-req.json b/doc/v3/api_samples/os-shelve/server-post-req.json
index 8cc0fd01df..f63022b56b 100644
--- a/doc/v3/api_samples/os-shelve/server-post-req.json
+++ b/doc/v3/api_samples/os-shelve/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/openstack/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/doc/v3/api_samples/os-shelve/server-post-resp.json b/doc/v3/api_samples/os-shelve/server-post-resp.json
index 2d2eafa2eb..c6c478e625 100644
--- a/doc/v3/api_samples/os-shelve/server-post-resp.json
+++ b/doc/v3/api_samples/os-shelve/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "LJGRdNsvzh6z",
+ "adminPass": "LJGRdNsvzh6z",
"id": "1d08717a-835e-4dca-9bfb-166fa18a6715",
"links": [
{
diff --git a/doc/v3/api_samples/os-suspend-server/server-post-req.json b/doc/v3/api_samples/os-suspend-server/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/os-suspend-server/server-post-req.json
+++ b/doc/v3/api_samples/os-suspend-server/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/os-suspend-server/server-post-resp.json b/doc/v3/api_samples/os-suspend-server/server-post-resp.json
index 270cb84634..353517739f 100644
--- a/doc/v3/api_samples/os-suspend-server/server-post-resp.json
+++ b/doc/v3/api_samples/os-suspend-server/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "DM3QzjhGTzLB",
+ "adminPass": "DM3QzjhGTzLB",
"id": "bebeec79-497e-4711-a311-d0d2e3dfc73b",
"links": [
{
diff --git a/doc/v3/api_samples/os-user-data/userdata-post-req.json b/doc/v3/api_samples/os-user-data/userdata-post-req.json
index 21ca21b6db..e11c610961 100644
--- a/doc/v3/api_samples/os-user-data/userdata-post-req.json
+++ b/doc/v3/api_samples/os-user-data/userdata-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/openstack/flavors/1",
+ "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/doc/v3/api_samples/os-user-data/userdata-post-resp.json b/doc/v3/api_samples/os-user-data/userdata-post-resp.json
index f9e7b1172d..aeeae8d86c 100644
--- a/doc/v3/api_samples/os-user-data/userdata-post-resp.json
+++ b/doc/v3/api_samples/os-user-data/userdata-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "2xHoDU7Gd7vw",
+ "adminPass": "2xHoDU7Gd7vw",
"id": "976a62bb-0d4a-4e17-9044-1864e888a557",
"links": [
{
diff --git a/doc/v3/api_samples/server-ips/server-post-req.json b/doc/v3/api_samples/server-ips/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/server-ips/server-post-req.json
+++ b/doc/v3/api_samples/server-ips/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/server-ips/server-post-resp.json b/doc/v3/api_samples/server-ips/server-post-resp.json
index 482fc6b077..70064c0996 100644
--- a/doc/v3/api_samples/server-ips/server-post-resp.json
+++ b/doc/v3/api_samples/server-ips/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "Ag463BYwnXEf",
+ "adminPass": "Ag463BYwnXEf",
"id": "0813a7dc-8e97-42df-9634-957109499bf0",
"links": [
{
diff --git a/doc/v3/api_samples/server-metadata/server-post-req.json b/doc/v3/api_samples/server-metadata/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/server-metadata/server-post-req.json
+++ b/doc/v3/api_samples/server-metadata/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/server-metadata/server-post-resp.json b/doc/v3/api_samples/server-metadata/server-post-resp.json
index a20d117f0c..aa8939bf18 100644
--- a/doc/v3/api_samples/server-metadata/server-post-resp.json
+++ b/doc/v3/api_samples/server-metadata/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "ys9M8HQXvwEJ",
+ "adminPass": "ys9M8HQXvwEJ",
"id": "a6ebe5b4-b68b-420b-9c1e-620c4d3e0389",
"links": [
{
diff --git a/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json b/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json
index 4da2c1a74b..e348f8af4f 100644
--- a/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json
+++ b/doc/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json
@@ -1,8 +1,8 @@
{
"rebuild" : {
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"name" : "foobar",
- "admin_password" : "seekr3t",
+ "adminPass" : "seekr3t",
"metadata" : {
"meta_var" : "meta_val"
},
diff --git a/doc/v3/api_samples/servers/server-action-rebuild-resp.json b/doc/v3/api_samples/servers/server-action-rebuild-resp.json
index 2f17a0d529..4c38ad2f8b 100644
--- a/doc/v3/api_samples/servers/server-action-rebuild-resp.json
+++ b/doc/v3/api_samples/servers/server-action-rebuild-resp.json
@@ -10,7 +10,7 @@
}
]
},
- "admin_password": "seekr3t",
+ "adminPass": "seekr3t",
"created": "2013-11-14T06:29:00Z",
"flavor": {
"id": "1",
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66",
+ "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66",
"id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/servers/server-action-rebuild.json b/doc/v3/api_samples/servers/server-action-rebuild.json
index 964f6fa1d6..7900828eab 100644
--- a/doc/v3/api_samples/servers/server-action-rebuild.json
+++ b/doc/v3/api_samples/servers/server-action-rebuild.json
@@ -1,8 +1,8 @@
{
"rebuild" : {
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"name" : "foobar",
- "admin_password" : "seekr3t",
+ "adminPass" : "seekr3t",
"metadata" : {
"meta_var" : "meta_val"
},
diff --git a/doc/v3/api_samples/servers/server-action-resize.json b/doc/v3/api_samples/servers/server-action-resize.json
index 7dcf7751db..bdaa37a176 100644
--- a/doc/v3/api_samples/servers/server-action-resize.json
+++ b/doc/v3/api_samples/servers/server-action-resize.json
@@ -1,5 +1,5 @@
{
"resize" : {
- "flavor_ref" : "2"
+ "flavorRef" : "2"
}
}
\ No newline at end of file
diff --git a/doc/v3/api_samples/servers/server-get-resp.json b/doc/v3/api_samples/servers/server-get-resp.json
index 03ec3a9245..fa7708f177 100644
--- a/doc/v3/api_samples/servers/server-get-resp.json
+++ b/doc/v3/api_samples/servers/server-get-resp.json
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
"id": "0e44cc9c-e052-415d-afbf-469b0d384170",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/doc/v3/api_samples/servers/server-post-req.json b/doc/v3/api_samples/servers/server-post-req.json
index 2eedab6147..d4c7973c10 100644
--- a/doc/v3/api_samples/servers/server-post-req.json
+++ b/doc/v3/api_samples/servers/server-post-req.json
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
- "flavor_ref" : "http://openstack.example.com/flavors/1",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/doc/v3/api_samples/servers/server-post-resp.json b/doc/v3/api_samples/servers/server-post-resp.json
index 6e2e900f64..3e69dffe68 100644
--- a/doc/v3/api_samples/servers/server-post-resp.json
+++ b/doc/v3/api_samples/servers/server-post-resp.json
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "6NpUwoz2QDRN",
+ "adminPass": "6NpUwoz2QDRN",
"id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
"links": [
{
diff --git a/doc/v3/api_samples/servers/servers-details-resp.json b/doc/v3/api_samples/servers/servers-details-resp.json
index f478ac8fa7..6644953453 100644
--- a/doc/v3/api_samples/servers/servers-details-resp.json
+++ b/doc/v3/api_samples/servers/servers-details-resp.json
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25",
+ "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25",
"id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
}
]
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index cc5b8ea4a8..89544a8009 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -66,7 +66,15 @@
"compute_extension:v3:os-attach-interfaces:discoverable": "",
"compute_extension:baremetal_nodes": "rule:admin_api",
"compute_extension:cells": "rule:admin_api",
+ "compute_extension:cells:create": "rule:admin_api",
+ "compute_extension:cells:delete": "rule:admin_api",
+ "compute_extension:cells:update": "rule:admin_api",
+ "compute_extension:cells:sync_instances": "rule:admin_api",
"compute_extension:v3:os-cells": "rule:admin_api",
+ "compute_extension:v3:os-cells:create": "rule:admin_api",
+ "compute_extension:v3:os-cells:delete": "rule:admin_api",
+ "compute_extension:v3:os-cells:update": "rule:admin_api",
+ "compute_extension:v3:os-cells:sync_instances": "rule:admin_api",
"compute_extension:v3:os-cells:discoverable": "",
"compute_extension:certificates": "",
"compute_extension:v3:os-certificates:create": "",
@@ -137,6 +145,7 @@
"compute_extension:v3:flavor-extra-specs:update": "rule:admin_api",
"compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api",
"compute_extension:flavormanage": "rule:admin_api",
+ "compute_extension:v3:flavor-manage:discoverable": "",
"compute_extension:v3:flavor-manage": "rule:admin_api",
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
@@ -320,5 +329,6 @@
"network:get_dns_entries_by_name": "",
"network:create_private_dns_domain": "",
"network:create_public_dns_domain": "",
- "network:delete_dns_domain": ""
+ "network:delete_dns_domain": "",
+ "network:attach_external_network": "rule:admin_api"
}
diff --git a/nova/api/auth.py b/nova/api/auth.py
index b0015cce2b..710281a00d 100644
--- a/nova/api/auth.py
+++ b/nova/api/auth.py
@@ -21,7 +21,8 @@
import webob.exc
from nova import context
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.i18n import _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common.middleware import request_id
@@ -69,7 +70,7 @@ def pipeline_factory(loader, global_conf, **local_conf):
# If the configuration file still contains 'ratelimit_v3', just ignore it.
# We will remove this code at next release (J)
if 'ratelimit_v3' in pipeline:
- LOG.warn(_('ratelimit_v3 is removed from v3 api.'))
+ LOG.warn(_LW('ratelimit_v3 is removed from v3 api.'))
pipeline.remove('ratelimit_v3')
return _load_pipeline(loader, pipeline)
@@ -156,6 +157,6 @@ def _get_roles(self, req):
# Fallback to deprecated role header:
roles = req.headers.get('X_ROLE', '')
if roles:
- LOG.warn(_("Sourcing roles from deprecated X-Role HTTP "
- "header"))
+ LOG.warn(_LW("Sourcing roles from deprecated X-Role HTTP "
+ "header"))
return [r.strip() for r in roles.split(',')]
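
The auth.py hunks above show a pattern this patch applies throughout the
tree: plain _() on log messages gives way to the level-specific markers from
nova.i18n (_LW for warnings, _LE for errors, and so on), so each message is
routed to the matching translation catalog. A minimal sketch of the
convention, assuming a Nova tree on the import path:

    from nova.i18n import _LW
    from nova.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def check_pipeline(pipeline):
        # Warning-level messages take the _LW marker.
        if 'ratelimit_v3' in pipeline:
            LOG.warn(_LW('ratelimit_v3 is removed from v3 api.'))
            pipeline.remove('ratelimit_v3')
        return pipeline
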
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 358e7c91e7..6d9c3ab845 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -32,8 +32,9 @@
from nova.api import validator
from nova import context
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LE
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LW
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -76,7 +77,7 @@
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
-## Fault Wrapper around all EC2 requests ##
+# Fault Wrapper around all EC2 requests
class FaultWrapper(wsgi.Middleware):
"""Calls the middleware stack, captures any exceptions into faults."""
@@ -165,9 +166,9 @@ def __call__(self, req):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
elif failures >= CONF.lockout_attempts:
- LOG.warn(_('Access key %(access_key)s has had %(failures)d '
- 'failed authentications and will be locked out '
- 'for %(lock_mins)d minutes.'),
+ LOG.warn(_LW('Access key %(access_key)s has had %(failures)d '
+ 'failed authentications and will be locked out '
+ 'for %(lock_mins)d minutes.'),
{'access_key': access_key,
'failures': failures,
'lock_mins': CONF.lockout_minutes})
@@ -489,10 +490,10 @@ def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
if unexpected:
log_fun = LOG.error
- log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
+ log_msg = _LE("Unexpected %(ex_name)s raised: %(ex_str)s")
else:
log_fun = LOG.debug
- log_msg = _("%(ex_name)s raised: %(ex_str)s")
+ log_msg = "%(ex_name)s raised: %(ex_str)s"
# NOTE(jruzicka): For compatibility with EC2 API, treat expected
# exceptions as client (4xx) errors. The exception error code is 500
# by default and most exceptions inherit this from NovaException even
@@ -516,7 +517,7 @@ def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
for k in env.keys():
if not isinstance(env[k], six.string_types):
env.pop(k)
- log_fun(_('Environment: %s') % jsonutils.dumps(env))
+ log_fun(_LE('Environment: %s'), jsonutils.dumps(env))
if not message:
message = _('Unknown error occurred.')
return faults.ec2_error_response(request_id, code, message, status=status)
@@ -554,6 +555,7 @@ def __call__(self, req):
exception.FloatingIpNotFound,
exception.ImageNotActive,
exception.InvalidInstanceIDMalformed,
+ exception.InvalidVolumeIDMalformed,
exception.InvalidKeypair,
exception.InvalidParameterValue,
exception.InvalidPortRange,
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index f6afb1f123..3de6136ed7 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -36,14 +36,14 @@
from nova import compute
from nova.compute import api as compute_api
from nova.compute import vm_states
-from nova import db
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LW
from nova.image import s3
from nova import network
from nova.network.security_group import neutron_driver
from nova import objects
from nova.objects import base as obj_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
@@ -85,13 +85,28 @@
QUOTAS = quota.QUOTAS
-def validate_ec2_id(val):
+# EC2 ID can return the following error codes:
+# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/api-error-codes.html
+# Validate methods are split to return valid EC2 error codes for different
+# resource types
+def _validate_ec2_id(val):
if not validator.validate_str()(val):
- raise exception.InvalidInstanceIDMalformed(val=val)
+ raise exception.InvalidEc2Id(ec2_id=val)
+ ec2utils.ec2_id_to_id(val)
+
+
+def validate_volume_id(volume_id):
try:
- ec2utils.ec2_id_to_id(val)
+ _validate_ec2_id(volume_id)
except exception.InvalidEc2Id:
- raise exception.InvalidInstanceIDMalformed(val=val)
+ raise exception.InvalidVolumeIDMalformed(volume_id=volume_id)
+
+
+def validate_instance_id(instance_id):
+ try:
+ _validate_ec2_id(instance_id)
+ except exception.InvalidEc2Id:
+ raise exception.InvalidInstanceIDMalformed(instance_id=instance_id)
# EC2 API can return the following values as documented in the EC2 API
@@ -171,7 +186,7 @@ def _format_block_device_mapping(bdm):
('deleteOnTermination', 'delete_on_termination'))
ebs = {}
for name, k in ebs_keys:
- if k in bdm:
+ if bdm.get(k) is not None:
if k == 'snapshot_id':
ebs[name] = ec2utils.id_to_ec2_snap_id(bdm[k])
elif k == 'volume_id':
@@ -387,7 +402,7 @@ def _format_snapshot(self, context, snapshot):
return s
def create_snapshot(self, context, volume_id, **kwargs):
- validate_ec2_id(volume_id)
+ validate_volume_id(volume_id)
LOG.audit(_("Create snapshot of volume %s"), volume_id,
context=context)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
@@ -398,7 +413,9 @@ def create_snapshot(self, context, volume_id, **kwargs):
else:
snapshot = self.volume_api.create_snapshot(*args)
- db.ec2_snapshot_create(context, snapshot['id'])
+ smap = objects.EC2SnapshotMapping(context, uuid=snapshot['id'])
+ smap.create()
+
return self._format_snapshot(context, snapshot)
def delete_snapshot(self, context, snapshot_id, **kwargs):
@@ -411,7 +428,7 @@ def describe_key_pairs(self, context, key_name=None, **kwargs):
if key_name is not None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
- #If looking for non existent key pair
+ # If looking for non existent key pair
if key_name is not None and not key_pairs:
msg = _('Could not find key pair(s): %s') % ','.join(key_name)
raise exception.KeypairNotFound(message=msg)
@@ -745,7 +762,7 @@ def get_password_data(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
- validate_ec2_id(ec2_id)
+ validate_instance_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid)
output = password.extract_password(instance)
@@ -764,7 +781,7 @@ def get_console_output(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
- validate_ec2_id(ec2_id)
+ validate_instance_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
@@ -778,7 +795,7 @@ def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
- validate_ec2_id(ec2_id)
+ validate_volume_id(ec2_id)
internal_id = ec2utils.ec2_vol_id_to_uuid(ec2_id)
volume = self.volume_api.get(context, internal_id)
volumes.append(volume)
@@ -858,7 +875,7 @@ def create_volume(self, context, **kwargs):
return self._format_volume(context, dict(volume))
def delete_volume(self, context, volume_id, **kwargs):
- validate_ec2_id(volume_id)
+ validate_volume_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
self.volume_api.delete(context, volume_id)
return True
@@ -867,8 +884,8 @@ def attach_volume(self, context,
volume_id,
instance_id,
device, **kwargs):
- validate_ec2_id(instance_id)
- validate_ec2_id(volume_id)
+ validate_instance_id(instance_id)
+ validate_volume_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid,
@@ -901,7 +918,7 @@ def _get_instance_from_volume(self, context, volume):
raise exception.VolumeUnattached(volume_id=volume['id'])
def detach_volume(self, context, volume_id, **kwargs):
- validate_ec2_id(volume_id)
+ validate_volume_id(volume_id)
volume_id = ec2utils.ec2_vol_id_to_uuid(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume = self.volume_api.get(context, volume_id)
@@ -992,7 +1009,7 @@ def _format_attr_user_data(instance, result):
if fn is None:
raise exception.InvalidAttribute(attr=attribute)
- validate_ec2_id(instance_id)
+ validate_instance_id(instance_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, instance_id)
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
@@ -1048,6 +1065,28 @@ def _format_terminate_instances(self, context, instance_id,
instances_set.append(i)
return {'instancesSet': instances_set}
+ def _format_stop_instances(self, context, instance_ids, previous_states):
+ instances_set = []
+ for (ec2_id, previous_state) in zip(instance_ids, previous_states):
+ i = {}
+ i['instanceId'] = ec2_id
+ i['previousState'] = _state_description(previous_state['vm_state'],
+ previous_state['shutdown_terminate'])
+ i['currentState'] = _state_description(vm_states.STOPPED, True)
+ instances_set.append(i)
+ return {'instancesSet': instances_set}
+
+ def _format_start_instances(self, context, instance_id, previous_states):
+ instances_set = []
+ for (ec2_id, previous_state) in zip(instance_id, previous_states):
+ i = {}
+ i['instanceId'] = ec2_id
+ i['previousState'] = _state_description(previous_state['vm_state'],
+ previous_state['shutdown_terminate'])
+ i['currentState'] = _state_description(vm_states.ACTIVE, True)
+ instances_set.append(i)
+ return {'instancesSet': instances_set}
+
def _format_instance_bdm(self, context, instance_uuid, root_device_name,
result):
"""Format InstanceBlockDeviceMappingResponseItemType."""
@@ -1263,8 +1302,8 @@ def associate_address(self, context, instance_id, public_ip, **kwargs):
# changed to support specifying a particular fixed_ip if
# multiple exist but this may not apply to ec2..
if len(fixed_ips) > 1:
- msg = _('multiple fixed_ips exist, using the first: %s')
- LOG.warning(msg, fixed_ips[0])
+ LOG.warn(_LW('multiple fixed_ips exist, using the first: %s'),
+ fixed_ips[0])
self.network_api.associate_floating_ip(context, instance,
floating_address=public_ip,
@@ -1328,6 +1367,9 @@ def run_instances(self, context, **kwargs):
msg = _('Image must be available')
raise exception.ImageNotActive(message=msg)
+ iisb = kwargs.get('instance_initiated_shutdown_behavior', 'stop')
+ shutdown_terminate = (iisb == 'terminate')
+
flavor = objects.Flavor.get_by_name(context,
kwargs.get('instance_type', None))
@@ -1343,7 +1385,8 @@ def run_instances(self, context, **kwargs):
security_group=kwargs.get('security_group'),
availability_zone=kwargs.get('placement', {}).get(
'availability_zone'),
- block_device_mapping=kwargs.get('block_device_mapping', {}))
+ block_device_mapping=kwargs.get('block_device_mapping', {}),
+ shutdown_terminate=shutdown_terminate)
instances = self._format_run_instances(context, resv_id)
if instances:
@@ -1398,7 +1441,7 @@ def _ec2_ids_to_instances(self, context, instance_id):
instances = []
extra = ['system_metadata', 'metadata', 'info_cache']
for ec2_id in instance_id:
- validate_ec2_id(ec2_id)
+ validate_instance_id(ec2_id)
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
instance = objects.Instance.get_by_uuid(
context, instance_uuid, expected_attrs=extra)
@@ -1435,7 +1478,8 @@ def stop_instances(self, context, instance_id, **kwargs):
for instance in instances:
extensions.check_compute_policy(context, 'stop', instance)
self.compute_api.stop(context, instance)
- return True
+ return self._format_stop_instances(context, instance_id,
+ instances)
def start_instances(self, context, instance_id, **kwargs):
"""Start each instances in instance_id.
@@ -1446,7 +1490,8 @@ def start_instances(self, context, instance_id, **kwargs):
for instance in instances:
extensions.check_compute_policy(context, 'start', instance)
self.compute_api.start(context, instance)
- return True
+ return self._format_start_instances(context, instance_id,
+ instances)
def _get_image(self, context, ec2_id):
try:
@@ -1674,7 +1719,7 @@ def create_image(self, context, instance_id, **kwargs):
# do so here
no_reboot = kwargs.get('no_reboot', False)
name = kwargs.get('name')
- validate_ec2_id(instance_id)
+ validate_instance_id(instance_id)
ec2_instance_id = instance_id
instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_instance_id)
instance = self.compute_api.get(context, instance_uuid,
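
The validator split in cloud.py is caller-visible: a malformed volume id now
maps to the volume-specific EC2 error code rather than the instance one. A
hedged usage sketch against the patched tree (exception and function names
come straight from the hunks):

    from nova.api.ec2 import cloud
    from nova import exception

    try:
        cloud.validate_volume_id('i-not-a-volume')
    except exception.InvalidVolumeIDMalformed:
        # Surfaces as EC2's InvalidVolumeID.Malformed; before this patch
        # the same input raised InvalidInstanceIDMalformed.
        pass
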
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 97cbe5627a..963acb892c 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -19,12 +19,11 @@
from nova import availability_zones
from nova import context
-from nova import db
from nova import exception
+from nova.i18n import _
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
@@ -102,7 +101,7 @@ def resource_type_from_id(context, resource_id):
@memoize
def id_to_glance_id(context, image_id):
"""Convert an internal (db) id to a glance id."""
- return db.s3_image_get(context, image_id)['uuid']
+ return objects.S3ImageMapping.get_by_id(context, image_id).uuid
@memoize
@@ -111,9 +110,11 @@ def glance_id_to_id(context, glance_id):
if not glance_id:
return
try:
- return db.s3_image_get_by_uuid(context, glance_id)['id']
+ return objects.S3ImageMapping.get_by_uuid(context, glance_id).id
except exception.NotFound:
- return db.s3_image_create(context, glance_id)['id']
+ s3imap = objects.S3ImageMapping(context, uuid=glance_id)
+ s3imap.create()
+ return s3imap.id
def ec2_id_to_glance_id(context, ec2_id):
@@ -344,14 +345,18 @@ def get_int_id_from_snapshot_uuid(context, snapshot_uuid):
if snapshot_uuid is None:
return
try:
- return db.get_ec2_snapshot_id_by_uuid(context, snapshot_uuid)
+ smap = objects.EC2SnapshotMapping.get_by_uuid(context, snapshot_uuid)
+ return smap.id
except exception.NotFound:
- return db.ec2_snapshot_create(context, snapshot_uuid)['id']
+ smap = objects.EC2SnapshotMapping(context, uuid=snapshot_uuid)
+ smap.create()
+ return smap.id
@memoize
def get_snapshot_uuid_from_int_id(context, int_id):
- return db.get_snapshot_uuid_by_ec2_id(context, int_id)
+ smap = objects.EC2SnapshotMapping.get_by_id(context, int_id)
+ return smap.uuid
_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
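
ec2utils.py is part of the same db-to-objects migration as the cloud.py
hunks: direct nova.db calls for the S3 image and EC2 snapshot mappings are
replaced by NovaObject lookups with a create-on-miss fallback. The pattern,
condensed from the hunks (assumes the patched tree and a valid
RequestContext):

    from nova import exception
    from nova import objects

    def snapshot_uuid_to_int_id(context, snapshot_uuid):
        # Look the mapping up through the object layer and create it
        # the first time the uuid is seen.
        try:
            smap = objects.EC2SnapshotMapping.get_by_uuid(context,
                                                          snapshot_uuid)
            return smap.id
        except exception.NotFound:
            smap = objects.EC2SnapshotMapping(context, uuid=snapshot_uuid)
            smap.create()
            return smap.id
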
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index 7d77807dba..86318ceb37 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -17,7 +17,6 @@
"""Instance Metadata information."""
import base64
-import json
import os
import posixpath
@@ -33,6 +32,7 @@
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
@@ -327,7 +327,7 @@ def _metadata_as_json(self, version, path):
metadata['random_seed'] = base64.b64encode(os.urandom(512))
self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
- return json.dumps(metadata)
+ return jsonutils.dumps(metadata)
def _handle_content(self, path_tokens):
if len(path_tokens) == 1:
@@ -361,7 +361,7 @@ def _password(self, version, path):
def _vendor_data(self, version, path):
if self._check_os_version(HAVANA, version):
self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
- return json.dumps(self.vddriver.get())
+ return jsonutils.dumps(self.vddriver.get())
raise KeyError(path)
def _check_version(self, required, requested, versions=VERSIONS):
@@ -440,7 +440,7 @@ def metadata_for_config_drive(self):
pass
filepath = os.path.join('ec2', version, 'meta-data.json')
- yield (filepath, json.dumps(data['meta-data']))
+ yield (filepath, jsonutils.dumps(data['meta-data']))
ALL_OPENSTACK_VERSIONS = OPENSTACK_VERSIONS + ["latest"]
for version in ALL_OPENSTACK_VERSIONS:
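
The base.py hunks swap the stdlib json module for the shared jsonutils
wrapper; for these call sites dumps() is a drop-in replacement, so only the
import changes. A one-line illustration (the payload value here is
invented):

    from nova.openstack.common import jsonutils

    blob = jsonutils.dumps({'random_seed': 'c2VlZA=='})
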
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 29c0443b29..9ae64b7242 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -27,7 +27,9 @@
from nova.api.metadata import base
from nova import conductor
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova import utils
@@ -40,17 +42,22 @@
metadata_proxy_opts = [
cfg.BoolOpt(
- 'service_neutron_metadata_proxy',
+ 'service_metadata_proxy',
default=False,
help='Set flag to indicate Neutron will proxy metadata requests and '
- 'resolve instance ids.'),
+ 'resolve instance ids.',
+ deprecated_group='DEFAULT',
+ deprecated_name='service_neutron_metadata_proxy'),
cfg.StrOpt(
- 'neutron_metadata_proxy_shared_secret',
+ 'metadata_proxy_shared_secret',
default='', secret=True,
- help='Shared secret to validate proxies Neutron metadata requests')
+        help='Shared secret to validate proxied Neutron metadata requests',
+ deprecated_group='DEFAULT',
+ deprecated_name='neutron_metadata_proxy_shared_secret')
]
-CONF.register_opts(metadata_proxy_opts)
+# metadata_proxy_opts options in the DEFAULT group were deprecated in Juno
+CONF.register_opts(metadata_proxy_opts, 'neutron')
LOG = logging.getLogger(__name__)
@@ -104,14 +111,14 @@ def __call__(self, req):
req.response.content_type = base.MIME_TYPE_TEXT_PLAIN
return req.response
- if CONF.service_neutron_metadata_proxy:
+ if CONF.neutron.service_metadata_proxy:
meta_data = self._handle_instance_id_request(req)
else:
if req.headers.get('X-Instance-ID'):
LOG.warn(
- _("X-Instance-ID present in request headers. The "
- "'service_neutron_metadata_proxy' option must be enabled"
- " to process this header."))
+ _LW("X-Instance-ID present in request headers. The "
+ "'service_metadata_proxy' option must be "
+ "enabled to process this header."))
meta_data = self._handle_remote_ip_request(req)
if meta_data is None:
@@ -145,7 +152,8 @@ def _handle_remote_ip_request(self, req):
raise webob.exc.HTTPInternalServerError(explanation=unicode(msg))
if meta_data is None:
- LOG.error(_('Failed to get metadata for ip: %s'), remote_address)
+ LOG.error(_LE('Failed to get metadata for ip: %s'),
+ remote_address)
return meta_data
@@ -172,16 +180,16 @@ def _handle_instance_id_request(self, req):
raise webob.exc.HTTPBadRequest(explanation=msg)
expected_signature = hmac.new(
- CONF.neutron_metadata_proxy_shared_secret,
+ CONF.neutron.metadata_proxy_shared_secret,
instance_id,
hashlib.sha256).hexdigest()
if not utils.constant_time_compare(expected_signature, signature):
if instance_id:
- LOG.warn(_('X-Instance-ID-Signature: %(signature)s does not '
- 'match the expected value: %(expected_signature)s '
- 'for id: %(instance_id)s. Request From: '
- '%(remote_address)s'),
+ LOG.warn(_LW('X-Instance-ID-Signature: %(signature)s does '
+ 'not match the expected value: '
+ '%(expected_signature)s for id: %(instance_id)s.'
+ ' Request From: %(remote_address)s'),
{'signature': signature,
'expected_signature': expected_signature,
'instance_id': instance_id,
@@ -201,14 +209,12 @@ def _handle_instance_id_request(self, req):
raise webob.exc.HTTPInternalServerError(explanation=unicode(msg))
if meta_data is None:
- LOG.error(_('Failed to get metadata for instance id: %s'),
+ LOG.error(_LE('Failed to get metadata for instance id: %s'),
instance_id)
-
- if meta_data.instance['project_id'] != tenant_id:
- LOG.warning(_("Tenant_id %(tenant_id)s does not match tenant_id "
- "of instance %(instance_id)s."),
- {'tenant_id': tenant_id,
- 'instance_id': instance_id})
+ elif meta_data.instance['project_id'] != tenant_id:
+ LOG.warn(_LW("Tenant_id %(tenant_id)s does not match tenant_id "
+ "of instance %(instance_id)s."),
+ {'tenant_id': tenant_id, 'instance_id': instance_id})
# causes a 404 to be raised
meta_data = None
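
handler.py also demonstrates the option-rename pattern used for the Juno
deprecations: the options move into the [neutron] group under shorter names,
while deprecated_name/deprecated_group keep existing [DEFAULT] settings
working. A minimal oslo.config sketch, with names taken from the diff:

    from oslo.config import cfg

    CONF = cfg.CONF

    opts = [
        cfg.BoolOpt('service_metadata_proxy',
                    default=False,
                    help='Set flag to indicate Neutron will proxy metadata '
                         'requests and resolve instance ids.',
                    deprecated_group='DEFAULT',
                    deprecated_name='service_neutron_metadata_proxy'),
    ]
    CONF.register_opts(opts, 'neutron')

    # Call sites then read the new location:
    # CONF.neutron.service_metadata_proxy
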
diff --git a/nova/api/metadata/password.py b/nova/api/metadata/password.py
index ec32d29e84..6e067797b0 100644
--- a/nova/api/metadata/password.py
+++ b/nova/api/metadata/password.py
@@ -16,8 +16,8 @@
from webob import exc
from nova import context
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova import utils
diff --git a/nova/api/metadata/vendordata_json.py b/nova/api/metadata/vendordata_json.py
index 55edd55120..ee8a938784 100644
--- a/nova/api/metadata/vendordata_json.py
+++ b/nova/api/metadata/vendordata_json.py
@@ -20,7 +20,7 @@
from oslo.config import cfg
from nova.api.metadata import base
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -44,12 +44,13 @@ def __init__(self, *args, **kwargs):
data = jsonutils.load(fp)
except IOError as e:
if e.errno == errno.ENOENT:
- LOG.warn(logprefix + _("file does not exist"))
+ LOG.warn(logprefix + _LW("file does not exist"))
else:
- LOG.warn(logprefix + _("Unexpected IOError when reading"))
+ LOG.warn(logprefix + _LW("Unexpected IOError when "
+ "reading"))
raise e
except ValueError:
- LOG.warn(logprefix + _("failed to load json"))
+ LOG.warn(logprefix + _LW("failed to load json"))
raise
self._data = data
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index cac785241d..b0e1e9e884 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -27,9 +27,12 @@
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LC
+from nova.i18n import _LI
+from nova.i18n import _LW
+from nova.i18n import translate
from nova import notifications
-from nova.openstack.common import gettextutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from nova import wsgi as base_wsgi
@@ -95,7 +98,7 @@ def _error(self, inner, req):
status = 500
msg_dict = dict(url=req.url, status=status)
- LOG.info(_("%(url)s returned with HTTP %(status)d") % msg_dict)
+ LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
outer = self.status_to_type(status)
if headers:
outer.headers = headers
@@ -107,12 +110,8 @@ def _error(self, inner, req):
# inconsistent with the EC2 API to hide every exception,
# including those that are safe to expose, see bug 1021373
if safe:
- if isinstance(inner.msg_fmt, gettextutils.Message):
- user_locale = req.best_match_language()
- inner_msg = gettextutils.translate(
- inner.msg_fmt, user_locale)
- else:
- inner_msg = unicode(inner)
+ user_locale = req.best_match_language()
+ inner_msg = translate(inner.message, user_locale)
outer.explanation = '%s: %s' % (inner.__class__.__name__,
inner_msg)
@@ -233,9 +232,9 @@ def _setup_extensions(self, ext_mgr):
msg_format_dict = {'collection': collection,
'ext_name': extension.extension.name}
if collection not in self.resources:
- LOG.warning(_('Extension %(ext_name)s: Cannot extend '
- 'resource %(collection)s: No such resource'),
- msg_format_dict)
+ LOG.warn(_LW('Extension %(ext_name)s: Cannot extend '
+ 'resource %(collection)s: No such resource'),
+ msg_format_dict)
continue
LOG.debug('Extension %(ext_name)s extended resource: '
@@ -280,19 +279,19 @@ def _check_load_extension(ext):
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
return self._register_extension(ext)
else:
- LOG.warning(_("Not loading %s because it is "
- "in the blacklist"), ext.obj.alias)
+ LOG.warn(_LW("Not loading %s because it is "
+ "in the blacklist"), ext.obj.alias)
return False
else:
- LOG.warning(
- _("Not loading %s because it is not in the whitelist"),
- ext.obj.alias)
+ LOG.warn(
+ _LW("Not loading %s because it is not in the "
+ "whitelist"), ext.obj.alias)
return False
else:
return False
if not CONF.osapi_v3.enabled:
- LOG.info(_("V3 API has been disabled by configuration"))
+ LOG.info(_LI("V3 API has been disabled by configuration"))
return
self.init_only = init_only
@@ -305,8 +304,8 @@ def _check_load_extension(ext):
CONF.osapi_v3.extensions_whitelist).intersection(
CONF.osapi_v3.extensions_blacklist)
if len(in_blacklist_and_whitelist) != 0:
- LOG.warning(_("Extensions in both blacklist and whitelist: %s"),
- list(in_blacklist_and_whitelist))
+ LOG.warn(_LW("Extensions in both blacklist and whitelist: %s"),
+ list(in_blacklist_and_whitelist))
self.api_extension_manager = stevedore.enabled.EnabledExtensionManager(
namespace=self.API_EXTENSION_NAMESPACE,
@@ -329,7 +328,7 @@ def _check_load_extension(ext):
missing_core_extensions = self.get_missing_core_extensions(
self.loaded_extension_info.get_extensions().keys())
if not self.init_only and missing_core_extensions:
- LOG.critical(_("Missing core API extensions: %s"),
+ LOG.critical(_LC("Missing core API extensions: %s"),
missing_core_extensions)
raise exception.CoreAPIMissing(
missing_apis=missing_core_extensions)
@@ -407,9 +406,9 @@ def _register_controllers(self, ext):
controller = extension.controller
if collection not in self.resources:
- LOG.warning(_('Extension %(ext_name)s: Cannot extend '
- 'resource %(collection)s: No such resource'),
- {'ext_name': ext_name, 'collection': collection})
+ LOG.warn(_LW('Extension %(ext_name)s: Cannot extend '
+ 'resource %(collection)s: No such resource'),
+ {'ext_name': ext_name, 'collection': collection})
continue
LOG.debug('Extension %(ext_name)s extending resource: '
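
The isinstance branch can be dropped because ``nova.i18n.translate`` itself handles both cases; roughly (a sketch of the presumed behaviour on py2, not the oslo implementation)::

    class Message(unicode):
        """Toy stand-in for the lazy-translation Message type."""
        def render(self, locale=None):
            return self  # a real catalog lookup would happen here

    def translate(msg, desired_locale=None):
        # Message objects render themselves for the requested locale;
        # plain exception messages pass through as unicode unchanged.
        if isinstance(msg, Message):
            return msg.render(desired_locale)
        return unicode(msg)
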
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index d0105e57d7..91fcbffc55 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -29,7 +29,9 @@
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova import quota
@@ -129,23 +131,24 @@ def status_from_state(vm_state, task_state='default'):
task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN'))
status = task_map.get(task_state, task_map['default'])
if status == "UNKNOWN":
- LOG.error(_("status is UNKNOWN from vm_state=%(vm_state)s "
- "task_state=%(task_state)s. Bad upgrade or db "
- "corrupted?"),
+ LOG.error(_LE("status is UNKNOWN from vm_state=%(vm_state)s "
+ "task_state=%(task_state)s. Bad upgrade or db "
+ "corrupted?"),
{'vm_state': vm_state, 'task_state': task_state})
return status
-def task_and_vm_state_from_status(status):
- """Map the server status string to list of vm states and
+def task_and_vm_state_from_status(statuses):
+ """Map the server's multiple status strings to list of vm states and
list of task states.
"""
vm_states = set()
task_states = set()
+ lower_statuses = [status.lower() for status in statuses]
for state, task_map in _STATE_MAP.iteritems():
for task_state, mapped_state in task_map.iteritems():
status_string = mapped_state
- if status.lower() == status_string.lower():
+ if status_string.lower() in lower_statuses:
vm_states.add(state)
task_states.add(task_state)
# Add sort to avoid different order on set in Python 3
@@ -273,9 +276,8 @@ def remove_version_from_href(href):
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
- msg = _('href %s does not contain version') % href
- LOG.debug(msg)
- raise ValueError(msg)
+        LOG.debug('href %s does not contain version', href)
+ raise ValueError(_('href %s does not contain version') % href)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
@@ -289,8 +291,7 @@ def check_img_metadata_properties_quota(context, metadata):
QUOTAS.limit_check(context, metadata_items=len(metadata))
except exception.OverQuota:
expl = _("Image metadata limit exceeded")
- raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl,
- headers={'Retry-After': 0})
+ raise webob.exc.HTTPForbidden(explanation=expl)
# check the key length.
if isinstance(metadata, dict):
@@ -451,8 +452,8 @@ def check_snapshots_enabled(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.allow_instance_snapshots:
- LOG.warn(_('Rejecting snapshot request, snapshots currently'
- ' disabled'))
+ LOG.warn(_LW('Rejecting snapshot request, snapshots currently'
+ ' disabled'))
msg = _("Instance snapshots are not permitted at this time.")
raise webob.exc.HTTPBadRequest(explanation=msg)
return f(*args, **kwargs)
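
The reworked mapper accepts a list of statuses and unions the matching states; a runnable illustration with a toy two-entry map in place of Nova's full ``_STATE_MAP``::

    _STATE_MAP = {
        'active': {'default': 'ACTIVE', 'rebooting': 'REBOOT'},
        'building': {'default': 'BUILD'},
    }

    def task_and_vm_state_from_status(statuses):
        vm_states, task_states = set(), set()
        lower_statuses = [status.lower() for status in statuses]
        for state, task_map in _STATE_MAP.items():
            for task_state, mapped_state in task_map.items():
                if mapped_state.lower() in lower_statuses:
                    vm_states.add(state)
                    task_states.add(task_state)
        return sorted(vm_states), sorted(task_states)

    print(task_and_vm_state_from_status(['ACTIVE', 'BUILD']))
    # -> (['active', 'building'], ['default'])
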
diff --git a/nova/api/openstack/compute/consoles.py b/nova/api/openstack/compute/consoles.py
index 67e3a7ad86..40b4aceabe 100644
--- a/nova/api/openstack/compute/consoles.py
+++ b/nova/api/openstack/compute/consoles.py
@@ -111,10 +111,6 @@ def show(self, req, server_id, id):
raise exc.HTTPNotFound()
return _translate_detail_keys(console)
- def update(self, req, server_id, id, body):
- """You can't update a console."""
- raise exc.HTTPNotImplemented()
-
def delete(self, req, server_id, id):
"""Deletes a console."""
try:
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index 4092cbbeaf..361ab482a6 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -24,7 +24,8 @@
from nova import compute
from nova.compute import vm_states
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.i18n import _LE
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
@@ -62,12 +63,9 @@ def _pause(self, req, id, body):
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
- except NotImplementedError:
- msg = _("Virt driver does not implement pause function.")
- raise exc.HTTPNotImplemented(explanation=msg)
except Exception:
readable = traceback.format_exc()
- LOG.exception(_("Compute.api::pause %s"), readable)
+ LOG.exception(_LE("Compute.api::pause %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@@ -87,12 +85,9 @@ def _unpause(self, req, id, body):
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(explanation=msg)
- except NotImplementedError:
- msg = _("Virt driver does not implement unpause function.")
- raise exc.HTTPNotImplemented(explanation=msg)
except Exception:
readable = traceback.format_exc()
- LOG.exception(_("Compute.api::unpause %s"), readable)
+ LOG.exception(_LE("Compute.api::unpause %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@@ -114,7 +109,7 @@ def _suspend(self, req, id, body):
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
- LOG.exception(_("compute.api::suspend %s"), readable)
+ LOG.exception(_LE("compute.api::suspend %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@@ -136,7 +131,7 @@ def _resume(self, req, id, body):
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
- LOG.exception(_("compute.api::resume %s"), readable)
+ LOG.exception(_LE("compute.api::resume %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@@ -149,9 +144,7 @@ def _migrate(self, req, id, body):
instance = self.compute_api.get(context, id, want_objects=True)
self.compute_api.resize(req.environ['nova.context'], instance)
except exception.QuotaError as error:
- raise exc.HTTPRequestEntityTooLarge(
- explanation=error.format_message(),
- headers={'Retry-After': 0})
+ raise exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
@@ -162,7 +155,7 @@ def _migrate(self, req, id, body):
except exception.NoValidHost as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except Exception as e:
- LOG.exception(_("Error in migrate %s"), e)
+ LOG.exception(_LE("Error in migrate %s"), e)
raise exc.HTTPBadRequest()
return webob.Response(status_int=202)
@@ -181,7 +174,7 @@ def _reset_network(self, req, id, body):
raise exc.HTTPConflict(explanation=e.format_message())
except Exception:
readable = traceback.format_exc()
- LOG.exception(_("Compute.api::reset_network %s"), readable)
+ LOG.exception(_LE("Compute.api::reset_network %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@@ -200,7 +193,7 @@ def _inject_network_info(self, req, id, body):
raise exc.HTTPConflict(explanation=e.format_message())
except Exception:
readable = traceback.format_exc()
- LOG.exception(_("Compute.api::inject_network_info %s"), readable)
+ LOG.exception(_LE("Compute.api::inject_network_info %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@@ -217,7 +210,7 @@ def _lock(self, req, id, body):
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
- LOG.exception(_("Compute.api::lock %s"), readable)
+ LOG.exception(_LE("Compute.api::lock %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@@ -236,7 +229,7 @@ def _unlock(self, req, id, body):
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
- LOG.exception(_("Compute.api::unlock %s"), readable)
+ LOG.exception(_LE("Compute.api::unlock %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
@@ -394,7 +387,7 @@ def _reset_state(self, req, id, body):
raise exc.HTTPNotFound(explanation=msg)
except Exception:
readable = traceback.format_exc()
- LOG.exception(_("Compute.api::resetState %s"), readable)
+ LOG.exception(_LE("Compute.api::resetState %s"), readable)
raise exc.HTTPUnprocessableEntity()
return webob.Response(status_int=202)
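
The same 413-to-403 change recurs elsewhere in this patch (common.py above, deferred_delete below); the new mapping in isolation, with webob as the only dependency::

    import webob.exc

    class QuotaError(Exception):          # stand-in for nova.exception
        def format_message(self):
            return 'Quota exceeded'

    def resize_checked(do_resize):
        try:
            do_resize()
        except QuotaError as error:
            # 403 says the request is understood but refused; the old
            # 413 with Retry-After wrongly implied that waiting helps.
            raise webob.exc.HTTPForbidden(
                explanation=error.format_message())
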
diff --git a/nova/api/openstack/compute/contrib/agents.py b/nova/api/openstack/compute/contrib/agents.py
index c69159c5ab..c05eb4ef2f 100644
--- a/nova/api/openstack/compute/contrib/agents.py
+++ b/nova/api/openstack/compute/contrib/agents.py
@@ -19,6 +19,7 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
+from nova.i18n import _
from nova import objects
from nova import utils
@@ -95,8 +96,9 @@ def update(self, req, id, body):
url = para['url']
md5hash = para['md5hash']
version = para['version']
- except (TypeError, KeyError):
- raise webob.exc.HTTPUnprocessableEntity()
+ except (TypeError, KeyError) as ex:
+ msg = _("Invalid request body: %s") % unicode(ex)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
try:
utils.check_string_length(url, 'url', max_length=255)
@@ -112,8 +114,9 @@ def update(self, req, id, body):
agent.url = url
agent.md5hash = md5hash
agent.save()
- except ValueError:
- raise webob.exc.HTTPUnprocessableEntity()
+ except ValueError as ex:
+ msg = _("Invalid request body: %s") % unicode(ex)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.AgentBuildNotFound as ex:
raise webob.exc.HTTPNotFound(explanation=ex.format_message())
@@ -149,8 +152,9 @@ def create(self, req, body):
version = agent['version']
url = agent['url']
md5hash = agent['md5hash']
- except (TypeError, KeyError):
- raise webob.exc.HTTPUnprocessableEntity()
+ except (TypeError, KeyError) as ex:
+ msg = _("Invalid request body: %s") % unicode(ex)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
try:
utils.check_string_length(hypervisor, 'hypervisor', max_length=255)
@@ -174,7 +178,7 @@ def create(self, req, body):
agent_obj.create()
agent['agent_id'] = agent_obj.id
except exception.AgentBuildExists as ex:
- raise webob.exc.HTTPServerError(explanation=ex.format_message())
+ raise webob.exc.HTTPConflict(explanation=ex.format_message())
return {'agent': agent}
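
The 422-to-400 pattern used throughout this file, reduced to its core (a sketch; the field names match the agents API above)::

    import webob.exc

    def parse_agent_body(para):
        try:
            return para['url'], para['md5hash'], para['version']
        except (TypeError, KeyError) as ex:
            # Tell the caller what was wrong instead of a bare 422.
            msg = 'Invalid request body: %s' % ex
            raise webob.exc.HTTPBadRequest(explanation=msg)
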
diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py
index c5435c5f21..852f4be935 100644
--- a/nova/api/openstack/compute/contrib/aggregates.py
+++ b/nova/api/openstack/compute/contrib/aggregates.py
@@ -22,11 +22,9 @@
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
from nova import utils
-LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'aggregates')
@@ -37,10 +35,19 @@ def _get_context(req):
def get_host_from_body(fn):
"""Makes sure that the host exists."""
def wrapped(self, req, id, body, *args, **kwargs):
- if len(body) == 1 and "host" in body:
- host = body['host']
- else:
- raise exc.HTTPBadRequest()
+ if len(body) != 1:
+ msg = _('Only host parameter can be specified')
+ raise exc.HTTPBadRequest(explanation=msg)
+ elif 'host' not in body:
+ msg = _('Host parameter must be specified')
+ raise exc.HTTPBadRequest(explanation=msg)
+ try:
+ utils.check_string_length(body['host'], 'host', 1, 255)
+ except exception.InvalidInput as e:
+ raise exc.HTTPBadRequest(explanation=e.format_message())
+
+ host = body['host']
+
return fn(self, req, id, host, *args, **kwargs)
return wrapped
@@ -219,7 +226,8 @@ def _set_metadata(self, req, id, body):
try:
for key, value in metadata.items():
utils.check_string_length(key, "metadata.key", 1, 255)
- utils.check_string_length(value, "metadata.value", 0, 255)
+ if value is not None:
+ utils.check_string_length(value, "metadata.value", 0, 255)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
try:
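
The decorator now rejects anything but ``{"host": "<1-255 chars>"}``; the same checks written as a plain function, assuming py2 (``basestring``)::

    def host_from_body(body):
        if len(body) != 1:
            raise ValueError('Only host parameter can be specified')
        if 'host' not in body:
            raise ValueError('Host parameter must be specified')
        host = body['host']
        if not isinstance(host, basestring) or not 0 < len(host) <= 255:
            raise ValueError('host must be a string of 1-255 characters')
        return host

    host_from_body({'host': 'compute-01'})   # returns 'compute-01'
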
diff --git a/nova/api/openstack/compute/contrib/assisted_volume_snapshots.py b/nova/api/openstack/compute/contrib/assisted_volume_snapshots.py
index b54008c278..76ea7d10e1 100644
--- a/nova/api/openstack/compute/contrib/assisted_volume_snapshots.py
+++ b/nova/api/openstack/compute/contrib/assisted_volume_snapshots.py
@@ -19,7 +19,7 @@
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
diff --git a/nova/api/openstack/compute/contrib/attach_interfaces.py b/nova/api/openstack/compute/contrib/attach_interfaces.py
index fdc6441b47..c2fce038f0 100644
--- a/nova/api/openstack/compute/contrib/attach_interfaces.py
+++ b/nova/api/openstack/compute/contrib/attach_interfaces.py
@@ -18,11 +18,12 @@
import webob
from webob import exc
+from nova.api.openstack import common
from nova.api.openstack import extensions
from nova import compute
from nova import exception
+from nova.i18n import _
from nova import network
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
@@ -60,10 +61,7 @@ def show(self, req, server_id, id):
authorize(context)
port_id = id
- try:
- self.compute_api.get(context, server_id)
- except exception.NotFound:
- raise exc.HTTPNotFound()
+ common.get_instance(self.compute_api, context, server_id)
try:
port_info = self.network_api.show_port(context, port_id)
@@ -99,8 +97,9 @@ def create(self, req, server_id, body):
raise exc.HTTPBadRequest()
try:
- instance = self.compute_api.get(context, server_id,
- want_objects=True)
+ instance = common.get_instance(self.compute_api,
+ context, server_id,
+ want_objects=True)
LOG.audit(_("Attach interface"), instance=instance)
vif = self.compute_api.attach_interface(context,
instance, network_id, port_id, req_ip)
@@ -111,8 +110,6 @@ def create(self, req, server_id, body):
exception.NetworkAmbiguous,
exception.NetworkNotFound) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
- except exception.NotFound as e:
- raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except NotImplementedError:
@@ -122,27 +119,21 @@ def create(self, req, server_id, body):
LOG.exception(e)
msg = _("Failed to attach interface")
raise webob.exc.HTTPInternalServerError(explanation=msg)
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'attach_interface')
return self.show(req, server_id, vif['id'])
- def update(self, req, server_id, id, body):
- """Update a interface attachment. We don't currently support this."""
- msg = _("Attachments update is not supported")
- raise exc.HTTPNotImplemented(explanation=msg)
-
def delete(self, req, server_id, id):
"""Detach an interface from an instance."""
context = req.environ['nova.context']
authorize(context)
port_id = id
-
- try:
- instance = self.compute_api.get(context, server_id,
- want_objects=True)
- LOG.audit(_("Detach interface %s"), port_id, instance=instance)
-
- except exception.NotFound:
- raise exc.HTTPNotFound()
+ instance = common.get_instance(self.compute_api,
+ context, server_id,
+ want_objects=True)
+ LOG.audit(_("Detach interface %s"), port_id, instance=instance)
try:
self.compute_api.detach_interface(context,
instance, port_id=port_id)
@@ -153,6 +144,9 @@ def delete(self, req, server_id, id):
except NotImplementedError:
msg = _("Network driver does not support this function.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'detach_interface')
return webob.Response(status_int=202)
@@ -160,12 +154,7 @@ def _items(self, req, server_id, entity_maker):
"""Returns a list of attachments, transformed through entity_maker."""
context = req.environ['nova.context']
authorize(context)
-
- try:
- instance = self.compute_api.get(context, server_id)
- except exception.NotFound:
- raise exc.HTTPNotFound()
-
+ instance = common.get_instance(self.compute_api, context, server_id)
results = []
search_opts = {'device_id': instance['uuid']}
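
Every lookup in this file now goes through ``common.get_instance``; a plausible sketch of that shared helper (the real one lives in nova/api/openstack/common.py)::

    from webob import exc

    class InstanceNotFound(Exception):    # stand-in for nova.exception
        def format_message(self):
            return 'Instance could not be found'

    def get_instance(compute_api, context, instance_id, want_objects=False):
        # One place that turns "no such instance" into an HTTP 404,
        # instead of a try/except block in every controller method.
        try:
            return compute_api.get(context, instance_id,
                                   want_objects=want_objects)
        except InstanceNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
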
diff --git a/nova/api/openstack/compute/contrib/availability_zone.py b/nova/api/openstack/compute/contrib/availability_zone.py
index 136cd2355c..688f0602f3 100644
--- a/nova/api/openstack/compute/contrib/availability_zone.py
+++ b/nova/api/openstack/compute/contrib/availability_zone.py
@@ -19,7 +19,7 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import availability_zones
-from nova import db
+from nova import objects
from nova import servicegroup
CONF = cfg.CONF
@@ -103,7 +103,7 @@ def _describe_availability_zones_verbose(self, context, **kwargs):
availability_zones.get_availability_zones(ctxt)
# Available services
- enabled_services = db.service_get_all(context, False)
+ enabled_services = objects.ServiceList.get_all(context, disabled=False)
enabled_services = availability_zones.set_availability_zones(context,
enabled_services)
zone_hosts = {}
diff --git a/nova/api/openstack/compute/contrib/baremetal_nodes.py b/nova/api/openstack/compute/contrib/baremetal_nodes.py
index 22a3faa98c..bff5a45be0 100644
--- a/nova/api/openstack/compute/contrib/baremetal_nodes.py
+++ b/nova/api/openstack/compute/contrib/baremetal_nodes.py
@@ -22,7 +22,7 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.virt.baremetal import db
authorize = extensions.extension_authorizer('compute', 'baremetal_nodes')
diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py
index 3937c24338..c078506f1d 100644
--- a/nova/api/openstack/compute/contrib/cells.py
+++ b/nova/api/openstack/compute/contrib/cells.py
@@ -28,14 +28,12 @@
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova import rpc
-LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
@@ -270,7 +268,10 @@ def show(self, req, id):
def delete(self, req, id):
"""Delete a child or parent cell entry. 'id' is a cell name."""
context = req.environ['nova.context']
+
authorize(context)
+ authorize(context, action="delete")
+
try:
num_deleted = self.cells_rpcapi.cell_delete(context, id)
except exception.CellsUpdateUnsupported as e:
@@ -283,18 +284,15 @@ def _validate_cell_name(self, cell_name):
"""Validate cell name is not empty and doesn't contain '!' or '.'."""
if not cell_name:
msg = _("Cell name cannot be empty")
- LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
if '!' in cell_name or '.' in cell_name:
msg = _("Cell name cannot contain '!' or '.'")
- LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _validate_cell_type(self, cell_type):
"""Validate cell_type is 'parent' or 'child'."""
if cell_type not in ['parent', 'child']:
msg = _("Cell type must be 'parent' or 'child'")
- LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
def _normalize_cell(self, cell, existing=None):
@@ -347,15 +345,16 @@ def _normalize_cell(self, cell, existing=None):
def create(self, req, body):
"""Create a child cell entry."""
context = req.environ['nova.context']
+
authorize(context)
+ authorize(context, action="create")
+
if 'cell' not in body:
msg = _("No cell information in request")
- LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
if 'name' not in cell:
msg = _("No cell name in request")
- LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
self._validate_cell_name(cell['name'])
self._normalize_cell(cell)
@@ -371,10 +370,12 @@ def create(self, req, body):
def update(self, req, id, body):
"""Update a child cell entry. 'id' is the cell name to update."""
context = req.environ['nova.context']
+
authorize(context)
+ authorize(context, action="update")
+
if 'cell' not in body:
msg = _("No cell information in request")
- LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
cell = body['cell']
cell.pop('id', None)
@@ -403,7 +404,10 @@ def update(self, req, id, body):
def sync_instances(self, req, body):
"""Tell all cells to sync instance info."""
context = req.environ['nova.context']
+
authorize(context)
+ authorize(context, action="sync_instances")
+
project_id = body.pop('project_id', None)
deleted = body.pop('deleted', False)
updated_since = body.pop('updated_since', None)
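
The doubled ``authorize`` calls keep the old blanket cells policy working while adding per-action rules. Presumably the action suffix is appended to the policy target, along these lines (the rule names here are an assumption)::

    def cells_policy_rule(action=None):
        rule = 'compute_extension:cells'
        if action is not None:
            rule = '%s:%s' % (rule, action)
        return rule

    assert cells_policy_rule('sync_instances') == \
        'compute_extension:cells:sync_instances'
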
diff --git a/nova/api/openstack/compute/contrib/certificates.py b/nova/api/openstack/compute/contrib/certificates.py
index a483c44085..dd6b7d3e71 100644
--- a/nova/api/openstack/compute/contrib/certificates.py
+++ b/nova/api/openstack/compute/contrib/certificates.py
@@ -18,7 +18,8 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
import nova.cert.rpcapi
-from nova.openstack.common.gettextutils import _
+from nova import exception
+from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'certificates')
@@ -58,8 +59,11 @@ def show(self, req, id):
if id != 'root':
msg = _("Only root certificate can be retrieved.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
- cert = self.cert_rpcapi.fetch_ca(context,
- project_id=context.project_id)
+ try:
+ cert = self.cert_rpcapi.fetch_ca(context,
+ project_id=context.project_id)
+ except exception.CryptoCAFileNotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'certificate': _translate_certificate_view(cert)}
@wsgi.serializers(xml=CertificateTemplate)
diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py
index ec24a56a13..c10c45b7f2 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -25,9 +25,9 @@
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
+from nova.i18n import _
from nova import network
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
from nova import utils
diff --git a/nova/api/openstack/compute/contrib/cloudpipe_update.py b/nova/api/openstack/compute/contrib/cloudpipe_update.py
index 7ee8f14d7a..662915ba8e 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe_update.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe_update.py
@@ -13,12 +13,13 @@
# under the License.
+import webob
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'cloudpipe_update')
@@ -51,10 +52,11 @@ def update(self, req, id, body):
network.vpn_public_address = vpn_ip
network.vpn_public_port = vpn_port
network.save()
- except (TypeError, KeyError, ValueError):
- raise webob.exc.HTTPUnprocessableEntity()
+ except (TypeError, KeyError, ValueError) as ex:
+ msg = _("Invalid request body: %s") % unicode(ex)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
- return webob.exc.HTTPAccepted()
+ return webob.Response(status_int=202)
class Cloudpipe_update(extensions.ExtensionDescriptor):
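
Returning ``webob.exc.HTTPAccepted()`` works only because webob exceptions double as responses; constructing the ``Response`` directly states the intent. Both yield a 202::

    import webob
    import webob.exc

    print(webob.Response(status_int=202).status)   # 202 Accepted
    print(webob.exc.HTTPAccepted().status)         # 202 Accepted
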
diff --git a/nova/api/openstack/compute/contrib/console_auth_tokens.py b/nova/api/openstack/compute/contrib/console_auth_tokens.py
index 681cb8577e..a56e636b89 100644
--- a/nova/api/openstack/compute/contrib/console_auth_tokens.py
+++ b/nova/api/openstack/compute/contrib/console_auth_tokens.py
@@ -18,7 +18,7 @@
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.consoleauth import rpcapi as consoleauth_rpcapi
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'console_auth_tokens')
diff --git a/nova/api/openstack/compute/contrib/console_output.py b/nova/api/openstack/compute/contrib/console_output.py
index fab5c25afd..63aa1c8f04 100644
--- a/nova/api/openstack/compute/contrib/console_output.py
+++ b/nova/api/openstack/compute/contrib/console_output.py
@@ -22,7 +22,7 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'console_output')
diff --git a/nova/api/openstack/compute/contrib/consoles.py b/nova/api/openstack/compute/contrib/consoles.py
index 2ea7ca24d2..cf97764f6e 100644
--- a/nova/api/openstack/compute/contrib/consoles.py
+++ b/nova/api/openstack/compute/contrib/consoles.py
@@ -18,7 +18,7 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'consoles')
diff --git a/nova/api/openstack/compute/contrib/deferred_delete.py b/nova/api/openstack/compute/contrib/deferred_delete.py
index cb895f625a..f457111fad 100644
--- a/nova/api/openstack/compute/contrib/deferred_delete.py
+++ b/nova/api/openstack/compute/contrib/deferred_delete.py
@@ -43,9 +43,7 @@ def _restore(self, req, id, body):
try:
self.compute_api.restore(context, instance)
except exception.QuotaError as error:
- raise webob.exc.HTTPRequestEntityTooLarge(
- explanation=error.format_message(),
- headers={'Retry-After': 0})
+ raise webob.exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'restore')
diff --git a/nova/api/openstack/compute/contrib/disk_config.py b/nova/api/openstack/compute/contrib/disk_config.py
index 19817eab24..7118be549f 100644
--- a/nova/api/openstack/compute/contrib/disk_config.py
+++ b/nova/api/openstack/compute/contrib/disk_config.py
@@ -19,7 +19,7 @@
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import strutils
ALIAS = 'OS-DCF'
diff --git a/nova/api/openstack/compute/contrib/evacuate.py b/nova/api/openstack/compute/contrib/evacuate.py
index 723167a993..ba5e62ca4a 100644
--- a/nova/api/openstack/compute/contrib/evacuate.py
+++ b/nova/api/openstack/compute/contrib/evacuate.py
@@ -20,7 +20,7 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import strutils
from nova import utils
@@ -28,15 +28,17 @@
class Controller(wsgi.Controller):
- def __init__(self, *args, **kwargs):
+ def __init__(self, ext_mgr, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
self.host_api = compute.HostAPI()
+ self.ext_mgr = ext_mgr
@wsgi.action('evacuate')
def _evacuate(self, req, id, body):
"""Permit admins to evacuate a server from a failed host
to a new one.
+        If the host is omitted or empty, the scheduler will select one.
"""
context = req.environ["nova.context"]
authorize(context)
@@ -45,12 +47,18 @@ def _evacuate(self, req, id, body):
raise exc.HTTPBadRequest(_("Malformed request body"))
evacuate_body = body["evacuate"]
+ host = evacuate_body.get("host")
+
+ if (not host and
+ not self.ext_mgr.is_loaded('os-extended-evacuate-find-host')):
+ msg = _("host must be specified.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
try:
- host = evacuate_body["host"]
on_shared_storage = strutils.bool_from_string(
evacuate_body["onSharedStorage"])
except (TypeError, KeyError):
- msg = _("host and onSharedStorage must be specified.")
+ msg = _("onSharedStorage must be specified.")
raise exc.HTTPBadRequest(explanation=msg)
password = None
@@ -65,11 +73,12 @@ def _evacuate(self, req, id, body):
elif not on_shared_storage:
password = utils.generate_password()
- try:
- self.host_api.service_get_by_compute_host(context, host)
- except exception.NotFound:
- msg = _("Compute host %s not found.") % host
- raise exc.HTTPNotFound(explanation=msg)
+ if host is not None:
+ try:
+ self.host_api.service_get_by_compute_host(context, host)
+ except exception.NotFound:
+ msg = _("Compute host %s not found.") % host
+ raise exc.HTTPNotFound(explanation=msg)
try:
instance = self.compute_api.get(context, id, want_objects=True)
@@ -99,6 +108,6 @@ class Evacuate(extensions.ExtensionDescriptor):
updated = "2013-01-06T00:00:00Z"
def get_controller_extensions(self):
- controller = Controller()
+ controller = Controller(self.ext_mgr)
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
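
With the new extension loaded, the host key may be omitted and the scheduler picks a target; without it, the old contract still applies. Hypothetical request bodies for the two cases::

    # Explicit target host (accepted with or without the extension):
    {"evacuate": {"host": "compute-02",
                  "onSharedStorage": "True"}}

    # No host: valid only when os-extended-evacuate-find-host is loaded,
    # otherwise the API answers 400 "host must be specified."
    {"evacuate": {"onSharedStorage": "False",
                  "adminPass": "MySecretPass"}}
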
diff --git a/nova/api/openstack/compute/contrib/extended_evacuate_find_host.py b/nova/api/openstack/compute/contrib/extended_evacuate_find_host.py
new file mode 100644
index 0000000000..2dfe3faff5
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/extended_evacuate_find_host.py
@@ -0,0 +1,26 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.api.openstack import extensions
+
+
+class Extended_evacuate_find_host(extensions.ExtensionDescriptor):
+ """Enables server evacuation without target host. Scheduler will select
+ one to target.
+ """
+ name = "ExtendedEvacuateFindHost"
+ alias = "os-extended-evacuate-find-host"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "extended_evacuate_find_host/api/v2")
+ updated = "2014-02-12T00:00:00Z"
diff --git a/nova/api/openstack/compute/contrib/extended_ips.py b/nova/api/openstack/compute/contrib/extended_ips.py
index 6aadab5736..6b63b8feb8 100644
--- a/nova/api/openstack/compute/contrib/extended_ips.py
+++ b/nova/api/openstack/compute/contrib/extended_ips.py
@@ -47,7 +47,7 @@ def _extend_server(self, context, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedIpsServerTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -59,7 +59,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedIpsServersTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -96,7 +96,7 @@ def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(root)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_ips.alias: Extended_ips.namespace})
@@ -105,5 +105,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_ips.alias: Extended_ips.namespace})
diff --git a/nova/api/openstack/compute/contrib/extended_ips_mac.py b/nova/api/openstack/compute/contrib/extended_ips_mac.py
index 79bce3cc76..c076b5747e 100644
--- a/nova/api/openstack/compute/contrib/extended_ips_mac.py
+++ b/nova/api/openstack/compute/contrib/extended_ips_mac.py
@@ -45,7 +45,7 @@ def _extend_server(self, context, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedIpsMacServerTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -57,7 +57,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedIpsMacServersTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -93,7 +93,7 @@ class ExtendedIpsMacServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_ips_mac.alias: Extended_ips_mac.namespace})
@@ -102,5 +102,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_ips_mac.alias: Extended_ips_mac.namespace})
diff --git a/nova/api/openstack/compute/contrib/extended_networks.py b/nova/api/openstack/compute/contrib/extended_networks.py
new file mode 100644
index 0000000000..f5021a48dc
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/extended_networks.py
@@ -0,0 +1,26 @@
+# Copyright 2014 Nebula, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack import extensions
+
+
+class Extended_networks(extensions.ExtensionDescriptor):
+ """Adds additional fields to networks."""
+
+ name = "ExtendedNetworks"
+ alias = "os-extended-networks"
+ namespace = ("http://docs.openstack.org/compute/ext/extended_networks"
+ "/api/v2")
+ updated = "2014-05-09T00:00:00Z"
diff --git a/nova/api/openstack/compute/contrib/extended_server_attributes.py b/nova/api/openstack/compute/contrib/extended_server_attributes.py
index 9777412197..83370b660d 100644
--- a/nova/api/openstack/compute/contrib/extended_server_attributes.py
+++ b/nova/api/openstack/compute/contrib/extended_server_attributes.py
@@ -39,7 +39,7 @@ def _extend_server(self, context, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedServerAttributeTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -51,7 +51,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedServerAttributesTemplate())
servers = list(resp_obj.obj['servers'])
@@ -92,7 +92,7 @@ def construct(self):
make_server(root)
alias = Extended_server_attributes.alias
namespace = Extended_server_attributes.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class ExtendedServerAttributesTemplate(xmlutil.TemplateBuilder):
@@ -102,4 +102,4 @@ def construct(self):
make_server(elem)
alias = Extended_server_attributes.alias
namespace = Extended_server_attributes.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
diff --git a/nova/api/openstack/compute/contrib/extended_status.py b/nova/api/openstack/compute/contrib/extended_status.py
index 5cdd1e8d42..d4be295d36 100644
--- a/nova/api/openstack/compute/contrib/extended_status.py
+++ b/nova/api/openstack/compute/contrib/extended_status.py
@@ -36,7 +36,7 @@ def _extend_server(self, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -48,7 +48,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedStatusesTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -86,7 +86,7 @@ class ExtendedStatusTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
@@ -95,5 +95,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_status.alias: Extended_status.namespace})
diff --git a/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py b/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py
index a3dd4a3b6f..1b6350eecd 100644
--- a/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py
+++ b/nova/api/openstack/compute/contrib/extended_virtual_interfaces_net.py
@@ -33,7 +33,7 @@ def construct(self):
elem = xmlutil.SubTemplateElement(root, 'virtual_interface',
selector='virtual_interfaces')
make_vif(elem)
return xmlutil.SlaveTemplate(root, 1,
nsmap={Extended_virtual_interfaces_net.alias:
Extended_virtual_interfaces_net.namespace})
@@ -48,7 +48,7 @@ def index(self, req, resp_obj, server_id):
key = "%s:net_id" % Extended_virtual_interfaces_net.alias
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedVirtualInterfaceNetTemplate())
for vif in resp_obj.obj['virtual_interfaces']:
vif1 = self.network_api.get_vif_by_mac_address(context,
diff --git a/nova/api/openstack/compute/contrib/extended_volumes.py b/nova/api/openstack/compute/contrib/extended_volumes.py
index f4af2f3d63..f2608c1c30 100644
--- a/nova/api/openstack/compute/contrib/extended_volumes.py
+++ b/nova/api/openstack/compute/contrib/extended_volumes.py
@@ -39,7 +39,7 @@ def _extend_server(self, context, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedVolumesServerTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -51,7 +51,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=ExtendedVolumesServersTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -90,7 +90,7 @@ class ExtendedVolumesServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_volumes.alias: Extended_volumes.namespace})
@@ -99,5 +99,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
return xmlutil.SlaveTemplate(root, 1, nsmap={
Extended_volumes.alias: Extended_volumes.namespace})
diff --git a/nova/api/openstack/compute/contrib/fixed_ips.py b/nova/api/openstack/compute/contrib/fixed_ips.py
index 805f1cbed5..be071f73df 100644
--- a/nova/api/openstack/compute/contrib/fixed_ips.py
+++ b/nova/api/openstack/compute/contrib/fixed_ips.py
@@ -12,12 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
+import webob
import webob.exc
from nova.api.openstack import extensions
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'fixed_ips')
@@ -74,7 +75,7 @@ def _set_reserved(self, context, address, reserved):
msg = _("Fixed IP %s not found") % address
raise webob.exc.HTTPNotFound(explanation=msg)
- return webob.exc.HTTPAccepted()
+ return webob.Response(status_int=202)
class Fixed_ips(extensions.ExtensionDescriptor):
diff --git a/nova/api/openstack/compute/contrib/flavor_access.py b/nova/api/openstack/compute/contrib/flavor_access.py
index a497f75824..5932406269 100644
--- a/nova/api/openstack/compute/contrib/flavor_access.py
+++ b/nova/api/openstack/compute/contrib/flavor_access.py
@@ -21,8 +21,8 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
soft_authorize = extensions.soft_extension_authorizer('compute',
@@ -46,7 +46,7 @@ def construct(self):
make_flavor(root)
alias = Flavor_access.alias
namespace = Flavor_access.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class FlavorsTemplate(xmlutil.TemplateBuilder):
@@ -56,7 +56,7 @@ def construct(self):
make_flavor(elem)
alias = Flavor_access.alias
namespace = Flavor_access.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class FlavorAccessTemplate(xmlutil.TemplateBuilder):
@@ -65,7 +65,7 @@ def construct(self):
elem = xmlutil.SubTemplateElement(root, 'access',
selector='flavor_access')
make_flavor_access(elem)
return xmlutil.MasterTemplate(root, 1)
def _marshall_flavor_access(flavor):
@@ -127,7 +127,7 @@ def _extend_flavor(self, flavor_rval, flavor_ref):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if soft_authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=FlavorTemplate())
db_flavor = req.get_db_flavor(id)
@@ -137,7 +137,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if soft_authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=FlavorsTemplate())
flavors = list(resp_obj.obj['flavors'])
@@ -149,7 +149,7 @@ def detail(self, req, resp_obj):
def create(self, req, body, resp_obj):
context = req.environ['nova.context']
if soft_authorize(context):
# Attach our slave template to the response object
resp_obj.attach(xml=FlavorTemplate())
db_flavor = req.get_db_flavor(resp_obj.obj['flavor']['id'])
diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py
index 8ba0db9d7f..5bbb23d26f 100644
--- a/nova/api/openstack/compute/contrib/flavorextraspecs.py
+++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py
@@ -15,6 +15,7 @@
"""The instance type extra specs extension."""
+import six
from webob import exc
from nova.api.openstack import extensions
@@ -22,8 +23,8 @@
from nova.api.openstack import xmlutil
from nova.compute import flavors
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova import utils
authorize = extensions.extension_authorizer('compute', 'flavorextraspecs')
@@ -63,6 +64,10 @@ def _check_extra_specs(self, specs):
try:
flavors.validate_extra_spec_keys(specs.keys())
+ except TypeError:
+ msg = _("Fail to validate provided extra specs keys. "
+ "Expected string")
+ raise exc.HTTPBadRequest(explanation=msg)
except exception.InvalidInput as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
@@ -70,6 +75,11 @@ def _check_extra_specs(self, specs):
try:
utils.check_string_length(key, 'extra_specs key',
min_length=1, max_length=255)
+
+ # NOTE(dims): The following check was added for backwards
+ # compatibility.
+                if isinstance(value, (int, long, float)):
+ value = six.text_type(value)
utils.check_string_length(value, 'extra_specs value',
max_length=255)
except exception.InvalidInput as error:
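
The back-compat shim above in isolation: numeric extra-spec values that older clients send are coerced to text before the length check rather than rejected (``long`` is dropped here so the sketch also runs on Python 3)::

    import six

    def normalize_spec_value(value):
        if isinstance(value, (int, float)):
            value = six.text_type(value)
        return value

    assert normalize_spec_value(300) == u'300'
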
diff --git a/nova/api/openstack/compute/contrib/flavormanage.py b/nova/api/openstack/compute/contrib/flavormanage.py
index fe6b170eee..8e7e93ffc1 100644
--- a/nova/api/openstack/compute/contrib/flavormanage.py
+++ b/nova/api/openstack/compute/contrib/flavormanage.py
@@ -18,7 +18,7 @@
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'flavormanage')
@@ -39,7 +39,7 @@ def _delete(self, req, id):
try:
flavor = flavors.get_flavor_by_flavor_id(
id, ctxt=context, read_deleted="no")
- except exception.NotFound as e:
+ except exception.FlavorNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
flavors.destroy(flavor['name'])
@@ -56,10 +56,26 @@ def _create(self, req, body):
raise webob.exc.HTTPBadRequest(explanation=msg)
vals = body['flavor']
name = vals.get('name')
+ if name is None:
+ msg = _("A valid name parameter is required")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
flavorid = vals.get('id')
memory = vals.get('ram')
+ if memory is None:
+ msg = _("A valid ram parameter is required")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
vcpus = vals.get('vcpus')
+ if vcpus is None:
+ msg = _("A valid vcpus parameter is required")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
root_gb = vals.get('disk')
+ if root_gb is None:
+ msg = _("A valid disk parameter is required")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
ephemeral_gb = vals.get('OS-FLV-EXT-DATA:ephemeral', 0)
swap = vals.get('swap', 0)
rxtx_factor = vals.get('rxtx_factor', 1.0)
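
The four near-identical required-parameter checks could also be written as one loop; an equivalent refactoring sketch, not what the patch does::

    import webob.exc

    def require_flavor_params(vals):
        for param in ('name', 'ram', 'vcpus', 'disk'):
            if vals.get(param) is None:
                msg = 'A valid %s parameter is required' % param
                raise webob.exc.HTTPBadRequest(explanation=msg)
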
diff --git a/nova/api/openstack/compute/contrib/floating_ip_dns.py b/nova/api/openstack/compute/contrib/floating_ip_dns.py
index 3e5fb9b5e2..771c25dfb7 100644
--- a/nova/api/openstack/compute/contrib/floating_ip_dns.py
+++ b/nova/api/openstack/compute/contrib/floating_ip_dns.py
@@ -20,8 +20,8 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
+from nova.i18n import _
from nova import network
-from nova.openstack.common.gettextutils import _
from nova import utils
diff --git a/nova/api/openstack/compute/contrib/floating_ip_pools.py b/nova/api/openstack/compute/contrib/floating_ip_pools.py
index b1165d1c33..7ca9831f1f 100644
--- a/nova/api/openstack/compute/contrib/floating_ip_pools.py
+++ b/nova/api/openstack/compute/contrib/floating_ip_pools.py
@@ -21,16 +21,16 @@
authorize = extensions.extension_authorizer('compute', 'floating_ip_pools')
-def _translate_floating_ip_view(pool):
+def _translate_floating_ip_view(pool_name):
return {
- 'name': pool['name'],
+ 'name': pool_name,
}
def _translate_floating_ip_pools_view(pools):
return {
- 'floating_ip_pools': [_translate_floating_ip_view(pool)
- for pool in pools]
+ 'floating_ip_pools': [_translate_floating_ip_view(pool_name)
+ for pool_name in pools]
}
diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py
index bc1f18eb56..bb6c342f11 100644
--- a/nova/api/openstack/compute/contrib/floating_ips.py
+++ b/nova/api/openstack/compute/contrib/floating_ips.py
@@ -24,8 +24,9 @@
from nova import compute
from nova.compute import utils as compute_utils
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LW
from nova import network
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
@@ -169,6 +170,8 @@ def create(self, req, body=None):
else:
msg = _("IP allocation over quota.")
raise webob.exc.HTTPForbidden(explanation=msg)
+ except exception.FloatingIpPoolNotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
return _translate_floating_ip_view(ip)
@@ -248,8 +251,8 @@ def _add_floating_ip(self, req, id, body):
if not fixed_address:
fixed_address = fixed_ips[0]['address']
if len(fixed_ips) > 1:
- msg = _('multiple fixed_ips exist, using the first: %s')
- LOG.warning(msg, fixed_address)
+ LOG.warn(_LW('multiple fixed_ips exist, using the first: '
+ '%s'), fixed_address)
try:
self.network_api.associate_floating_ip(context, instance,
diff --git a/nova/api/openstack/compute/contrib/floating_ips_bulk.py b/nova/api/openstack/compute/contrib/floating_ips_bulk.py
index 4044ce2bbe..6b75e89e9f 100644
--- a/nova/api/openstack/compute/contrib/floating_ips_bulk.py
+++ b/nova/api/openstack/compute/contrib/floating_ips_bulk.py
@@ -18,8 +18,8 @@
from nova.api.openstack import extensions
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
CONF = cfg.CONF
@@ -85,8 +85,6 @@ def create(self, req, body):
raise webob.exc.HTTPUnprocessableEntity()
params = body['floating_ips_bulk_create']
- LOG.debug(params)
-
if 'ip_range' not in params:
raise webob.exc.HTTPUnprocessableEntity()
ip_range = params['ip_range']
diff --git a/nova/api/openstack/compute/contrib/fping.py b/nova/api/openstack/compute/contrib/fping.py
index 2b8a753426..28128188f1 100644
--- a/nova/api/openstack/compute/contrib/fping.py
+++ b/nova/api/openstack/compute/contrib/fping.py
@@ -24,7 +24,7 @@
from nova.api.openstack import extensions
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import utils
authorize = extensions.extension_authorizer('compute', 'fping')
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 94720773b8..6a08836b71 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -22,7 +22,7 @@
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -97,39 +97,41 @@ def __init__(self):
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
"""Returns a dict in the format:
- {'hosts': [{'host_name': 'some.host.name',
- 'service': 'cells',
- 'zone': 'internal'},
- {'host_name': 'some.other.host.name',
- 'service': 'cells',
- 'zone': 'internal'},
- {'host_name': 'some.celly.host.name',
- 'service': 'cells',
- 'zone': 'internal'},
- {'host_name': 'console1.host.com',
- 'service': 'consoleauth',
- 'zone': 'internal'},
- {'host_name': 'network1.host.com',
- 'service': 'network',
- 'zone': 'internal'},
- {'host_name': 'netwwork2.host.com',
- 'service': 'network',
- 'zone': 'internal'},
- {'host_name': 'compute1.host.com',
- 'service': 'compute',
- 'zone': 'nova'},
- {'host_name': 'compute2.host.com',
- 'service': 'compute',
- 'zone': 'nova'},
- {'host_name': 'sched1.host.com',
- 'service': 'scheduler',
- 'zone': 'internal'},
- {'host_name': 'sched2.host.com',
- 'service': 'scheduler',
- 'zone': 'internal'},
- {'host_name': 'vol1.host.com',
- 'service': 'volume'},
- 'zone': 'internal']}
+
+ | {'hosts': [{'host_name': 'some.host.name',
+ | 'service': 'cells',
+ | 'zone': 'internal'},
+ | {'host_name': 'some.other.host.name',
+ | 'service': 'cells',
+ | 'zone': 'internal'},
+ | {'host_name': 'some.celly.host.name',
+ | 'service': 'cells',
+ | 'zone': 'internal'},
+ | {'host_name': 'console1.host.com',
+ | 'service': 'consoleauth',
+ | 'zone': 'internal'},
+ | {'host_name': 'network1.host.com',
+ | 'service': 'network',
+ | 'zone': 'internal'},
+        | {'host_name': 'network2.host.com',
+ | 'service': 'network',
+ | 'zone': 'internal'},
+ | {'host_name': 'compute1.host.com',
+ | 'service': 'compute',
+ | 'zone': 'nova'},
+ | {'host_name': 'compute2.host.com',
+ | 'service': 'compute',
+ | 'zone': 'nova'},
+ | {'host_name': 'sched1.host.com',
+ | 'service': 'scheduler',
+ | 'zone': 'internal'},
+ | {'host_name': 'sched2.host.com',
+ | 'service': 'scheduler',
+ | 'zone': 'internal'},
+ | {'host_name': 'vol1.host.com',
+ | 'service': 'volume',
+ | 'zone': 'internal'}]}
+
"""
context = req.environ['nova.context']
authorize(context)
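The docstring rework above is presentation-only: prefixing each line with '|' turns the sample response into a reST line block, so Sphinx keeps the explicit line breaks instead of reflowing the dict into one paragraph. A minimal illustration of the convention (hypothetical function, same markup):

    def index():
        """Returns a dict in the format:

        | {'hosts': [{'host_name': 'some.host.name',
        |             'service': 'compute',
        |             'zone': 'nova'}]}

        """
        return {'hosts': [{'host_name': 'some.host.name',
                           'service': 'compute',
                           'zone': 'nova'}]}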
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/234_placeholder.py b/nova/api/openstack/compute/contrib/hypervisor_status.py
similarity index 61%
rename from nova/db/sqlalchemy/migrate_repo/versions/234_placeholder.py
rename to nova/api/openstack/compute/contrib/hypervisor_status.py
index f5c5483cda..94bcabca48 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/234_placeholder.py
+++ b/nova/api/openstack/compute/contrib/hypervisor_status.py
@@ -1,3 +1,5 @@
+# Copyright 2014 Intel Corp.
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -10,17 +12,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-# This is a placeholder for Icehouse backports.
-# Do not use this number for new Juno work. New Juno work starts after
-# all the placeholders.
-#
-# See blueprint backportable-db-migrations-juno
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
+from nova.api.openstack import extensions
-def upgrade(migrate_engine):
- pass
+class Hypervisor_status(extensions.ExtensionDescriptor):
+ """Show hypervisor status."""
-def downgrade(migration_engine):
- pass
+ name = "HypervisorStatus"
+ alias = "os-hypervisor-status"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "hypervisor_status/api/v1.1")
+ updated = "2014-04-17T00:00:00Z"
diff --git a/nova/api/openstack/compute/contrib/hypervisors.py b/nova/api/openstack/compute/contrib/hypervisors.py
index 33d3ad0d06..d2df93fa82 100644
--- a/nova/api/openstack/compute/contrib/hypervisors.py
+++ b/nova/api/openstack/compute/contrib/hypervisors.py
@@ -22,7 +22,8 @@
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova import servicegroup
authorize = extensions.extension_authorizer('compute', 'hypervisors')
@@ -31,6 +32,8 @@
def make_hypervisor(elem, detail):
elem.set('hypervisor_hostname')
elem.set('id')
+ elem.set('state')
+ elem.set('status')
if detail:
elem.set('vcpus')
elem.set('memory_mb')
@@ -52,6 +55,7 @@ def make_hypervisor(elem, detail):
selector='service')
service.set('id')
service.set('host')
+ service.set('disabled_reason')
class HypervisorIndexTemplate(xmlutil.TemplateBuilder):
@@ -128,6 +132,7 @@ class HypervisorsController(object):
def __init__(self, ext_mgr):
self.host_api = compute.HostAPI()
+ self.servicegroup_api = servicegroup.API()
super(HypervisorsController, self).__init__()
self.ext_mgr = ext_mgr
@@ -137,6 +142,13 @@ def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs):
'hypervisor_hostname': hypervisor['hypervisor_hostname'],
}
+ ext_status_loaded = self.ext_mgr.is_loaded('os-hypervisor-status')
+ if ext_status_loaded:
+ alive = self.servicegroup_api.service_is_up(hypervisor['service'])
+ hyp_dict['state'] = 'up' if alive else "down"
+ hyp_dict['status'] = (
+ 'disabled' if hypervisor['service']['disabled'] else 'enabled')
+
if detail and not servers:
fields = ('vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used',
@@ -153,6 +165,9 @@ def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs):
'id': hypervisor['service_id'],
'host': hypervisor['service']['host'],
}
+ if ext_status_loaded:
+ hyp_dict['service'].update(
+ disabled_reason=hypervisor['service']['disabled_reason'])
if servers:
hyp_dict['servers'] = [dict(name=serv['name'], uuid=serv['uuid'])
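The hunk above only adds 'state', 'status' and 'disabled_reason' when the os-hypervisor-status extension is loaded, which keeps the default response unchanged. A pure-Python sketch of that gating (the callables and dicts stand in for nova's servicegroup API and hypervisor records):

    def view_hypervisor(hyp, ext_loaded, service_is_up):
        out = {'id': hyp['id'],
               'hypervisor_hostname': hyp['hypervisor_hostname']}
        if ext_loaded:  # os-hypervisor-status
            out['state'] = 'up' if service_is_up(hyp['service']) else 'down'
            out['status'] = ('disabled' if hyp['service']['disabled']
                             else 'enabled')
        return out

    print(view_hypervisor({'id': 1, 'hypervisor_hostname': 'node1',
                           'service': {'disabled': False}},
                          ext_loaded=True, service_is_up=lambda svc: True))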
diff --git a/nova/api/openstack/compute/contrib/image_size.py b/nova/api/openstack/compute/contrib/image_size.py
index c746415170..e54e461307 100644
--- a/nova/api/openstack/compute/contrib/image_size.py
+++ b/nova/api/openstack/compute/contrib/image_size.py
@@ -29,7 +29,7 @@ def construct(self):
root = xmlutil.TemplateElement('images')
elem = xmlutil.SubTemplateElement(root, 'image', selector='images')
make_image(elem)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Image_size.alias: Image_size.namespace})
@@ -37,7 +37,7 @@ class ImageSizeTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('image', selector='image')
make_image(root)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Image_size.alias: Image_size.namespace})
@@ -51,7 +51,7 @@ def _extend_image(self, image, image_cache):
def show(self, req, resp_obj, id):
context = req.environ["nova.context"]
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ImageSizeTemplate())
image_resp = resp_obj.obj['image']
# image guaranteed to be in the cache due to the core API adding
@@ -63,7 +63,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ImagesSizeTemplate())
images_resp = list(resp_obj.obj['images'])
# images guaranteed to be in the cache due to the core API adding
diff --git a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
index d39318a0cd..90805469dc 100644
--- a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
+++ b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
@@ -21,7 +21,7 @@
from nova.api.openstack import extensions
from nova import compute
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import utils
CONF = cfg.CONF
diff --git a/nova/api/openstack/compute/contrib/keypairs.py b/nova/api/openstack/compute/contrib/keypairs.py
index 1c58675725..dd9c47873a 100644
--- a/nova/api/openstack/compute/contrib/keypairs.py
+++ b/nova/api/openstack/compute/contrib/keypairs.py
@@ -24,7 +24,7 @@
from nova.api.openstack import xmlutil
from nova.compute import api as compute_api
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'keypairs')
@@ -102,9 +102,7 @@ def create(self, req, body):
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
- raise webob.exc.HTTPRequestEntityTooLarge(
- explanation=msg,
- headers={'Retry-After': 0})
+ raise webob.exc.HTTPForbidden(explanation=msg)
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
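This 413-to-403 change recurs below (security_groups.py gets the same treatment): a quota violation is not retryable, so Forbidden fits better than RequestEntityTooLarge with a Retry-After header. A small sketch, assuming webob is installed:

    import webob.exc

    def keypair_quota_exceeded(msg):
        # quota errors now surface as 403 rather than 413
        return webob.exc.HTTPForbidden(explanation=msg)

    err = keypair_quota_exceeded("Quota exceeded, too many key pairs.")
    assert err.status_int == 403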
diff --git a/nova/api/openstack/compute/contrib/multinic.py b/nova/api/openstack/compute/contrib/multinic.py
index 6887c2ef8e..40a61a778e 100644
--- a/nova/api/openstack/compute/contrib/multinic.py
+++ b/nova/api/openstack/compute/contrib/multinic.py
@@ -22,7 +22,8 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.i18n import _LE
from nova.openstack.common import log as logging
@@ -52,11 +53,15 @@ def _add_fixed_ip(self, req, id, body):
# Validate the input entity
if 'networkId' not in body['addFixedIp']:
msg = _("Missing 'networkId' argument for addFixedIp")
- raise exc.HTTPUnprocessableEntity(explanation=msg)
+ raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_instance(context, id, want_objects=True)
network_id = body['addFixedIp']['networkId']
- self.compute_api.add_fixed_ip(context, instance, network_id)
+ try:
+ self.compute_api.add_fixed_ip(context, instance, network_id)
+ except exception.NoMoreFixedIps as e:
+ raise exc.HTTPBadRequest(explanation=e.format_message())
+
return webob.Response(status_int=202)
@wsgi.action('removeFixedIp')
@@ -68,7 +73,7 @@ def _remove_fixed_ip(self, req, id, body):
# Validate the input entity
if 'address' not in body['removeFixedIp']:
msg = _("Missing 'address' argument for removeFixedIp")
- raise exc.HTTPUnprocessableEntity(explanation=msg)
+ raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_instance(context, id,
want_objects=True)
@@ -77,7 +82,7 @@ def _remove_fixed_ip(self, req, id, body):
try:
self.compute_api.remove_fixed_ip(context, instance, address)
except exception.FixedIpNotFoundForSpecificInstance:
- LOG.exception(_("Unable to find address %r") % address,
+ LOG.exception(_LE("Unable to find address %r"), address,
instance=instance)
raise exc.HTTPBadRequest()
diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py
index f8005f1f24..751d75b831 100644
--- a/nova/api/openstack/compute/contrib/networks_associate.py
+++ b/nova/api/openstack/compute/contrib/networks_associate.py
@@ -10,13 +10,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+import webob
from webob import exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
+from nova.i18n import _
from nova import network
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -43,7 +44,7 @@ def _disassociate_host_only(self, req, id, body):
msg = _('Disassociate host is not implemented by the configured '
'Network API')
raise exc.HTTPNotImplemented(explanation=msg)
- return exc.HTTPAccepted()
+ return webob.Response(status_int=202)
@wsgi.action("disassociate_project")
def _disassociate_project_only(self, req, id, body):
@@ -60,7 +61,7 @@ def _disassociate_project_only(self, req, id, body):
'configured Network API')
raise exc.HTTPNotImplemented(explanation=msg)
- return exc.HTTPAccepted()
+ return webob.Response(status_int=202)
@wsgi.action("associate_host")
def _associate_host(self, req, id, body):
@@ -78,7 +79,7 @@ def _associate_host(self, req, id, body):
'Network API')
raise exc.HTTPNotImplemented(explanation=msg)
- return exc.HTTPAccepted()
+ return webob.Response(status_int=202)
class Networks_associate(extensions.ExtensionDescriptor):
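Returning webob.Response(status_int=202) instead of exc.HTTPAccepted() means the handler hands back a plain response object rather than an exception instance, so the WSGI stack serializes a clean, empty 202. A minimal sketch, assuming webob:

    import webob

    def associate_host_response():
        return webob.Response(status_int=202)

    assert associate_host_response().status_int == 202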
diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py
index fbc6b3aa83..6d773282df 100644
--- a/nova/api/openstack/compute/contrib/os_networks.py
+++ b/nova/api/openstack/compute/contrib/os_networks.py
@@ -21,17 +21,19 @@
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LI
from nova import network
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'networks')
authorize_view = extensions.extension_authorizer('compute',
'networks:view')
+extended_fields = ('mtu', 'dhcp_server', 'enable_dhcp', 'share_address')
-def network_dict(context, network):
+def network_dict(context, network, extended):
fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
@@ -45,6 +47,8 @@ def network_dict(context, network):
# are only visible if they are an admin.
if context.is_admin:
fields += admin_fields
+ if extended:
+ fields += extended_fields
result = dict((field, network.get(field)) for field in fields)
uuid = network.get('uuid')
if uuid:
@@ -56,14 +60,19 @@ def network_dict(context, network):
class NetworkController(wsgi.Controller):
- def __init__(self, network_api=None):
+ def __init__(self, network_api=None, ext_mgr=None):
self.network_api = network_api or network.API()
+ if ext_mgr:
+ self.extended = ext_mgr.is_loaded('os-extended-networks')
+ else:
+ self.extended = False
def index(self, req):
context = req.environ['nova.context']
authorize_view(context)
networks = self.network_api.get_all(context)
- result = [network_dict(context, net_ref) for net_ref in networks]
+ result = [network_dict(context, net_ref, self.extended)
+ for net_ref in networks]
return {'networks': result}
@wsgi.action("disassociate")
@@ -81,7 +90,7 @@ def _disassociate_host_and_project(self, req, id, body):
msg = _('Disassociate network is not implemented by the '
'configured Network API')
raise exc.HTTPNotImplemented(explanation=msg)
- return exc.HTTPAccepted()
+ return webob.Response(status_int=202)
def show(self, req, id):
context = req.environ['nova.context']
@@ -92,12 +101,12 @@ def show(self, req, id):
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
- return {'network': network_dict(context, network)}
+ return {'network': network_dict(context, network, self.extended)}
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
- LOG.info(_("Deleting network with id %s") % id)
+ LOG.info(_LI("Deleting network with id %s"), id)
try:
self.network_api.delete(context, id)
except exception.NetworkInUse as e:
@@ -105,14 +114,14 @@ def delete(self, req, id):
except exception.NetworkNotFound:
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
- return exc.HTTPAccepted()
+ return webob.Response(status_int=202)
def create(self, req, body):
context = req.environ['nova.context']
authorize(context)
def bad(e):
- return exc.HTTPUnprocessableEntity(explanation=e)
+ return exc.HTTPBadRequest(explanation=e)
if not (body and body.get("network")):
raise bad(_("Missing network in body"))
@@ -125,13 +134,31 @@ def bad(e):
if not cidr:
raise bad(_("Network cidr or cidr_v6 is required"))
- LOG.debug("Creating network with label %s", params["label"])
+ if params.get("project_id") == "":
+ params["project_id"] = None
- params["num_networks"] = 1
- params["network_size"] = netaddr.IPNetwork(cidr).size
+ LOG.debug("Creating network with label %s", params["label"])
- network = self.network_api.create(context, **params)[0]
- return {"network": network_dict(context, network)}
+ try:
+ params["num_networks"] = 1
+ try:
+ params["network_size"] = netaddr.IPNetwork(cidr).size
+ except netaddr.AddrFormatError:
+ raise exception.InvalidCidr(cidr=cidr)
+ if not self.extended:
+ create_params = ('allowed_start', 'allowed_end')
+ for field in extended_fields + create_params:
+ if field in params:
+ del params[field]
+
+ network = self.network_api.create(context, **params)[0]
+ except exception.NovaException as ex:
+ if ex.code == 400:
+ raise bad(ex.format_message())
+ elif ex.code == 409:
+ raise exc.HTTPConflict(explanation=ex.format_message())
+ raise
+ return {"network": network_dict(context, network, self.extended)}
def add(self, req, body):
context = req.environ['nova.context']
@@ -176,7 +203,7 @@ def get_resources(self):
collection_actions = {'add': 'POST'}
res = extensions.ResourceExtension(
'os-networks',
- NetworkController(),
+ NetworkController(ext_mgr=self.ext_mgr),
member_actions=member_actions,
collection_actions=collection_actions)
return [res]
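The os-extended-networks gating above works in both directions: extended fields are added to responses only when the extension is loaded, and stripped from create requests when it is not. A dict-level sketch of the response side (field names copied from the hunk, data illustrative):

    extended_fields = ('mtu', 'dhcp_server', 'enable_dhcp', 'share_address')

    def network_dict(net, extended):
        fields = ('id', 'cidr', 'label')
        if extended:
            fields += extended_fields
        return dict((f, net.get(f)) for f in fields)

    net = {'id': 1, 'cidr': '10.0.0.0/24', 'label': 'demo', 'mtu': 1500}
    print(network_dict(net, extended=False))  # no 'mtu' key
    print(network_dict(net, extended=True))   # includes 'mtu'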
diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py
index 90ad2ba529..5e4eabd787 100644
--- a/nova/api/openstack/compute/contrib/os_tenant_networks.py
+++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py
@@ -17,13 +17,16 @@
import netaddr
import netaddr.core as netexc
from oslo.config import cfg
+import webob
from webob import exc
from nova.api.openstack import extensions
from nova import context as nova_context
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LI
import nova.network
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import quota
@@ -65,7 +68,7 @@
def network_dict(network):
return {"id": network.get("uuid") or network.get("id"),
- "cidr": network.get("cidr"),
+ "cidr": str(network.get("cidr")),
"label": network.get("label")}
@@ -80,7 +83,7 @@ def _refresh_default_networks(self):
try:
self._default_networks = self._get_default_networks()
except Exception:
- LOG.exception(_("Failed to get default networks"))
+ LOG.exception(_LE("Failed to get default networks"))
def _get_default_networks(self):
project_id = CONF.neutron_default_tenant_id
@@ -94,7 +97,7 @@ def _get_default_networks(self):
def index(self, req):
context = req.environ['nova.context']
authorize(context)
- networks = self.network_api.get_all(context)
+ networks = list(self.network_api.get_all(context))
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
@@ -114,29 +117,38 @@ def show(self, req, id):
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
+ reservation = None
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=-1)
except Exception:
reservation = None
- LOG.exception(_("Failed to update usages deallocating "
- "network."))
+ LOG.exception(_LE("Failed to update usages deallocating "
+ "network."))
- LOG.info(_("Deleting network with id %s") % id)
+ LOG.info(_LI("Deleting network with id %s"), id)
+
+ def _rollback_quota(reservation):
+ if CONF.enable_network_quota and reservation:
+ QUOTAS.rollback(context, reservation)
try:
self.network_api.delete(context, id)
- if CONF.enable_network_quota and reservation:
- QUOTAS.commit(context, reservation)
- response = exc.HTTPAccepted()
except exception.PolicyNotAuthorized as e:
+ _rollback_quota(reservation)
raise exc.HTTPForbidden(explanation=str(e))
except exception.NetworkInUse as e:
+ _rollback_quota(reservation)
raise exc.HTTPConflict(explanation=e.format_message())
except exception.NetworkNotFound:
+ _rollback_quota(reservation)
msg = _("Network not found")
raise exc.HTTPNotFound(explanation=msg)
+ if CONF.enable_network_quota and reservation:
+ QUOTAS.commit(context, reservation)
+ response = webob.Response(status_int=202)
+
return response
def create(self, req, body):
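The delete() rework above follows the standard reservation lifecycle: reserve up front, roll back on every failure path, and commit only once the delete has actually succeeded. A condensed sketch of that shape (QUOTAS and delete_fn are stand-ins):

    def delete_network(QUOTAS, context, delete_fn, net_id, quota_on=True):
        reservation = QUOTAS.reserve(context, networks=-1) if quota_on else None
        try:
            delete_fn(context, net_id)
        except Exception:
            if reservation:
                QUOTAS.rollback(context, reservation)  # undo the reserve
            raise
        if reservation:
            QUOTAS.commit(context, reservation)        # success: commit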
diff --git a/nova/api/openstack/compute/contrib/quota_classes.py b/nova/api/openstack/compute/contrib/quota_classes.py
index 755bdefc9c..bb034a0b4d 100644
--- a/nova/api/openstack/compute/contrib/quota_classes.py
+++ b/nova/api/openstack/compute/contrib/quota_classes.py
@@ -21,7 +21,7 @@
import nova.context
from nova import db
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import quota
from nova import utils
diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py
index 46bd353924..8468a41d86 100644
--- a/nova/api/openstack/compute/contrib/quotas.py
+++ b/nova/api/openstack/compute/contrib/quotas.py
@@ -20,9 +20,9 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
import nova.context
-from nova import db
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import quota
@@ -196,11 +196,11 @@ def update(self, req, id, body):
maximum = settable_quotas[key]['maximum']
self._validate_quota_limit(value, minimum, maximum)
try:
- db.quota_create(context, project_id, key, value,
- user_id=user_id)
+ objects.Quotas.create_limit(context, project_id,
+ key, value, user_id=user_id)
except exception.QuotaExists:
- db.quota_update(context, project_id, key, value,
- user_id=user_id)
+ objects.Quotas.update_limit(context, project_id,
+ key, value, user_id=user_id)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return {'quota_set': self._get_quotas(context, id, user_id=user_id)}
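The db-to-objects switch above keeps the create-then-update-on-conflict upsert. A stand-in sketch of the control flow (QuotaExists and the two helpers mimic the objects.Quotas interface rather than import it):

    class QuotaExists(Exception):
        pass

    def create_limit(store, key, value):
        if key in store:
            raise QuotaExists()
        store[key] = value

    def update_limit(store, key, value):
        store[key] = value

    def set_quota_limit(store, key, value):
        try:
            create_limit(store, key, value)
        except QuotaExists:
            update_limit(store, key, value)

    limits = {}
    set_quota_limit(limits, 'instances', 10)  # create path
    set_quota_limit(limits, 'instances', 20)  # update path
    assert limits['instances'] == 20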
diff --git a/nova/api/openstack/compute/contrib/rescue.py b/nova/api/openstack/compute/contrib/rescue.py
index b5f9755b21..47ff29f71d 100644
--- a/nova/api/openstack/compute/contrib/rescue.py
+++ b/nova/api/openstack/compute/contrib/rescue.py
@@ -23,7 +23,7 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import utils
@@ -74,10 +74,6 @@ def _rescue(self, req, id, body):
except exception.InstanceNotRescuable as non_rescuable:
raise exc.HTTPBadRequest(
explanation=non_rescuable.format_message())
- except NotImplementedError:
- msg = _("The rescue operation is not implemented by this "
- "cloud.")
- raise exc.HTTPNotImplemented(explanation=msg)
return {'adminPass': password}
@@ -94,9 +90,6 @@ def _unrescue(self, req, id, body):
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unrescue')
- except NotImplementedError:
- msg = _("The unrescue operation is not implemented by this cloud.")
- raise exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
diff --git a/nova/api/openstack/compute/contrib/scheduler_hints.py b/nova/api/openstack/compute/contrib/scheduler_hints.py
index 2b2c129961..c1d69413a4 100644
--- a/nova/api/openstack/compute/contrib/scheduler_hints.py
+++ b/nova/api/openstack/compute/contrib/scheduler_hints.py
@@ -16,7 +16,7 @@
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
class SchedulerHintsController(wsgi.Controller):
diff --git a/nova/api/openstack/compute/contrib/security_group_default_rules.py b/nova/api/openstack/compute/contrib/security_group_default_rules.py
index 6216cc3365..fa73368527 100644
--- a/nova/api/openstack/compute/contrib/security_group_default_rules.py
+++ b/nova/api/openstack/compute/contrib/security_group_default_rules.py
@@ -20,8 +20,8 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
+from nova.i18n import _
from nova.network.security_group import openstack_driver
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import xmlutils
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index a9e4fe2c86..102d2fad2b 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -17,7 +17,6 @@
"""The security groups extension."""
import contextlib
-import json
import webob
from webob import exc
@@ -29,9 +28,10 @@
from nova import compute
from nova.compute import api as compute_api
from nova import exception
+from nova.i18n import _
from nova.network.security_group import neutron_driver
from nova.network.security_group import openstack_driver
-from nova.openstack.common.gettextutils import _
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import xmlutils
from nova.virt import netutils
@@ -194,7 +194,7 @@ def translate_exceptions():
raise exc.HTTPNotFound(explanation=msg)
except exception.SecurityGroupLimitExceeded as exp:
msg = exp.format_message()
- raise exc.HTTPRequestEntityTooLarge(explanation=msg)
+ raise exc.HTTPForbidden(explanation=msg)
except exception.NoUniqueMatch as exp:
msg = exp.format_message()
raise exc.HTTPConflict(explanation=msg)
@@ -209,7 +209,12 @@ def __init__(self):
self.compute_api = compute.API(
security_group_api=self.security_group_api)
- def _format_security_group_rule(self, context, rule):
+ def _format_security_group_rule(self, context, rule, group_rule_data=None):
+ """Return a security group rule in the desired API response format.
+
+ If group_rule_data is passed in, it is used rather than querying
+ for it.
+ """
sg_rule = {}
sg_rule['id'] = rule['id']
sg_rule['parent_group_id'] = rule['parent_group_id']
@@ -236,6 +241,8 @@ def _format_security_group_rule(self, context, rule):
return
sg_rule['group'] = {'name': source_group.get('name'),
'tenant_id': source_group.get('project_id')}
+ elif group_rule_data:
+ sg_rule['group'] = group_rule_data
else:
sg_rule['ip_range'] = {'cidr': rule['cidr']}
return sg_rule
@@ -395,23 +402,22 @@ def create(self, req, body):
msg = _("Bad prefix for network in cidr %s") % new_rule['cidr']
raise exc.HTTPBadRequest(explanation=msg)
+ group_rule_data = None
with translate_exceptions():
+ if sg_rule.get('group_id'):
+ source_group = self.security_group_api.get(
+ context, id=sg_rule['group_id'])
+ group_rule_data = {'name': source_group.get('name'),
+ 'tenant_id': source_group.get('project_id')}
+
security_group_rule = (
self.security_group_api.create_security_group_rule(
context, security_group, new_rule))
formatted_rule = self._format_security_group_rule(context,
- security_group_rule)
- if formatted_rule:
- return {"security_group_rule": formatted_rule}
-
- # TODO(arosen): if we first look up the security group information for
- # the group_id before creating the rule we can avoid the case that
- # the remote group (group_id) has been deleted when we go to look
- # up it's name.
- with translate_exceptions():
- raise exception.SecurityGroupNotFound(
- security_group_id=sg_rule['group_id'])
+ security_group_rule,
+ group_rule_data)
+ return {"security_group_rule": formatted_rule}
def _rule_args_to_dict(self, context, to_port=None, from_port=None,
ip_protocol=None, cidr=None, group_id=None):
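The create() rework above implements the old TODO(arosen) suggestion: resolve the source group before creating the rule, so a remote group deleted mid-request can no longer leave the formatter with nothing to report. A sketch of the reordering (the callables stand in for security_group_api methods):

    def create_rule(get_group, create, context, security_group, sg_rule):
        group_rule_data = None
        if sg_rule.get('group_id'):
            # look up the remote group first, before the rule exists
            source_group = get_group(context, sg_rule['group_id'])
            group_rule_data = {'name': source_group.get('name'),
                               'tenant_id': source_group.get('project_id')}
        rule = create(context, security_group, sg_rule)
        return rule, group_rule_data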
@@ -559,7 +565,7 @@ def _extend_servers(self, req, servers):
else:
try:
# try converting to json
- req_obj = json.loads(req.body)
+ req_obj = jsonutils.loads(req.body)
# Add security group to server, if no security group was in
# request add default since that is the group it is part of
servers[0][key] = req_obj['server'].get(
diff --git a/nova/api/openstack/compute/contrib/server_diagnostics.py b/nova/api/openstack/compute/contrib/server_diagnostics.py
index 9da8e1e753..330cff14d7 100644
--- a/nova/api/openstack/compute/contrib/server_diagnostics.py
+++ b/nova/api/openstack/compute/contrib/server_diagnostics.py
@@ -21,6 +21,7 @@
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
+from nova.i18n import _
authorize = extensions.extension_authorizer('compute', 'server_diagnostics')
@@ -37,21 +38,27 @@ def construct(self):
class ServerDiagnosticsController(object):
+ def __init__(self):
+ self.compute_api = compute.API()
+
@wsgi.serializers(xml=ServerDiagnosticsTemplate)
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
- compute_api = compute.API()
try:
- instance = compute_api.get(context, server_id, want_objects=True)
+ instance = self.compute_api.get(context, server_id,
+ want_objects=True)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
try:
- return compute_api.get_diagnostics(context, instance)
+ return self.compute_api.get_diagnostics(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'get_diagnostics')
+ except NotImplementedError:
+ msg = _("Unable to get diagnostics, functionality not implemented")
+ raise webob.exc.HTTPNotImplemented(explanation=msg)
class Server_diagnostics(extensions.ExtensionDescriptor):
@@ -65,7 +72,7 @@ class Server_diagnostics(extensions.ExtensionDescriptor):
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
- #NOTE(bcwaldon): This should be prefixed with 'os-'
+ # NOTE(bcwaldon): This should be prefixed with 'os-'
ext = extensions.ResourceExtension('diagnostics',
ServerDiagnosticsController(),
parent=parent_def)
diff --git a/nova/api/openstack/compute/contrib/server_external_events.py b/nova/api/openstack/compute/contrib/server_external_events.py
index 97dd23b532..9c3cd4f27f 100644
--- a/nova/api/openstack/compute/contrib/server_external_events.py
+++ b/nova/api/openstack/compute/contrib/server_external_events.py
@@ -19,9 +19,9 @@
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.objects import external_event as external_event_obj
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
@@ -70,8 +70,9 @@ def create(self, req, body):
context = req.environ['nova.context']
authorize(context, action='create')
- events = []
- accepted = []
+ response_events = []
+ accepted_events = []
+ accepted_instances = set()
instances = {}
result = 200
@@ -101,8 +102,8 @@ def create(self, req, body):
raise webob.exc.HTTPBadRequest(
_('Invalid event status `%s\'') % event.status)
- events.append(_event)
- if event.instance_uuid not in instances:
+ instance = instances.get(event.instance_uuid)
+ if not instance:
try:
instance = objects.Instance.get_by_uuid(
context, event.instance_uuid)
@@ -115,24 +116,41 @@ def create(self, req, body):
_event['code'] = 404
result = 207
- if event.instance_uuid in instances:
- accepted.append(event)
- _event['code'] = 200
- LOG.audit(_('Create event %(name)s:%(tag)s for instance '
- '%(instance_uuid)s'),
- dict(event.iteritems()))
-
- if accepted:
- self.compute_api.external_instance_event(context,
- instances.values(),
- accepted)
+ # NOTE: before accepting the event, make sure the instance
+ # for which the event is sent is assigned to a host; otherwise
+ # it will not be possible to dispatch the event
+ if instance:
+ if instance.host:
+ accepted_events.append(event)
+ accepted_instances.add(instance)
+ LOG.audit(_('Creating event %(name)s:%(tag)s for instance '
+ '%(instance_uuid)s'),
+ dict(event.iteritems()))
+ # NOTE: as the event is processed asynchronously, verify
+ # whether 202 is a more suitable response code than 200
+ _event['status'] = 'completed'
+ _event['code'] = 200
+ else:
+ LOG.debug("Unable to find a host for instance "
+ "%(instance)s. Dropping event %(event)s",
+ {'instance': event.instance_uuid,
+ 'event': event.name})
+ _event['status'] = 'failed'
+ _event['code'] = 422
+ result = 207
+
+ response_events.append(_event)
+
+ if accepted_events:
+ self.compute_api.external_instance_event(
+ context, accepted_instances, accepted_events)
else:
msg = _('No instances found for any event')
raise webob.exc.HTTPNotFound(explanation=msg)
# FIXME(cyeoh): This needs some infrastructure support so that
# we have a general way to do this
- robj = wsgi.ResponseObject({'events': events})
+ robj = wsgi.ResponseObject({'events': response_events})
robj._code = result
return robj
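The rework above gives every submitted event its own status/code pair and switches the overall result to 207 when outcomes are mixed; only events whose instance is assigned to a host get dispatched. A condensed sketch of the triage (plain dicts stand in for the event and instance objects):

    def triage(events, instances):
        responses, accepted, result = [], [], 200
        for ev in events:
            inst = instances.get(ev['instance_uuid'])
            if inst and inst.get('host'):
                accepted.append(ev)
                responses.append(dict(ev, status='completed', code=200))
            else:
                # no instance -> 404, instance without a host -> 422
                code = 404 if inst is None else 422
                responses.append(dict(ev, status='failed', code=code))
                result = 207
        return responses, accepted, result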
diff --git a/nova/api/openstack/compute/contrib/server_groups.py b/nova/api/openstack/compute/contrib/server_groups.py
index 0d10596ecd..fdb2d3d0ce 100644
--- a/nova/api/openstack/compute/contrib/server_groups.py
+++ b/nova/api/openstack/compute/contrib/server_groups.py
@@ -23,8 +23,8 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
import nova.exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova import utils
# NOTE(russellb) There is one other policy, 'legacy', but we don't allow that
@@ -137,7 +137,9 @@ def _format_server_group(self, context, group):
server_group['id'] = group.uuid
server_group['name'] = group.name
server_group['policies'] = group.policies or []
- server_group['metadata'] = group.metadetails or {}
+ # NOTE(danms): This has been exposed to the user, but never used.
+ # Since we can't remove it, just make sure it's always empty.
+ server_group['metadata'] = {}
members = []
if group.members:
# Display the instances that are not deleted.
diff --git a/nova/api/openstack/compute/contrib/server_list_multi_status.py b/nova/api/openstack/compute/contrib/server_list_multi_status.py
new file mode 100644
index 0000000000..bdcb2f883f
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/server_list_multi_status.py
@@ -0,0 +1,25 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack import extensions
+
+
+class Server_list_multi_status(extensions.ExtensionDescriptor):
+ """Allow multiple status values to be specified at once in the
+ servers list API.
+ """
+
+ name = "ServerListMultiStatus"
+ alias = "os-server-list-multi-status"
+ namespace = ("http://docs.openstack.org/compute/ext/"
+ "os-server-list-multi-status/api/v2")
+ updated = "2014-05-11T00:00:00Z"
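A usage-side illustration only; the servers-index filtering itself is not part of this hunk, and the query shape is inferred from the extension description:

    # GET /v2/{tenant_id}/servers?status=ACTIVE&status=ERROR
    try:
        from urllib.parse import urlencode   # Python 3
    except ImportError:
        from urllib import urlencode         # Python 2, the codebase era
    query = urlencode([('status', 'ACTIVE'), ('status', 'ERROR')])
    print('/v2/{tenant_id}/servers?' + query)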
diff --git a/nova/api/openstack/compute/contrib/server_start_stop.py b/nova/api/openstack/compute/contrib/server_start_stop.py
index d08f239e7f..96c9f11c58 100644
--- a/nova/api/openstack/compute/contrib/server_start_stop.py
+++ b/nova/api/openstack/compute/contrib/server_start_stop.py
@@ -18,8 +18,8 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
diff --git a/nova/api/openstack/compute/contrib/server_usage.py b/nova/api/openstack/compute/contrib/server_usage.py
index 4dd5aa278a..9080d1392b 100644
--- a/nova/api/openstack/compute/contrib/server_usage.py
+++ b/nova/api/openstack/compute/contrib/server_usage.py
@@ -41,7 +41,7 @@ def _extend_server(self, server, instance):
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ServerUsageTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
@@ -53,7 +53,7 @@ def show(self, req, resp_obj, id):
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
- # Attach our slave template to the response object
+ # Attach our subordinate template to the response object
resp_obj.attach(xml=ServerUsagesTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
@@ -89,7 +89,7 @@ class ServerUsageTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Server_usage.alias: Server_usage.namespace})
@@ -98,5 +98,5 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
- return xmlutil.SlaveTemplate(root, 1, nsmap={
+ return xmlutil.SubordinateTemplate(root, 1, nsmap={
Server_usage.alias: Server_usage.namespace})
diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py
index 0c1509ab47..6ec91e6b01 100644
--- a/nova/api/openstack/compute/contrib/services.py
+++ b/nova/api/openstack/compute/contrib/services.py
@@ -20,7 +20,7 @@
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import servicegroup
from nova import utils
diff --git a/nova/api/openstack/compute/contrib/shelve.py b/nova/api/openstack/compute/contrib/shelve.py
index 76d73e111d..d5532ca9bd 100644
--- a/nova/api/openstack/compute/contrib/shelve.py
+++ b/nova/api/openstack/compute/contrib/shelve.py
@@ -22,7 +22,7 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
auth_shelve = exts.extension_authorizer('compute', 'shelve')
diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
index 6b9c25cf9b..f261428fef 100644
--- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py
+++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py
@@ -23,9 +23,9 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.objects import instance as instance_obj
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
authorize_show = extensions.extension_authorizer('compute',
@@ -218,17 +218,18 @@ def _parse_datetime(self, dtstr):
value = timeutils.utcnow()
elif isinstance(dtstr, datetime.datetime):
value = dtstr
- for fmt in ["%Y-%m-%dT%H:%M:%S",
- "%Y-%m-%dT%H:%M:%S.%f",
- "%Y-%m-%d %H:%M:%S.%f"]:
- try:
- value = parse_strtime(dtstr, fmt)
- break
- except exception.InvalidStrTime:
- pass
else:
- msg = _("Datetime is in invalid format")
- raise exception.InvalidStrTime(reason=msg)
+ for fmt in ["%Y-%m-%dT%H:%M:%S",
+ "%Y-%m-%dT%H:%M:%S.%f",
+ "%Y-%m-%d %H:%M:%S.%f"]:
+ try:
+ value = parse_strtime(dtstr, fmt)
+ break
+ except exception.InvalidStrTime:
+ pass
+ else:
+ msg = _("Datetime is in invalid format")
+ raise exception.InvalidStrTime(reason=msg)
# NOTE(mriedem): Instance object DateTime fields are timezone-aware
# so we have to force UTC timezone for comparing this datetime against
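The restructuring above leans on Python's for/else: the else clause runs only when the loop completes without a break, which is exactly the "no format matched" case. A self-contained sketch of the same shape, using datetime.strptime as a stand-in for parse_strtime:

    from datetime import datetime

    def parse_any(dtstr, fmts):
        for fmt in fmts:
            try:
                value = datetime.strptime(dtstr, fmt)
                break
            except ValueError:
                pass
        else:
            # reached only if no break fired, i.e. every format failed
            raise ValueError("Datetime is in invalid format")
        return value

    print(parse_any("2014-05-11T00:00:00", ["%Y-%m-%dT%H:%M:%S"]))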
diff --git a/nova/api/openstack/compute/contrib/used_limits.py b/nova/api/openstack/compute/contrib/used_limits.py
index 12b34cd265..4cfd3948dc 100644
--- a/nova/api/openstack/compute/contrib/used_limits.py
+++ b/nova/api/openstack/compute/contrib/used_limits.py
@@ -61,11 +61,11 @@ def index(self, req, resp_obj):
'totalSecurityGroupsUsed': 'security_groups',
}
used_limits = {}
- for display_name, quota in quota_map.iteritems():
- if quota in quotas:
- reserved = (quotas[quota]['reserved']
+ for display_name, key in quota_map.iteritems():
+ if key in quotas:
+ reserved = (quotas[key]['reserved']
if self._reserved(req) else 0)
- used_limits[display_name] = quotas[quota]['in_use'] + reserved
+ used_limits[display_name] = quotas[key]['in_use'] + reserved
resp_obj.obj['limits']['absolute'].update(used_limits)
diff --git a/nova/api/openstack/compute/contrib/virtual_interfaces.py b/nova/api/openstack/compute/contrib/virtual_interfaces.py
index a891ce6741..d071df461e 100644
--- a/nova/api/openstack/compute/contrib/virtual_interfaces.py
+++ b/nova/api/openstack/compute/contrib/virtual_interfaces.py
@@ -59,7 +59,8 @@ def __init__(self):
def _items(self, req, server_id, entity_maker):
"""Returns a list of VIFs, transformed through entity_maker."""
context = req.environ['nova.context']
- instance = common.get_instance(self.compute_api, context, server_id)
+ instance = common.get_instance(self.compute_api, context, server_id,
+ want_objects=True)
vifs = self.network_api.get_vifs_by_instance(context, instance)
limited_list = common.limited(vifs, req)
diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py
index 8a613c8bcc..b2fa26f5e0 100644
--- a/nova/api/openstack/compute/contrib/volumes.py
+++ b/nova/api/openstack/compute/contrib/volumes.py
@@ -24,8 +24,8 @@
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import uuidutils
diff --git a/nova/api/openstack/compute/extensions.py b/nova/api/openstack/compute/extensions.py
index af3e4af757..24ea65234f 100644
--- a/nova/api/openstack/compute/extensions.py
+++ b/nova/api/openstack/compute/extensions.py
@@ -16,7 +16,7 @@
from oslo.config import cfg
from nova.api.openstack import extensions as base_extensions
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
ext_opts = [
diff --git a/nova/api/openstack/compute/flavors.py b/nova/api/openstack/compute/flavors.py
index 2aa8a32d24..3871131beb 100644
--- a/nova/api/openstack/compute/flavors.py
+++ b/nova/api/openstack/compute/flavors.py
@@ -20,7 +20,7 @@
from nova.api.openstack import xmlutil
from nova.compute import flavors
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import strutils
from nova import utils
diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py
index 4f871762d0..ec67aa4d3e 100644
--- a/nova/api/openstack/compute/image_metadata.py
+++ b/nova/api/openstack/compute/image_metadata.py
@@ -18,20 +18,22 @@
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import exception
-from nova.image import glance
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+import nova.image
class Controller(object):
"""The image metadata API controller for the OpenStack API."""
def __init__(self):
- self.image_service = glance.get_default_image_service()
+ self.image_api = nova.image.API()
def _get_image(self, context, image_id):
try:
- return self.image_service.show(context, image_id)
- except exception.NotFound:
+ return self.image_api.get(context, image_id)
+ except exception.ImageNotAuthorized as e:
+ raise exc.HTTPForbidden(explanation=e.format_message())
+ except exception.ImageNotFound:
msg = _("Image not found.")
raise exc.HTTPNotFound(explanation=msg)
@@ -62,7 +64,8 @@ def create(self, req, image_id, body):
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
- image = self.image_service.update(context, image_id, image, None)
+ image = self.image_api.update(context, image_id, image, data=None,
+ purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=image['properties'])
@@ -90,7 +93,8 @@ def update(self, req, image_id, id, body):
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
- self.image_service.update(context, image_id, image, None)
+ self.image_api.update(context, image_id, image, data=None,
+ purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(meta=meta)
@@ -104,7 +108,8 @@ def update_all(self, req, image_id, body):
common.check_img_metadata_properties_quota(context, metadata)
image['properties'] = metadata
try:
- self.image_service.update(context, image_id, image, None)
+ self.image_api.update(context, image_id, image, data=None,
+ purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=metadata)
@@ -118,7 +123,8 @@ def delete(self, req, image_id, id):
raise exc.HTTPNotFound(explanation=msg)
image['properties'].pop(id)
try:
- self.image_service.update(context, image_id, image, None)
+ self.image_api.update(context, image_id, image, data=None,
+ purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
diff --git a/nova/api/openstack/compute/images.py b/nova/api/openstack/compute/images.py
index 13a3c0deba..0763bc7354 100644
--- a/nova/api/openstack/compute/images.py
+++ b/nova/api/openstack/compute/images.py
@@ -20,8 +20,8 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
-import nova.image.glance
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+import nova.image
import nova.utils
@@ -89,15 +89,9 @@ class Controller(wsgi.Controller):
_view_builder_class = views_images.ViewBuilder
- def __init__(self, image_service=None, **kwargs):
- """Initialize new `ImageController`.
-
- :param image_service: `nova.image.glance:GlanceImageService`
-
- """
+ def __init__(self, **kwargs):
super(Controller, self).__init__(**kwargs)
- self._image_service = (image_service or
- nova.image.glance.get_default_image_service())
+ self._image_api = nova.image.API()
def _get_filters(self, req):
"""Return a dictionary of query param filters from the request.
@@ -136,7 +130,7 @@ def show(self, req, id):
context = req.environ['nova.context']
try:
- image = self._image_service.show(context, id)
+ image = self._image_api.get(context, id)
except (exception.NotFound, exception.InvalidImageRef):
explanation = _("Image not found.")
raise webob.exc.HTTPNotFound(explanation=explanation)
@@ -152,7 +146,7 @@ def delete(self, req, id):
"""
context = req.environ['nova.context']
try:
- self._image_service.delete(context, id)
+ self._image_api.delete(context, id)
except exception.ImageNotFound:
explanation = _("Image not found.")
raise webob.exc.HTTPNotFound(explanation=explanation)
@@ -178,8 +172,8 @@ def index(self, req):
params[key] = val
try:
- images = self._image_service.detail(context, filters=filters,
- **page_params)
+ images = self._image_api.get_all(context, filters=filters,
+ **page_params)
except exception.Invalid as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
return self._view_builder.index(req, images)
@@ -198,8 +192,8 @@ def detail(self, req):
for key, val in page_params.iteritems():
params[key] = val
try:
- images = self._image_service.detail(context, filters=filters,
- **page_params)
+ images = self._image_api.get_all(context, filters=filters,
+ **page_params)
except exception.Invalid as e:
raise webob.exc.HTTPBadRequest(explanation=e.format_message())
diff --git a/nova/api/openstack/compute/ips.py b/nova/api/openstack/compute/ips.py
index 3117b6b498..7474145b95 100644
--- a/nova/api/openstack/compute/ips.py
+++ b/nova/api/openstack/compute/ips.py
@@ -20,7 +20,7 @@
from nova.api.openstack.compute.views import addresses as view_addresses
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
def make_network(elem):
@@ -68,12 +68,6 @@ def _get_instance(self, context, server_id):
raise exc.HTTPNotFound(explanation=msg)
return instance
- def create(self, req, server_id, body):
- raise exc.HTTPNotImplemented()
-
- def delete(self, req, server_id, id):
- raise exc.HTTPNotImplemented()
-
@wsgi.serializers(xml=AddressesTemplate)
def index(self, req, server_id):
context = req.environ["nova.context"]
diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py
index 90194088ac..0b52af405d 100644
--- a/nova/api/openstack/compute/limits.py
+++ b/nova/api/openstack/compute/limits.py
@@ -44,7 +44,7 @@
from nova.api.openstack.compute.views import limits as limits_views
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import quota
@@ -91,7 +91,8 @@ class LimitsController(object):
def index(self, req):
"""Return all global and rate limit information."""
context = req.environ['nova.context']
- quotas = QUOTAS.get_project_quotas(context, context.project_id,
+ project_id = req.params.get('tenant_id', context.project_id)
+ quotas = QUOTAS.get_project_quotas(context, project_id,
usages=False)
abs_limits = dict((k, v['limit']) for k, v in quotas.items())
rate_limits = req.environ.get("nova.limits", [])
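The effect of the one-line change above: a caller (in practice an admin; the policy check is not shown in this hunk) may pass ?tenant_id= to read another project's limits, with the caller's own project as the default. A dict-level sketch:

    def pick_project(params, context_project_id):
        return params.get('tenant_id', context_project_id)

    assert pick_project({}, 'own-project') == 'own-project'
    assert pick_project({'tenant_id': 'other'}, 'own-project') == 'other'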
diff --git a/nova/api/openstack/compute/plugins/__init__.py b/nova/api/openstack/compute/plugins/__init__.py
index c9f8605e2b..73857e2541 100644
--- a/nova/api/openstack/compute/plugins/__init__.py
+++ b/nova/api/openstack/compute/plugins/__init__.py
@@ -14,7 +14,7 @@
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/api/openstack/compute/plugins/v3/access_ips.py b/nova/api/openstack/compute/plugins/v3/access_ips.py
index 3804c031c2..3eca0d555d 100644
--- a/nova/api/openstack/compute/plugins/v3/access_ips.py
+++ b/nova/api/openstack/compute/plugins/v3/access_ips.py
@@ -16,12 +16,10 @@
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
from nova import utils
ALIAS = "os-access-ips"
-LOG = logging.getLogger(__name__)
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
@@ -90,7 +88,12 @@ def get_controller_extensions(self):
def get_resources(self):
return []
- def server_create(self, server_dict, create_kwargs):
+ # NOTE(gmann): This function is not supposed to use the
+ # 'body_deprecated_param' parameter, which exists only to handle the
+ # scheduler_hints extension for V2.1. It is made optional here to avoid
+ # changes to server_update & server_rebuild.
+ def server_create(self, server_dict, create_kwargs,
+ body_deprecated_param=None):
if AccessIPs.v4_key in server_dict:
access_ip_v4 = server_dict.get(AccessIPs.v4_key)
if access_ip_v4:
diff --git a/nova/api/openstack/compute/plugins/v3/admin_actions.py b/nova/api/openstack/compute/plugins/v3/admin_actions.py
index 326d9e7642..a5939be545 100644
--- a/nova/api/openstack/compute/plugins/v3/admin_actions.py
+++ b/nova/api/openstack/compute/plugins/v3/admin_actions.py
@@ -16,18 +16,19 @@
from webob import exc
from nova.api.openstack import common
+from nova.api.openstack.compute.schemas.v3 import reset_server_state
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
+from nova.api import validation
from nova import compute
from nova.compute import vm_states
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
-LOG = logging.getLogger(__name__)
ALIAS = "os-admin-actions"
# States usable in resetState action
+# NOTE: It is necessary to update the schema of nova/api/openstack/compute/
+# schemas/v3/reset_server_state.py, when updating this state_map.
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
@@ -71,18 +72,14 @@ def _inject_network_info(self, req, id, body):
@extensions.expected_errors((400, 404))
@wsgi.action('reset_state')
+ @validation.schema(reset_server_state.reset_state)
def _reset_state(self, req, id, body):
"""Permit admins to reset the state of a server."""
context = req.environ["nova.context"]
authorize(context, 'reset_state')
# Identify the desired state from the body
- try:
- state = state_map[body["reset_state"]["state"]]
- except (TypeError, KeyError):
- msg = _("Desired state must be specified. Valid states "
- "are: %s") % ', '.join(sorted(state_map.keys()))
- raise exc.HTTPBadRequest(explanation=msg)
+ state = state_map[body["reset_state"]["state"]]
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
diff --git a/nova/api/openstack/compute/plugins/v3/admin_password.py b/nova/api/openstack/compute/plugins/v3/admin_password.py
index 9265811bb2..4bb45bbe1c 100644
--- a/nova/api/openstack/compute/plugins/v3/admin_password.py
+++ b/nova/api/openstack/compute/plugins/v3/admin_password.py
@@ -21,7 +21,7 @@
from nova.api import validation
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
ALIAS = "os-admin-password"
@@ -34,15 +34,18 @@ def __init__(self, *args, **kwargs):
super(AdminPasswordController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
- @wsgi.action('change_password')
- @wsgi.response(204)
+ # TODO(eliqiao): This should be 204 (No Content) instead of 202 by v2.1
+ # microversions, because the password has already been changed when the
+ # response is returned.
+ @wsgi.action('changePassword')
+ @wsgi.response(202)
@extensions.expected_errors((400, 404, 409, 501))
@validation.schema(admin_password.change_password)
def change_password(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
- password = body['change_password']['admin_password']
+ password = body['changePassword']['adminPass']
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
@@ -51,7 +54,7 @@ def change_password(self, req, id, body):
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as e:
raise common.raise_http_conflict_for_instance_invalid_state(
- e, 'change_password')
+ e, 'changePassword')
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
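After the rename back to the v2-compatible camelCase names, the action body looks like the following (endpoint shown for orientation; the password value is obviously illustrative):

    # POST /v2/{tenant_id}/servers/{server_id}/action
    body = {"changePassword": {"adminPass": "new-secret"}}
    password = body['changePassword']['adminPass']
    assert password == "new-secret"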
diff --git a/nova/api/openstack/compute/plugins/v3/aggregates.py b/nova/api/openstack/compute/plugins/v3/aggregates.py
index dde3abc92b..7aa3cc4550 100644
--- a/nova/api/openstack/compute/plugins/v3/aggregates.py
+++ b/nova/api/openstack/compute/plugins/v3/aggregates.py
@@ -25,12 +25,10 @@
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
from nova import utils
ALIAS = "os-aggregates"
-LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', "v3:" + ALIAS)
@@ -179,7 +177,8 @@ def _set_metadata(self, req, id, body):
try:
for key, value in metadata.items():
utils.check_string_length(key, "metadata.key", 1, 255)
- utils.check_string_length(value, "metadata.value", 0, 255)
+ if value is not None:
+ utils.check_string_length(value, "metadata.value", 0, 255)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
try:
diff --git a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py
index 477f432be9..cb414d78ca 100644
--- a/nova/api/openstack/compute/plugins/v3/attach_interfaces.py
+++ b/nova/api/openstack/compute/plugins/v3/attach_interfaces.py
@@ -24,8 +24,8 @@
from nova.api import validation
from nova import compute
from nova import exception
+from nova.i18n import _
from nova import network
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
@@ -54,11 +54,13 @@ def __init__(self):
self.network_api = network.API()
super(InterfaceAttachmentController, self).__init__()
+ @extensions.expected_errors((404, 501))
def index(self, req, server_id):
"""Returns the list of interface attachments for a given instance."""
return self._items(req, server_id,
entity_maker=_translate_interface_attachment_view)
+ @extensions.expected_errors(404)
def show(self, req, server_id, id):
"""Return data about the given interface attachment."""
context = req.environ['nova.context']
@@ -75,9 +77,10 @@ def show(self, req, server_id, id):
if port_info['port']['device_id'] != server_id:
raise exc.HTTPNotFound()
- return {'interface_attachment': _translate_interface_attachment_view(
+ return {'interfaceAttachment': _translate_interface_attachment_view(
port_info['port'])}
+ @extensions.expected_errors((400, 404, 409, 500, 501))
@validation.schema(attach_interfaces.create)
def create(self, req, server_id, body):
"""Attach an interface to an instance."""
@@ -88,7 +91,7 @@ def create(self, req, server_id, body):
port_id = None
req_ip = None
if body:
- attachment = body['interface_attachment']
+ attachment = body['interfaceAttachment']
network_id = attachment.get('net_id', None)
port_id = attachment.get('port_id', None)
try:
@@ -123,14 +126,13 @@ def create(self, req, server_id, body):
LOG.exception(e)
raise webob.exc.HTTPInternalServerError(
explanation=e.format_message())
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'attach_interface')
return self.show(req, server_id, vif['id'])
- def update(self, req, server_id, id, body):
- """Update a interface attachment. We don't currently support this."""
- msg = _("Attachments update is not supported")
- raise exc.HTTPNotImplemented(explanation=msg)
-
+ @extensions.expected_errors((404, 409, 501))
def delete(self, req, server_id, id):
"""Detach an interface from an instance."""
context = req.environ['nova.context']
@@ -149,6 +151,9 @@ def delete(self, req, server_id, id):
raise exc.HTTPConflict(explanation=e.format_message())
except NotImplementedError as e:
raise webob.exc.HTTPNotImplemented(explanation=e.format_message())
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'detach_interface')
return webob.Response(status_int=202)
@@ -172,7 +177,7 @@ def _items(self, req, server_id, entity_maker):
ports = data.get('ports', [])
results = [entity_maker(port) for port in ports]
- return {'interface_attachments': results}
+ return {'interfaceAttachments': results}
class AttachInterfaces(extensions.V3APIExtensionBase):
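
For reference, hypothetical request and response bodies after the camelCase
rename above (all identifiers and values invented for illustration):

    create_body = {
        "interfaceAttachment": {        # was "interface_attachment"
            "net_id": "3cb9bc59-5699-4588-a4b1-b87f96708bc6",
        }
    }
    list_response = {
        "interfaceAttachments": [       # was "interface_attachments"
            {"port_id": "ce531f90-199f-48c0-816c-13e38010b442",
             "port_state": "ACTIVE"},
        ]
    }
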
diff --git a/nova/api/openstack/compute/plugins/v3/availability_zone.py b/nova/api/openstack/compute/plugins/v3/availability_zone.py
index dcba03beea..d483d89952 100644
--- a/nova/api/openstack/compute/plugins/v3/availability_zone.py
+++ b/nova/api/openstack/compute/plugins/v3/availability_zone.py
@@ -139,5 +139,7 @@ def get_controller_extensions(self):
"""
return []
- def server_create(self, server_dict, create_kwargs):
+    # NOTE(gmann): This function is not supposed to use the
+    # 'body_deprecated_param' parameter; it exists only to handle the
+    # scheduler_hint extension for V2.1.
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param):
create_kwargs['availability_zone'] = server_dict.get(ATTRIBUTE_NAME)
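
A minimal sketch of the widened server_create contract: every extension now
receives the raw request body as a third argument, but only the
scheduler-hints extension is meant to read it:

    ATTRIBUTE_NAME = "availability_zone"  # the key this extension consumes

    def server_create(server_dict, create_kwargs, body_deprecated_param):
        # body_deprecated_param is deliberately ignored by this extension.
        create_kwargs["availability_zone"] = server_dict.get(ATTRIBUTE_NAME)

    kwargs = {}
    server_create({"availability_zone": "nova"}, kwargs, {})
    assert kwargs == {"availability_zone": "nova"}
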
diff --git a/nova/api/openstack/compute/plugins/v3/block_device_mapping.py b/nova/api/openstack/compute/plugins/v3/block_device_mapping.py
index 76d723bd37..0ee6c8340b 100644
--- a/nova/api/openstack/compute/plugins/v3/block_device_mapping.py
+++ b/nova/api/openstack/compute/plugins/v3/block_device_mapping.py
@@ -40,7 +40,9 @@ def get_controller_extensions(self):
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
- def server_create(self, server_dict, create_kwargs):
+    # NOTE(gmann): This function is not supposed to use the
+    # 'body_deprecated_param' parameter; it exists only to handle the
+    # scheduler_hint extension for V2.1.
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param):
block_device_mapping = server_dict.get(ATTRIBUTE_NAME, [])
try:
diff --git a/nova/api/openstack/compute/plugins/v3/cells.py b/nova/api/openstack/compute/plugins/v3/cells.py
index 31c3ffb133..ee35daee43 100644
--- a/nova/api/openstack/compute/plugins/v3/cells.py
+++ b/nova/api/openstack/compute/plugins/v3/cells.py
@@ -22,19 +22,18 @@
from webob import exc
from nova.api.openstack import common
+from nova.api.openstack.compute.schemas.v3 import cells
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
+from nova.api import validation
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
from nova.openstack.common import strutils
-from nova.openstack.common import timeutils
from nova import rpc
-LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
@@ -177,7 +176,10 @@ def show(self, req, id):
def delete(self, req, id):
"""Delete a child or parent cell entry. 'id' is a cell name."""
context = req.environ['nova.context']
+
authorize(context)
+ authorize(context, action="delete")
+
try:
num_deleted = self.cells_rpcapi.cell_delete(context, id)
except exception.CellsUpdateUnsupported as e:
@@ -186,24 +188,6 @@ def delete(self, req, id):
raise exc.HTTPNotFound(
explanation=_("Cell %s doesn't exist.") % id)
- def _validate_cell_name(self, cell_name):
- """Validate cell name is not empty and doesn't contain '!' or '.'."""
- if not cell_name:
- msg = _("Cell name cannot be empty")
- LOG.error(msg)
- raise exc.HTTPBadRequest(explanation=msg)
- if '!' in cell_name or '.' in cell_name:
- msg = _("Cell name cannot contain '!' or '.'")
- LOG.error(msg)
- raise exc.HTTPBadRequest(explanation=msg)
-
- def _validate_cell_type(self, cell_type):
- """Validate cell_type is 'parent' or 'child'."""
- if cell_type not in ['parent', 'child']:
- msg = _("Cell type must be 'parent' or 'child'")
- LOG.error(msg)
- raise exc.HTTPBadRequest(explanation=msg)
-
def _normalize_cell(self, cell, existing=None):
"""Normalize input cell data. Normalizations include:
@@ -213,7 +197,6 @@ def _normalize_cell(self, cell, existing=None):
# Start with the cell type conversion
if 'type' in cell:
- self._validate_cell_type(cell['type'])
cell['is_parent'] = cell['type'] == 'parent'
del cell['type']
# Avoid cell type being overwritten to 'child'
@@ -251,20 +234,15 @@ def _normalize_cell(self, cell, existing=None):
@extensions.expected_errors((400, 403, 501))
@common.check_cells_enabled
@wsgi.response(201)
+ @validation.schema(cells.create)
def create(self, req, body):
"""Create a child cell entry."""
context = req.environ['nova.context']
+
authorize(context)
- if 'cell' not in body:
- msg = _("No cell information in request")
- LOG.error(msg)
- raise exc.HTTPBadRequest(explanation=msg)
+ authorize(context, action="create")
+
cell = body['cell']
- if 'name' not in cell:
- msg = _("No cell name in request")
- LOG.error(msg)
- raise exc.HTTPBadRequest(explanation=msg)
- self._validate_cell_name(cell['name'])
self._normalize_cell(cell)
try:
cell = self.cells_rpcapi.cell_create(context, cell)
@@ -274,18 +252,17 @@ def create(self, req, body):
@extensions.expected_errors((400, 403, 404, 501))
@common.check_cells_enabled
+ @validation.schema(cells.update)
def update(self, req, id, body):
"""Update a child cell entry. 'id' is the cell name to update."""
context = req.environ['nova.context']
+
authorize(context)
- if 'cell' not in body:
- msg = _("No cell information in request")
- LOG.error(msg)
- raise exc.HTTPBadRequest(explanation=msg)
+ authorize(context, action="update")
+
cell = body['cell']
cell.pop('id', None)
- if 'name' in cell:
- self._validate_cell_name(cell['name'])
+
try:
# NOTE(Vek): There is a race condition here if multiple
# callers are trying to update the cell
@@ -308,28 +285,19 @@ def update(self, req, id, body):
@extensions.expected_errors((400, 501))
@common.check_cells_enabled
@wsgi.response(204)
+ @validation.schema(cells.sync_instances)
def sync_instances(self, req, body):
"""Tell all cells to sync instance info."""
context = req.environ['nova.context']
+
authorize(context)
+ authorize(context, action="sync_instances")
+
project_id = body.pop('project_id', None)
deleted = body.pop('deleted', False)
updated_since = body.pop('updated_since', None)
- if body:
- msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
- "understood.")
- raise exc.HTTPBadRequest(explanation=msg)
if isinstance(deleted, six.string_types):
- try:
- deleted = strutils.bool_from_string(deleted, strict=True)
- except ValueError as err:
- raise exc.HTTPBadRequest(explanation=str(err))
- if updated_since:
- try:
- timeutils.parse_isotime(updated_since)
- except ValueError:
- msg = _('Invalid changes-since value')
- raise exc.HTTPBadRequest(explanation=msg)
+ deleted = strutils.bool_from_string(deleted, strict=True)
self.cells_rpcapi.sync_instances(context, project_id=project_id,
updated_since=updated_since, deleted=deleted)
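
A plausible shape for the cells.create schema referenced above (the real
definition lives in nova/api/openstack/compute/schemas/v3/cells.py and may
differ); it centralizes the name and type checks the handlers used to do
inline:

    import jsonschema

    create = {
        "type": "object",
        "properties": {
            "cell": {
                "type": "object",
                "properties": {
                    "name": {"type": "string", "minLength": 1,
                             "pattern": "^[^!.]*$"},  # no '!' or '.'
                    "type": {"enum": ["parent", "child"]},
                },
                "required": ["name"],
            },
        },
        "required": ["cell"],
    }

    jsonschema.validate({"cell": {"name": "cell1", "type": "child"}}, create)
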
diff --git a/nova/api/openstack/compute/plugins/v3/certificates.py b/nova/api/openstack/compute/plugins/v3/certificates.py
index 780cb4914e..6ab3d43231 100644
--- a/nova/api/openstack/compute/plugins/v3/certificates.py
+++ b/nova/api/openstack/compute/plugins/v3/certificates.py
@@ -18,8 +18,8 @@
from nova.api.openstack import wsgi
import nova.cert.rpcapi
from nova import exception
+from nova.i18n import _
from nova import network
-from nova.openstack.common.gettextutils import _
ALIAS = "os-certificates"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
diff --git a/nova/api/openstack/compute/plugins/v3/config_drive.py b/nova/api/openstack/compute/plugins/v3/config_drive.py
index b8c4986cd9..9a8da1011b 100644
--- a/nova/api/openstack/compute/plugins/v3/config_drive.py
+++ b/nova/api/openstack/compute/plugins/v3/config_drive.py
@@ -15,11 +15,13 @@
"""Config Drive extension."""
+from nova.api.openstack.compute.schemas.v3 import config_drive as \
+ schema_config_drive
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
ALIAS = "os-config-drive"
-ATTRIBUTE_NAME = "%s:config_drive" % ALIAS
+ATTRIBUTE_NAME = "config_drive"
authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
@@ -66,5 +68,10 @@ def get_controller_extensions(self):
def get_resources(self):
return []
- def server_create(self, server_dict, create_kwargs):
+    # NOTE(gmann): This function is not supposed to use the
+    # 'body_deprecated_param' parameter; it exists only to handle the
+    # scheduler_hint extension for V2.1.
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param):
create_kwargs['config_drive'] = server_dict.get(ATTRIBUTE_NAME)
+
+ def get_server_create_schema(self):
+ return schema_config_drive.server_create
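
A sketch of how a per-extension create schema plugs into the base server
schema (the property definition here is an assumption, not the real one):

    # What an extension's get_server_create_schema() might return.
    server_create = {
        "config_drive": {"type": ["boolean", "string"]},
    }

    base_create = {
        "type": "object",
        "properties": {"server": {"type": "object", "properties": {}}},
    }

    # ServersController._create_extension_schema (further below) effectively
    # merges each extension's fragment into the base schema like this:
    base_create["properties"]["server"]["properties"].update(server_create)
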
diff --git a/nova/api/openstack/compute/plugins/v3/console_auth_tokens.py b/nova/api/openstack/compute/plugins/v3/console_auth_tokens.py
index c1e0482218..3d6f47740d 100644
--- a/nova/api/openstack/compute/plugins/v3/console_auth_tokens.py
+++ b/nova/api/openstack/compute/plugins/v3/console_auth_tokens.py
@@ -18,7 +18,7 @@
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.consoleauth import rpcapi as consoleauth_rpcapi
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
ALIAS = "os-console-auth-tokens"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
diff --git a/nova/api/openstack/compute/plugins/v3/console_output.py b/nova/api/openstack/compute/plugins/v3/console_output.py
index 37cef3fd46..7921298bf8 100644
--- a/nova/api/openstack/compute/plugins/v3/console_output.py
+++ b/nova/api/openstack/compute/plugins/v3/console_output.py
@@ -23,7 +23,7 @@
from nova.api import validation
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
ALIAS = "os-console-output"
authorize = extensions.extension_authorizer('compute', "v3:" + ALIAS)
diff --git a/nova/api/openstack/compute/plugins/v3/create_backup.py b/nova/api/openstack/compute/plugins/v3/create_backup.py
index 07f8759e06..61dc1f1f3a 100644
--- a/nova/api/openstack/compute/plugins/v3/create_backup.py
+++ b/nova/api/openstack/compute/plugins/v3/create_backup.py
@@ -24,9 +24,7 @@
from nova.api import validation
from nova import compute
from nova import exception
-from nova.openstack.common import log as logging
-LOG = logging.getLogger(__name__)
ALIAS = "os-create-backup"
authorize = extensions.extension_authorizer('compute', "v3:" + ALIAS)
@@ -36,7 +34,7 @@ def __init__(self, *args, **kwargs):
super(CreateBackupController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
- @extensions.expected_errors((400, 404, 409, 413))
+ @extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('create_backup')
@validation.schema(create_backup.create_backup)
def _create_backup(self, req, id, body):
diff --git a/nova/api/openstack/compute/plugins/v3/deferred_delete.py b/nova/api/openstack/compute/plugins/v3/deferred_delete.py
index f3e30afedc..416ebd1625 100644
--- a/nova/api/openstack/compute/plugins/v3/deferred_delete.py
+++ b/nova/api/openstack/compute/plugins/v3/deferred_delete.py
@@ -33,7 +33,7 @@ def __init__(self, *args, **kwargs):
super(DeferredDeleteController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
- @extensions.expected_errors((404, 409, 413))
+    @extensions.expected_errors((403, 404, 409))
@wsgi.action('restore')
def _restore(self, req, id, body):
"""Restore a previously deleted instance."""
@@ -44,9 +44,7 @@ def _restore(self, req, id, body):
try:
self.compute_api.restore(context, instance)
except exception.QuotaError as error:
- raise webob.exc.HTTPRequestEntityTooLarge(
- explanation=error.format_message(),
- headers={'Retry-After': 0})
+ raise webob.exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'restore')
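
The quota failure above now maps to 403 Forbidden instead of 413. A minimal
webob sketch of the new mapping, with a stand-in QuotaError:

    import webob.exc

    class QuotaError(Exception):
        def format_message(self):
            return "Quota exceeded"

    try:
        raise QuotaError()
    except QuotaError as error:
        resp = webob.exc.HTTPForbidden(explanation=error.format_message())
        assert resp.status_int == 403
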
diff --git a/nova/api/openstack/compute/plugins/v3/evacuate.py b/nova/api/openstack/compute/plugins/v3/evacuate.py
index dc0f39e6ad..6bb616a757 100644
--- a/nova/api/openstack/compute/plugins/v3/evacuate.py
+++ b/nova/api/openstack/compute/plugins/v3/evacuate.py
@@ -23,8 +23,7 @@
from nova.api import validation
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
from nova.openstack.common import strutils
from nova import utils
@@ -32,7 +31,6 @@
CONF.import_opt('enable_instance_password',
'nova.api.openstack.compute.servers')
-LOG = logging.getLogger(__name__)
ALIAS = "os-evacuate"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
@@ -55,7 +53,7 @@ def _evacuate(self, req, id, body):
authorize(context)
evacuate_body = body["evacuate"]
- host = evacuate_body["host"]
+ host = evacuate_body.get("host")
on_shared_storage = strutils.bool_from_string(
evacuate_body["on_shared_storage"])
@@ -71,11 +69,12 @@ def _evacuate(self, req, id, body):
elif not on_shared_storage:
password = utils.generate_password()
- try:
- self.host_api.service_get_by_compute_host(context, host)
- except exception.NotFound:
- msg = _("Compute host %s not found.") % host
- raise exc.HTTPNotFound(explanation=msg)
+ if host is not None:
+ try:
+ self.host_api.service_get_by_compute_host(context, host)
+ except exception.NotFound:
+ msg = _("Compute host %s not found.") % host
+ raise exc.HTTPNotFound(explanation=msg)
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
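
With the change above, "host" becomes optional in the evacuate body; when it
is omitted, the scheduler picks a target. Illustrative bodies (values
invented):

    evacuate_with_host = {
        "evacuate": {
            "host": "compute-2",
            "on_shared_storage": "true",
        }
    }
    evacuate_without_host = {
        "evacuate": {
            "on_shared_storage": "false",  # a password will be generated
        }
    }

    host = evacuate_without_host["evacuate"].get("host")  # None: no host lookup
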
diff --git a/nova/api/openstack/compute/plugins/v3/extended_server_attributes.py b/nova/api/openstack/compute/plugins/v3/extended_server_attributes.py
index 5fdc418e0f..fae7d48f3e 100644
--- a/nova/api/openstack/compute/plugins/v3/extended_server_attributes.py
+++ b/nova/api/openstack/compute/plugins/v3/extended_server_attributes.py
@@ -24,15 +24,14 @@
class ExtendedServerAttributesController(wsgi.Controller):
def _extend_server(self, context, server, instance):
- key = "%s:hypervisor_hostname" % ExtendedServerAttributes.alias
+ key = "OS-EXT-SRV-ATTR:hypervisor_hostname"
server[key] = instance['node']
for attr in ['host', 'name']:
if attr == 'name':
- key = "%s:instance_%s" % (ExtendedServerAttributes.alias,
- attr)
+ key = "OS-EXT-SRV-ATTR:instance_%s" % attr
else:
- key = "%s:%s" % (ExtendedServerAttributes.alias, attr)
+ key = "OS-EXT-SRV-ATTR:%s" % attr
server[key] = instance[attr]
@wsgi.extends
diff --git a/nova/api/openstack/compute/plugins/v3/extended_status.py b/nova/api/openstack/compute/plugins/v3/extended_status.py
index 66258c6a9f..0b4fb4de09 100644
--- a/nova/api/openstack/compute/plugins/v3/extended_status.py
+++ b/nova/api/openstack/compute/plugins/v3/extended_status.py
@@ -29,7 +29,7 @@ def __init__(self, *args, **kwargs):
def _extend_server(self, server, instance):
for state in ['task_state', 'vm_state', 'power_state', 'locked_by']:
- key = "%s:%s" % (ExtendedStatus.alias, state)
+ key = "%s:%s" % ('OS-EXT-STS', state)
server[key] = instance[state]
@wsgi.extends
diff --git a/nova/api/openstack/compute/plugins/v3/extended_volumes.py b/nova/api/openstack/compute/plugins/v3/extended_volumes.py
index 6d5eb48630..1a4714f3fd 100644
--- a/nova/api/openstack/compute/plugins/v3/extended_volumes.py
+++ b/nova/api/openstack/compute/plugins/v3/extended_volumes.py
@@ -23,8 +23,8 @@
from nova.api import validation
from nova import compute
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import volume
diff --git a/nova/api/openstack/compute/plugins/v3/flavor_access.py b/nova/api/openstack/compute/plugins/v3/flavor_access.py
index 762f24f52e..7c6e1ccb1e 100644
--- a/nova/api/openstack/compute/plugins/v3/flavor_access.py
+++ b/nova/api/openstack/compute/plugins/v3/flavor_access.py
@@ -22,8 +22,8 @@
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
ALIAS = 'flavor-access'
soft_authorize = extensions.soft_extension_authorizer('compute',
diff --git a/nova/api/openstack/compute/plugins/v3/flavors.py b/nova/api/openstack/compute/plugins/v3/flavors.py
index d29156b03b..93c3dd3a79 100644
--- a/nova/api/openstack/compute/plugins/v3/flavors.py
+++ b/nova/api/openstack/compute/plugins/v3/flavors.py
@@ -20,10 +20,12 @@
from nova.api.openstack import wsgi
from nova.compute import flavors
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import strutils
from nova import utils
+ALIAS = 'flavors'
+
class FlavorsController(wsgi.Controller):
"""Flavor controller for the OpenStack API."""
@@ -116,7 +118,7 @@ def _get_flavors(self, req):
class Flavors(extensions.V3APIExtensionBase):
"""Flavors Extension."""
name = "Flavors"
- alias = "flavors"
+ alias = ALIAS
version = 1
def get_resources(self):
@@ -124,7 +126,7 @@ def get_resources(self):
member_actions = {'action': 'POST'}
resources = [
- extensions.ResourceExtension('flavors',
+ extensions.ResourceExtension(ALIAS,
FlavorsController(),
member_name='flavor',
collection_actions=collection_actions,
diff --git a/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py b/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py
index 6c4fd157e6..496772ddc3 100644
--- a/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py
+++ b/nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py
@@ -15,55 +15,43 @@
import webob
+from nova.api.openstack.compute.schemas.v3 import flavors_extraspecs
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
-from nova.compute import flavors
+from nova.api import validation
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
+
+ALIAS = 'flavor-extra-specs'
+authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class FlavorExtraSpecsController(object):
"""The flavor extra specs API controller for the OpenStack API."""
- ALIAS = 'flavor-extra-specs'
def __init__(self, *args, **kwargs):
super(FlavorExtraSpecsController, self).__init__(*args, **kwargs)
- self.authorize = extensions.extension_authorizer('compute',
- 'v3:' + self.ALIAS)
def _get_extra_specs(self, context, flavor_id):
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
return dict(extra_specs=flavor.extra_specs)
- def _check_body(self, body):
- if body is None or body == "":
- expl = _('No Request Body')
- raise webob.exc.HTTPBadRequest(explanation=expl)
-
- def _check_key_names(self, keys):
- try:
- flavors.validate_extra_spec_keys(keys)
- except exception.InvalidInput as error:
- raise webob.exc.HTTPBadRequest(explanation=error.format_message())
-
@extensions.expected_errors(())
def index(self, req, flavor_id):
"""Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
- self.authorize(context, action='index')
+ authorize(context, action='index')
return self._get_extra_specs(context, flavor_id)
@extensions.expected_errors((400, 404, 409))
@wsgi.response(201)
+ @validation.schema(flavors_extraspecs.create)
def create(self, req, flavor_id, body):
context = req.environ['nova.context']
- self.authorize(context, action='create')
- self._check_body(body)
- specs = body.get('extra_specs', {})
- if not specs or type(specs) is not dict:
- raise webob.exc.HTTPBadRequest(_('No or bad extra_specs provided'))
- self._check_key_names(specs.keys())
+ authorize(context, action='create')
+
+ specs = body['extra_specs']
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
flavor.extra_specs = dict(flavor.extra_specs, **specs)
@@ -75,16 +63,14 @@ def create(self, req, flavor_id, body):
return body
@extensions.expected_errors((400, 404, 409))
+ @validation.schema(flavors_extraspecs.update)
def update(self, req, flavor_id, id, body):
context = req.environ['nova.context']
- self.authorize(context, action='update')
- self._check_body(body)
+ authorize(context, action='update')
+
if id not in body:
expl = _('Request body and URI mismatch')
raise webob.exc.HTTPBadRequest(explanation=expl)
- if len(body) > 1:
- expl = _('Request body contains too many items')
- raise webob.exc.HTTPBadRequest(explanation=expl)
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
flavor.extra_specs = dict(flavor.extra_specs, **body)
@@ -99,7 +85,7 @@ def update(self, req, flavor_id, id, body):
def show(self, req, flavor_id, id):
"""Return a single extra spec item."""
context = req.environ['nova.context']
- self.authorize(context, action='show')
+ authorize(context, action='show')
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
return {id: flavor.extra_specs[id]}
@@ -117,7 +103,7 @@ def show(self, req, flavor_id, id):
def delete(self, req, flavor_id, id):
"""Deletes an existing extra spec."""
context = req.environ['nova.context']
- self.authorize(context, action='delete')
+ authorize(context, action='delete')
try:
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
del flavor.extra_specs[id]
@@ -135,12 +121,12 @@ def delete(self, req, flavor_id, id):
class FlavorsExtraSpecs(extensions.V3APIExtensionBase):
"""Flavors extra specs support."""
name = 'FlavorsExtraSpecs'
- alias = FlavorExtraSpecsController.ALIAS
+ alias = ALIAS
version = 1
def get_resources(self):
extra_specs = extensions.ResourceExtension(
- self.alias,
+ ALIAS,
FlavorExtraSpecsController(),
parent=dict(member_name='flavor', collection_name='flavors'))
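
A plausible reading of the flavors_extraspecs.create schema named above (the
real definition lives under schemas/v3 and may differ); it replaces the
hand-rolled _check_body and _check_key_names validation:

    import jsonschema

    create = {
        "type": "object",
        "properties": {
            "extra_specs": {
                "type": "object",
                "patternProperties": {
                    "^[a-zA-Z0-9_:. -]{1,255}$": {"type": "string"},
                },
                "additionalProperties": False,
            },
        },
        "required": ["extra_specs"],
    }

    jsonschema.validate({"extra_specs": {"hw:cpu_policy": "dedicated"}},
                        create)
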
diff --git a/nova/api/openstack/compute/plugins/v3/hosts.py b/nova/api/openstack/compute/plugins/v3/hosts.py
index 5087b0171e..e1b3399f75 100644
--- a/nova/api/openstack/compute/plugins/v3/hosts.py
+++ b/nova/api/openstack/compute/plugins/v3/hosts.py
@@ -17,11 +17,13 @@
import webob.exc
+from nova.api.openstack.compute.schemas.v3 import hosts
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
+from nova.api import validation
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -37,41 +39,42 @@ def __init__(self):
@extensions.expected_errors(())
def index(self, req):
- """:returns: A dict in the format:
+ """Returns a dict in the format
+
+ | {'hosts': [{'host_name': 'some.host.name',
+ | 'service': 'cells',
+ | 'zone': 'internal'},
+ | {'host_name': 'some.other.host.name',
+ | 'service': 'cells',
+ | 'zone': 'internal'},
+ | {'host_name': 'some.celly.host.name',
+ | 'service': 'cells',
+ | 'zone': 'internal'},
+ | {'host_name': 'console1.host.com',
+ | 'service': 'consoleauth',
+ | 'zone': 'internal'},
+ | {'host_name': 'network1.host.com',
+ | 'service': 'network',
+ | 'zone': 'internal'},
+            | {'host_name': 'network2.host.com',
+ | 'service': 'network',
+ | 'zone': 'internal'},
+ | {'host_name': 'compute1.host.com',
+ | 'service': 'compute',
+ | 'zone': 'nova'},
+ | {'host_name': 'compute2.host.com',
+ | 'service': 'compute',
+ | 'zone': 'nova'},
+ | {'host_name': 'sched1.host.com',
+ | 'service': 'scheduler',
+ | 'zone': 'internal'},
+ | {'host_name': 'sched2.host.com',
+ | 'service': 'scheduler',
+ | 'zone': 'internal'},
+            | {'host_name': 'vol1.host.com',
+            |  'service': 'volume',
+            |  'zone': 'internal'}]}
- {'hosts': [{'host_name': 'some.host.name',
- 'service': 'cells',
- 'zone': 'internal'},
- {'host_name': 'some.other.host.name',
- 'service': 'cells',
- 'zone': 'internal'},
- {'host_name': 'some.celly.host.name',
- 'service': 'cells',
- 'zone': 'internal'},
- {'host_name': 'console1.host.com',
- 'service': 'consoleauth',
- 'zone': 'internal'},
- {'host_name': 'network1.host.com',
- 'service': 'network',
- 'zone': 'internal'},
- {'host_name': 'netwwork2.host.com',
- 'service': 'network',
- 'zone': 'internal'},
- {'host_name': 'compute1.host.com',
- 'service': 'compute',
- 'zone': 'nova'},
- {'host_name': 'compute2.host.com',
- 'service': 'compute',
- 'zone': 'nova'},
- {'host_name': 'sched1.host.com',
- 'service': 'scheduler',
- 'zone': 'internal'},
- {'host_name': 'sched2.host.com',
- 'service': 'scheduler',
- 'zone': 'internal'},
- {'host_name': 'vol1.host.com',
- 'service': 'volume'},
- 'zone': 'internal']}
"""
context = req.environ['nova.context']
authorize(context)
@@ -92,49 +95,29 @@ def index(self, req):
return {'hosts': hosts}
@extensions.expected_errors((400, 404, 501))
+ @validation.schema(hosts.update)
def update(self, req, id, body):
""":param body: example format {'host': {'status': 'enable',
'maintenance_mode': 'enable'}}
:returns:
"""
- def read_enabled(orig_val, msg):
+ def read_enabled(orig_val):
""":param orig_val: A string with either 'enable' or 'disable'. May
be surrounded by whitespace, and case doesn't
matter
- :param msg: The message to be passed to HTTPBadRequest. A single
- %s will be replaced with orig_val.
:returns: True for 'enabled' and False for 'disabled'
"""
val = orig_val.strip().lower()
- if val == "enable":
- return True
- elif val == "disable":
- return False
- else:
- raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)
+ return val == "enable"
context = req.environ['nova.context']
authorize(context)
# See what the user wants to 'update'
- if not self.is_valid_body(body, 'host'):
- raise webob.exc.HTTPBadRequest(
- explanation=_("The request body invalid"))
- params = dict([(k.strip().lower(), v)
- for k, v in body['host'].iteritems()])
- orig_status = status = params.pop('status', None)
- orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
- # Validate the request
- if len(params) > 0:
- # Some extra param was passed. Fail.
- explanation = _("Invalid update setting: '%s'") % params.keys()[0]
- raise webob.exc.HTTPBadRequest(explanation=explanation)
- if orig_status is not None:
- status = read_enabled(orig_status, _("Invalid status: '%s'"))
- if orig_maint_mode is not None:
- maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
- if status is None and maint_mode is None:
- explanation = _("'status' or 'maintenance_mode' needed for "
- "host update")
- raise webob.exc.HTTPBadRequest(explanation=explanation)
+ status = body['host'].get('status')
+ maint_mode = body['host'].get('maintenance_mode')
+ if status is not None:
+ status = read_enabled(status)
+ if maint_mode is not None:
+ maint_mode = read_enabled(maint_mode)
# Make the calls and merge the results
result = {'host': id}
if status is not None:
@@ -171,9 +154,9 @@ def _set_enabled_status(self, context, host_name, enabled):
on the host.
"""
if enabled:
- LOG.audit(_("Enabling host %s.") % host_name)
+ LOG.audit(_("Enabling host %s."), host_name)
else:
- LOG.audit(_("Disabling host %s.") % host_name)
+ LOG.audit(_("Disabling host %s."), host_name)
try:
result = self.api.set_host_enabled(context, host_name=host_name,
enabled=enabled)
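
The simplified read_enabled above can treat any non-"enable" string as False
only because the hosts.update schema now restricts the field to the two
legal values. A standalone sketch:

    def read_enabled(orig_val):
        return orig_val.strip().lower() == "enable"

    assert read_enabled(" Enable ") is True
    assert read_enabled("disable") is False  # schema guarantees one of the two
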
diff --git a/nova/api/openstack/compute/plugins/v3/hypervisors.py b/nova/api/openstack/compute/plugins/v3/hypervisors.py
index 644d73a6be..c38ed4af18 100644
--- a/nova/api/openstack/compute/plugins/v3/hypervisors.py
+++ b/nova/api/openstack/compute/plugins/v3/hypervisors.py
@@ -20,7 +20,8 @@
from nova.api.openstack import extensions
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova import servicegroup
ALIAS = "os-hypervisors"
@@ -32,12 +33,17 @@ class HypervisorsController(object):
def __init__(self):
self.host_api = compute.HostAPI()
+ self.servicegroup_api = servicegroup.API()
super(HypervisorsController, self).__init__()
def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs):
+ alive = self.servicegroup_api.service_is_up(hypervisor['service'])
hyp_dict = {
'id': hypervisor['id'],
'hypervisor_hostname': hypervisor['hypervisor_hostname'],
+ 'state': 'up' if alive else 'down',
+ 'status': ('disabled' if hypervisor['service']['disabled']
+ else 'enabled'),
}
if detail and not servers:
@@ -52,9 +58,10 @@ def _view_hypervisor(self, hypervisor, detail, servers=None, **kwargs):
hyp_dict['service'] = {
'id': hypervisor['service_id'],
'host': hypervisor['service']['host'],
+ 'disabled_reason': hypervisor['service']['disabled_reason'],
}
- if servers != None:
+ if servers is not None:
hyp_dict['servers'] = [dict(name=serv['name'], id=serv['uuid'])
for serv in servers]
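
A condensed sketch of the new state/status derivation above, with the
servicegroup check injected as a callable and invented sample data:

    def view_hypervisor_summary(hypervisor, service_is_up):
        alive = service_is_up(hypervisor["service"])
        return {
            "id": hypervisor["id"],
            "hypervisor_hostname": hypervisor["hypervisor_hostname"],
            "state": "up" if alive else "down",
            "status": ("disabled" if hypervisor["service"]["disabled"]
                       else "enabled"),
        }

    sample = {"id": 1, "hypervisor_hostname": "compute-1",
              "service": {"disabled": False}}
    print(view_hypervisor_summary(sample, lambda svc: True))
    # {'id': 1, 'hypervisor_hostname': 'compute-1',
    #  'state': 'up', 'status': 'enabled'}
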
diff --git a/nova/api/openstack/compute/plugins/v3/ips.py b/nova/api/openstack/compute/plugins/v3/ips.py
index ded2524fbc..cfdbacf44c 100644
--- a/nova/api/openstack/compute/plugins/v3/ips.py
+++ b/nova/api/openstack/compute/plugins/v3/ips.py
@@ -20,7 +20,9 @@
from nova.api.openstack.compute.views import addresses as views_addresses
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+
+ALIAS = 'ips'
class IPsController(wsgi.Controller):
@@ -40,12 +42,6 @@ def _get_instance(self, context, server_id):
raise exc.HTTPNotFound(explanation=msg)
return instance
- def create(self, req, server_id, body):
- raise exc.HTTPNotImplemented()
-
- def delete(self, req, server_id, id):
- raise exc.HTTPNotImplemented()
-
def index(self, req, server_id):
context = req.environ["nova.context"]
instance = self._get_instance(context, server_id)
@@ -67,7 +63,7 @@ class IPs(extensions.V3APIExtensionBase):
"""Server addresses."""
name = "Ips"
- alias = "ips"
+ alias = ALIAS
version = 1
def get_resources(self):
@@ -75,7 +71,7 @@ def get_resources(self):
'collection_name': 'servers'}
resources = [
extensions.ResourceExtension(
- 'ips', IPsController(), parent=parent, member_name='ip')]
+ ALIAS, IPsController(), parent=parent, member_name='ip')]
return resources
diff --git a/nova/api/openstack/compute/plugins/v3/keypairs.py b/nova/api/openstack/compute/plugins/v3/keypairs.py
index 503a1a5126..f25b4c74c9 100644
--- a/nova/api/openstack/compute/plugins/v3/keypairs.py
+++ b/nova/api/openstack/compute/plugins/v3/keypairs.py
@@ -24,7 +24,7 @@
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
ALIAS = 'keypairs'
@@ -48,7 +48,7 @@ def _filter_keypair(self, keypair, **attrs):
clean[attr] = keypair[attr]
return clean
- @extensions.expected_errors((400, 409, 413))
+ @extensions.expected_errors((400, 403, 409))
@wsgi.response(201)
@validation.schema(keypairs.create)
def create(self, req, body):
@@ -86,9 +86,7 @@ def create(self, req, body):
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
- raise webob.exc.HTTPRequestEntityTooLarge(
- explanation=msg,
- headers={'Retry-After': 0})
+ raise webob.exc.HTTPForbidden(explanation=msg)
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
@@ -178,5 +176,10 @@ def get_controller_extensions(self):
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
- def server_create(self, server_dict, create_kwargs):
+    # NOTE(gmann): This function is not supposed to use the
+    # 'body_deprecated_param' parameter; it exists only to handle the
+    # scheduler_hint extension for V2.1.
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param):
create_kwargs['key_name'] = server_dict.get('key_name')
+
+ def get_server_create_schema(self):
+ return keypairs.server_create
diff --git a/nova/api/openstack/compute/plugins/v3/lock_server.py b/nova/api/openstack/compute/plugins/v3/lock_server.py
index 4df6df24f7..662e1b2baf 100644
--- a/nova/api/openstack/compute/plugins/v3/lock_server.py
+++ b/nova/api/openstack/compute/plugins/v3/lock_server.py
@@ -19,9 +19,7 @@
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
-from nova.openstack.common import log as logging
-LOG = logging.getLogger(__name__)
ALIAS = "os-lock-server"
diff --git a/nova/api/openstack/compute/plugins/v3/migrate_server.py b/nova/api/openstack/compute/plugins/v3/migrate_server.py
index 179d776e27..fb6bfd45f9 100644
--- a/nova/api/openstack/compute/plugins/v3/migrate_server.py
+++ b/nova/api/openstack/compute/plugins/v3/migrate_server.py
@@ -23,10 +23,8 @@
from nova.api import validation
from nova import compute
from nova import exception
-from nova.openstack.common import log as logging
from nova.openstack.common import strutils
-LOG = logging.getLogger(__name__)
ALIAS = "os-migrate-server"
@@ -40,7 +38,7 @@ def __init__(self, *args, **kwargs):
super(MigrateServerController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
- @extensions.expected_errors((400, 404, 409, 413))
+ @extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('migrate')
def _migrate(self, req, id, body):
"""Permit admins to migrate a server to a new host."""
@@ -51,12 +49,8 @@ def _migrate(self, req, id, body):
want_objects=True)
try:
self.compute_api.resize(req.environ['nova.context'], instance)
- except exception.TooManyInstances as e:
- raise exc.HTTPRequestEntityTooLarge(explanation=e.format_message())
- except exception.QuotaError as error:
- raise exc.HTTPRequestEntityTooLarge(
- explanation=error.format_message(),
- headers={'Retry-After': 0})
+ except (exception.TooManyInstances, exception.QuotaError) as e:
+ raise exc.HTTPForbidden(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
diff --git a/nova/api/openstack/compute/plugins/v3/multinic.py b/nova/api/openstack/compute/plugins/v3/multinic.py
index cb6aee2224..d7c1296c88 100644
--- a/nova/api/openstack/compute/plugins/v3/multinic.py
+++ b/nova/api/openstack/compute/plugins/v3/multinic.py
@@ -25,10 +25,8 @@
from nova.api import validation
from nova import compute
from nova import exception
-from nova.openstack.common import log as logging
-LOG = logging.getLogger(__name__)
ALIAS = "os-multinic"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
@@ -49,7 +47,11 @@ def _add_fixed_ip(self, req, id, body):
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
network_id = body['add_fixed_ip']['network_id']
- self.compute_api.add_fixed_ip(context, instance, network_id)
+ try:
+ self.compute_api.add_fixed_ip(context, instance, network_id)
+ except exception.NoMoreFixedIps as e:
+ raise exc.HTTPBadRequest(explanation=e.format_message())
+
return webob.Response(status_int=202)
@wsgi.action('remove_fixed_ip')
diff --git a/nova/api/openstack/compute/plugins/v3/multiple_create.py b/nova/api/openstack/compute/plugins/v3/multiple_create.py
index 684ec07e0c..4dc1cfa64f 100644
--- a/nova/api/openstack/compute/plugins/v3/multiple_create.py
+++ b/nova/api/openstack/compute/plugins/v3/multiple_create.py
@@ -17,7 +17,7 @@
from nova.api.openstack import extensions
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import strutils
from nova import utils
@@ -42,7 +42,9 @@ def get_controller_extensions(self):
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
- def server_create(self, server_dict, create_kwargs):
+    # NOTE(gmann): This function is not supposed to use the
+    # 'body_deprecated_param' parameter; it exists only to handle the
+    # scheduler_hint extension for V2.1.
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param):
# min_count and max_count are optional. If they exist, they may come
# in as strings. Verify that they are valid integers and > 0.
# Also, we want to default 'min_count' to 1, and default
diff --git a/nova/api/openstack/compute/plugins/v3/pause_server.py b/nova/api/openstack/compute/plugins/v3/pause_server.py
index 25b758b986..1e832c96a7 100644
--- a/nova/api/openstack/compute/plugins/v3/pause_server.py
+++ b/nova/api/openstack/compute/plugins/v3/pause_server.py
@@ -21,10 +21,8 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import log as logging
+from nova.i18n import _
-LOG = logging.getLogger(__name__)
ALIAS = "os-pause-server"
diff --git a/nova/api/openstack/compute/plugins/v3/quota_sets.py b/nova/api/openstack/compute/plugins/v3/quota_sets.py
index 8dd99b6905..e857d7a7d7 100644
--- a/nova/api/openstack/compute/plugins/v3/quota_sets.py
+++ b/nova/api/openstack/compute/plugins/v3/quota_sets.py
@@ -21,9 +21,9 @@
from nova.api.openstack import wsgi
from nova.api import validation
import nova.context
-from nova import db
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import quota
@@ -158,11 +158,11 @@ def update(self, req, id, body):
maximum = settable_quotas[key]['maximum']
self._validate_quota_limit(value, minimum, maximum)
try:
- db.quota_create(context, project_id, key, value,
- user_id=user_id)
+ objects.Quotas.create_limit(context, project_id,
+ key, value, user_id=user_id)
except exception.QuotaExists:
- db.quota_update(context, project_id, key, value,
- user_id=user_id)
+ objects.Quotas.update_limit(context, project_id,
+ key, value, user_id=user_id)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return self._format_quota_set(id, self._get_quotas(context, id,
diff --git a/nova/api/openstack/compute/plugins/v3/remote_consoles.py b/nova/api/openstack/compute/plugins/v3/remote_consoles.py
index 7d8310f38c..0f1277c957 100644
--- a/nova/api/openstack/compute/plugins/v3/remote_consoles.py
+++ b/nova/api/openstack/compute/plugins/v3/remote_consoles.py
@@ -21,7 +21,7 @@
from nova.api import validation
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
ALIAS = "os-remote-consoles"
diff --git a/nova/api/openstack/compute/plugins/v3/rescue.py b/nova/api/openstack/compute/plugins/v3/rescue.py
index ea8fa75bdc..5a5a703682 100644
--- a/nova/api/openstack/compute/plugins/v3/rescue.py
+++ b/nova/api/openstack/compute/plugins/v3/rescue.py
@@ -25,7 +25,6 @@
from nova.api import validation
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
from nova import utils
@@ -76,9 +75,6 @@ def _rescue(self, req, id, body):
except exception.InstanceNotRescuable as non_rescuable:
raise exc.HTTPBadRequest(
explanation=non_rescuable.format_message())
- except NotImplementedError:
- msg = _("The rescue operation is not implemented by this cloud.")
- raise exc.HTTPNotImplemented(explanation=msg)
if CONF.enable_instance_password:
return {'admin_password': password}
@@ -100,9 +96,6 @@ def _unrescue(self, req, id, body):
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unrescue')
- except NotImplementedError:
- msg = _("The unrescue operation is not implemented by this cloud.")
- raise exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
diff --git a/nova/api/openstack/compute/plugins/v3/scheduler_hints.py b/nova/api/openstack/compute/plugins/v3/scheduler_hints.py
index c2fed29ab9..c556cf5f38 100644
--- a/nova/api/openstack/compute/plugins/v3/scheduler_hints.py
+++ b/nova/api/openstack/compute/plugins/v3/scheduler_hints.py
@@ -15,7 +15,7 @@
import webob.exc
from nova.api.openstack import extensions
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
ALIAS = "os-scheduler-hints"
@@ -33,8 +33,17 @@ def get_controller_extensions(self):
def get_resources(self):
return []
- def server_create(self, server_dict, create_kwargs):
- scheduler_hints = server_dict.get(ALIAS + ':scheduler_hints', {})
+    # NOTE(gmann): This function accepts the request body so it can fetch
+    # the scheduler hints. It is a workaround that allows OS-SCH-HNT at the
+    # top level of the request body; in the future the hints will become a
+    # subset of the server dict.
+ def server_create(self, server_dict, create_kwargs, req_body):
+ scheduler_hints = {}
+ if 'os:scheduler_hints' in req_body:
+ scheduler_hints = req_body['os:scheduler_hints']
+ elif 'OS-SCH-HNT:scheduler_hints' in req_body:
+ scheduler_hints = req_body['OS-SCH-HNT:scheduler_hints']
+
if not isinstance(scheduler_hints, dict):
msg = _("Malformed scheduler_hints attribute")
raise webob.exc.HTTPBadRequest(explanation=msg)
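
The two body layouts the workaround above accepts, with an invented hint;
both resolve to the same scheduler hints:

    body_new = {"server": {"name": "vm-1"},
                "os:scheduler_hints": {"group": "anti-affinity-group-1"}}
    body_old = {"server": {"name": "vm-1"},
                "OS-SCH-HNT:scheduler_hints": {"group": "anti-affinity-group-1"}}

    def extract_hints(req_body):
        if "os:scheduler_hints" in req_body:
            return req_body["os:scheduler_hints"]
        if "OS-SCH-HNT:scheduler_hints" in req_body:
            return req_body["OS-SCH-HNT:scheduler_hints"]
        return {}

    assert extract_hints(body_new) == extract_hints(body_old)
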
diff --git a/nova/api/openstack/compute/plugins/v3/security_groups.py b/nova/api/openstack/compute/plugins/v3/security_groups.py
index 9d807a5298..9340670bfb 100644
--- a/nova/api/openstack/compute/plugins/v3/security_groups.py
+++ b/nova/api/openstack/compute/plugins/v3/security_groups.py
@@ -16,8 +16,8 @@
"""The security groups extension."""
-import json
-
+from nova.api.openstack.compute.schemas.v3 import security_groups as \
+ schema_security_groups
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
@@ -25,6 +25,7 @@
from nova import exception
from nova.network.security_group import neutron_driver
from nova.network.security_group import openstack_driver
+from nova.openstack.common import jsonutils
ALIAS = 'os-security-groups'
@@ -79,7 +80,7 @@ def _extend_servers(self, req, servers):
# one server in an API request.
else:
# try converting to json
- req_obj = json.loads(req.body)
+ req_obj = jsonutils.loads(req.body)
# Add security group to server, if no security group was in
# request add default since that is the group it is part of
servers[0][ATTRIBUTE_NAME] = req_obj['server'].get(
@@ -120,7 +121,9 @@ def get_controller_extensions(self):
def get_resources(self):
return []
- def server_create(self, server_dict, create_kwargs):
+    # NOTE(gmann): This function is not supposed to use the
+    # 'body_deprecated_param' parameter; it exists only to handle the
+    # scheduler_hint extension for V2.1.
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param):
security_groups = server_dict.get(ATTRIBUTE_NAME)
if security_groups is not None:
create_kwargs['security_group'] = [
@@ -128,6 +131,9 @@ def server_create(self, server_dict, create_kwargs):
create_kwargs['security_group'] = list(
set(create_kwargs['security_group']))
+ def get_server_create_schema(self):
+ return schema_security_groups.server_create
+
class NativeSecurityGroupExceptions(object):
@staticmethod
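
jsonutils mirrors the stdlib json API here, so the call above is a drop-in
replacement. A minimal illustration, using stdlib json as a stand-in for
nova.openstack.common.jsonutils and an invented request body:

    import json as jsonutils  # stand-in for the oslo module

    req_body = '{"server": {"name": "vm-1", "security_groups": [{"name": "web"}]}}'
    req_obj = jsonutils.loads(req_body)
    groups = req_obj["server"].get("security_groups", [{"name": "default"}])
    assert groups == [{"name": "web"}]
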
diff --git a/nova/api/openstack/compute/plugins/v3/server_actions.py b/nova/api/openstack/compute/plugins/v3/server_actions.py
index edfed81f6c..876972f066 100644
--- a/nova/api/openstack/compute/plugins/v3/server_actions.py
+++ b/nova/api/openstack/compute/plugins/v3/server_actions.py
@@ -19,7 +19,7 @@
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
ALIAS = "os-server-actions"
authorize_actions = extensions.extension_authorizer('compute',
diff --git a/nova/api/openstack/compute/plugins/v3/server_diagnostics.py b/nova/api/openstack/compute/plugins/v3/server_diagnostics.py
index 42f16dce61..57fe3ae406 100644
--- a/nova/api/openstack/compute/plugins/v3/server_diagnostics.py
+++ b/nova/api/openstack/compute/plugins/v3/server_diagnostics.py
@@ -19,6 +19,7 @@
from nova.api.openstack import extensions
from nova import compute
from nova import exception
+from nova.i18n import _
ALIAS = "os-server-diagnostics"
@@ -26,21 +27,27 @@
class ServerDiagnosticsController(object):
- @extensions.expected_errors((404, 409))
+ def __init__(self):
+ self.compute_api = compute.API()
+
+ @extensions.expected_errors((404, 409, 501))
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
- compute_api = compute.API()
try:
- instance = compute_api.get(context, server_id, want_objects=True)
+ instance = self.compute_api.get(context, server_id,
+ want_objects=True)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
try:
- return compute_api.get_instance_diagnostics(context, instance)
+ return self.compute_api.get_instance_diagnostics(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'get_diagnostics')
+ except NotImplementedError:
+ msg = _("Unable to get diagnostics, functionality not implemented")
+ raise webob.exc.HTTPNotImplemented(explanation=msg)
class ServerDiagnostics(extensions.V3APIExtensionBase):
diff --git a/nova/api/openstack/compute/plugins/v3/server_external_events.py b/nova/api/openstack/compute/plugins/v3/server_external_events.py
index b3f848d145..bc59733c04 100644
--- a/nova/api/openstack/compute/plugins/v3/server_external_events.py
+++ b/nova/api/openstack/compute/plugins/v3/server_external_events.py
@@ -18,9 +18,9 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.objects import external_event as external_event_obj
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
diff --git a/nova/api/openstack/compute/plugins/v3/server_metadata.py b/nova/api/openstack/compute/plugins/v3/server_metadata.py
index 7997858336..44ef229969 100644
--- a/nova/api/openstack/compute/plugins/v3/server_metadata.py
+++ b/nova/api/openstack/compute/plugins/v3/server_metadata.py
@@ -20,7 +20,9 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+
+ALIAS = 'server-metadata'
class ServerMetadataController(wsgi.Controller):
@@ -51,7 +53,7 @@ def index(self, req, server_id):
context = req.environ['nova.context']
return {'metadata': self._get_metadata(context, server_id)}
- @extensions.expected_errors((400, 404, 409, 413))
+ @extensions.expected_errors((400, 403, 404, 409, 413))
@wsgi.response(201)
def create(self, req, server_id, body):
if not self.is_valid_body(body, 'metadata'):
@@ -67,7 +69,7 @@ def create(self, req, server_id, body):
return {'metadata': new_metadata}
- @extensions.expected_errors((400, 404, 409, 413))
+ @extensions.expected_errors((400, 403, 404, 409, 413))
def update(self, req, server_id, id, body):
if not self.is_valid_body(body, 'metadata'):
msg = _("Malformed request body")
@@ -89,7 +91,7 @@ def update(self, req, server_id, id, body):
return {'metadata': meta_item}
- @extensions.expected_errors((400, 404, 409, 413))
+ @extensions.expected_errors((400, 403, 404, 409, 413))
def update_all(self, req, server_id, body):
if not self.is_valid_body(body, 'metadata'):
msg = _("Malformed request body")
@@ -121,9 +123,7 @@ def _update_instance_metadata(self, context, server_id, metadata,
explanation=error.format_message())
except exception.QuotaError as error:
- raise exc.HTTPRequestEntityTooLarge(
- explanation=error.format_message(),
- headers={'Retry-After': 0})
+ raise exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
@@ -172,7 +172,7 @@ def delete(self, req, server_id, id):
class ServerMetadata(extensions.V3APIExtensionBase):
"""Server Metadata API."""
name = "ServerMetadata"
- alias = "server-metadata"
+ alias = ALIAS
version = 1
def get_resources(self):
diff --git a/nova/api/openstack/compute/plugins/v3/servers.py b/nova/api/openstack/compute/plugins/v3/servers.py
index 60ab3b8ad8..ce70cc8473 100644
--- a/nova/api/openstack/compute/plugins/v3/servers.py
+++ b/nova/api/openstack/compute/plugins/v3/servers.py
@@ -25,15 +25,18 @@
from webob import exc
from nova.api.openstack import common
+from nova.api.openstack.compute.schemas.v3 import servers as schema_servers
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
+from nova.api import validation
from nova import compute
from nova.compute import flavors
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LW
from nova.image import glance
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
@@ -69,6 +72,8 @@ class ServersController(wsgi.Controller):
_view_builder_class = views_servers.ViewBuilderV3
+ schema_server_create = schema_servers.base_create
+
@staticmethod
def _add_location(robj):
# Just in case...
@@ -96,13 +101,13 @@ def check_whiteblack_lists(ext):
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
return True
else:
- LOG.warning(_("Not loading %s because it is "
- "in the blacklist"), ext.obj.alias)
+ LOG.warn(_LW("Not loading %s because it is "
+ "in the blacklist"), ext.obj.alias)
return False
else:
- LOG.warning(
- _("Not loading %s because it is not in the whitelist"),
- ext.obj.alias)
+ LOG.warn(
+ _LW("Not loading %s because it is not in the "
+ "whitelist"), ext.obj.alias)
return False
def check_load_extension(ext):
@@ -166,6 +171,21 @@ def check_load_extension(ext):
if not list(self.update_extension_manager):
LOG.debug("Did not find any server update extensions")
+ # Look for API schema of server create extension
+ self.create_schema_manager = \
+ stevedore.enabled.EnabledExtensionManager(
+ namespace=self.EXTENSION_CREATE_NAMESPACE,
+ check_func=_check_load_extension('get_server_create_schema'),
+ invoke_on_load=True,
+ invoke_kwds={"extension_info": self.extension_info},
+ propagate_map_exceptions=True)
+ if list(self.create_schema_manager):
+ self.create_schema_manager.map(self._create_extension_schema,
+ self.schema_server_create)
+ else:
+ LOG.debug("Did not find any server create schemas")
+
+ @extensions.expected_errors((400, 403))
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
@@ -174,6 +194,7 @@ def index(self, req):
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
+ @extensions.expected_errors((400, 403))
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
@@ -194,9 +215,11 @@ def _get_servers(self, req, is_detail):
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
- status = search_opts.pop('status', None)
- if status is not None:
- vm_state, task_state = common.task_and_vm_state_from_status(status)
+ search_opts.pop('status', None)
+ if 'status' in req.GET.keys():
+ statuses = req.GET.getall('status')
+ states = common.task_and_vm_state_from_status(statuses)
+ vm_state, task_state = states
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
@@ -205,28 +228,25 @@ def _get_servers(self, req, is_detail):
if 'default' not in task_state:
search_opts['task_state'] = task_state
- if 'changes_since' in search_opts:
+ if 'changes-since' in search_opts:
try:
- parsed = timeutils.parse_isotime(search_opts['changes_since'])
+ parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
- msg = _('Invalid changes_since value')
+ msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
- search_opts['changes_since'] = parsed
+ search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
- # ... Unless 'changes_since' is specified, because 'changes_since'
+ # ... Unless 'changes-since' is specified, because 'changes-since'
# should return recently deleted images according to the API spec.
if 'deleted' not in search_opts:
- if 'changes_since' not in search_opts:
- # No 'changes_since', so we only want non-deleted servers
+ if 'changes-since' not in search_opts:
+ # No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
- if 'changes_since' in search_opts:
- search_opts['changes-since'] = search_opts.pop('changes_since')
-
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
@@ -396,6 +416,7 @@ def _decode_base64(self, data):
except TypeError:
return None
+ @extensions.expected_errors(404)
def show(self, req, id):
"""Returns server details by server id."""
context = req.environ['nova.context']
@@ -405,11 +426,11 @@ def show(self, req, id):
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
+ @extensions.expected_errors((400, 403, 409, 413))
@wsgi.response(202)
+ @validation.schema(schema_server_create)
def create(self, req, body):
"""Creates a new server for a given user."""
- if not self.is_valid_body(body, 'server'):
- raise exc.HTTPBadRequest(_("The request body is invalid"))
context = req.environ['nova.context']
server_dict = body['server']
@@ -435,7 +456,7 @@ def create(self, req, body):
# moved to the extension
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point,
- server_dict, create_kwargs)
+ server_dict, create_kwargs, body)
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
@@ -454,7 +475,7 @@ def create(self, req, body):
# Replace with an extension point when the os-networks
# extension is ported. Currently reworked
# to take into account is_neutron
- #if (self.ext_mgr.is_loaded('os-networks')
+ # if (self.ext_mgr.is_loaded('os-networks')
# or utils.is_neutron()):
# requested_networks = server_dict.get('networks')
@@ -467,7 +488,7 @@ def create(self, req, body):
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
- msg = _("Invalid flavor_ref provided.")
+ msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
try:
@@ -485,7 +506,7 @@ def create(self, req, body):
**create_kwargs)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
- raise exc.HTTPRequestEntityTooLarge(
+ raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InvalidMetadataSize as error:
@@ -495,7 +516,7 @@ def create(self, req, body):
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as error:
- msg = _("Invalid flavor_ref provided.")
+ msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound as error:
msg = _("Invalid key_name provided.")
@@ -527,6 +548,7 @@ def create(self, req, body):
exception.NetworkNotFound) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
+ exception.NetworkAmbiguous,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
@@ -539,17 +561,21 @@ def create(self, req, body):
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
- server['server']['admin_password'] = password
+ server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
- def _create_extension_point(self, ext, server_dict, create_kwargs):
+    # NOTE(gmann): The 'req_body' parameter is placed here to handle the
+    # scheduler_hint extension for V2.1. No other extension is supposed to
+    # use it, and it will be removed soon.
+ def _create_extension_point(self, ext, server_dict,
+ create_kwargs, req_body):
handler = ext.obj
LOG.debug("Running _create_extension_point for %s", ext.obj)
- handler.server_create(server_dict, create_kwargs)
+ handler.server_create(server_dict, create_kwargs, req_body)
def _rebuild_extension_point(self, ext, rebuild_dict, rebuild_kwargs):
handler = ext.obj
@@ -568,6 +594,13 @@ def _update_extension_point(self, ext, update_dict, update_kwargs):
LOG.debug("Running _update_extension_point for %s", ext.obj)
handler.server_update(update_dict, update_kwargs)
+ def _create_extension_schema(self, ext, create_schema):
+ handler = ext.obj
+ LOG.debug("Running _create_extension_schema for %s", ext.obj)
+
+ schema = handler.get_server_create_schema()
+ create_schema['properties']['server']['properties'].update(schema)
+
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
@@ -581,6 +614,7 @@ def _delete(self, context, req, instance_uuid):
else:
self.compute_api.delete(context, instance)
+ @extensions.expected_errors((400, 404))
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
@@ -617,6 +651,7 @@ def update(self, req, id, body):
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
+ @extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('confirm_resize')
def _action_confirm_resize(self, req, id, body):
@@ -633,6 +668,7 @@ def _action_confirm_resize(self, req, id, body):
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirm_resize')
+ @extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('revert_resize')
def _action_revert_resize(self, req, id, body):
@@ -653,6 +689,7 @@ def _action_revert_resize(self, req, id, body):
'revert_resize')
return webob.Response(status_int=202)
+ @extensions.expected_errors((400, 404, 409))
@wsgi.response(202)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
@@ -692,7 +729,7 @@ def _resize(self, req, instance_id, flavor_id, **kwargs):
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
- raise exc.HTTPRequestEntityTooLarge(
+ raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
@@ -701,6 +738,8 @@ def _resize(self, req, instance_id, flavor_id, **kwargs):
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
+ except exception.CannotResizeDisk as e:
+ raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
@@ -720,6 +759,7 @@ def _resize(self, req, instance_id, flavor_id, **kwargs):
return webob.Response(status_int=202)
+ @extensions.expected_errors((404, 409))
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
@@ -740,7 +780,7 @@ def _image_uuid_from_href(self, image_href):
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
- msg = _("Invalid image_ref provided.")
+ msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
@@ -749,46 +789,48 @@ def _image_from_req_data(self, server_dict, create_kwargs):
"""Get image data from the request or raise appropriate
exceptions.
- The field image_ref is mandatory when no block devices have been
+ The field imageRef is mandatory when no block devices have been
defined and must be a proper uuid when present.
"""
- image_href = server_dict.get('image_ref')
+ image_href = server_dict.get('imageRef')
if not image_href and create_kwargs.get('block_device_mapping'):
return ''
elif image_href:
return self._image_uuid_from_href(unicode(image_href))
else:
- msg = _("Missing image_ref attribute")
+ msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
try:
- flavor_ref = data['server']['flavor_ref']
+ flavor_ref = data['server']['flavorRef']
except (TypeError, KeyError):
- msg = _("Missing flavor_ref attribute")
+ msg = _("Missing flavorRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
+ @extensions.expected_errors((400, 401, 403, 404, 409))
@wsgi.response(202)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
resize_dict = body['resize']
try:
- flavor_ref = str(resize_dict["flavor_ref"])
+ flavor_ref = str(resize_dict["flavorRef"])
if not flavor_ref:
- msg = _("Resize request has invalid 'flavor_ref' attribute.")
+ msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
- msg = _("Resize requests require 'flavor_ref' attribute.")
+ msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
resize_kwargs = {}
return self._resize(req, id, flavor_ref, **resize_kwargs)
+ @extensions.expected_errors((400, 403, 404, 409, 413))
@wsgi.response(202)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
@@ -796,9 +838,9 @@ def _action_rebuild(self, req, id, body):
rebuild_dict = body['rebuild']
try:
- image_href = rebuild_dict["image_ref"]
+ image_href = rebuild_dict["imageRef"]
except (KeyError, TypeError):
- msg = _("Could not parse image_ref from request.")
+ msg = _("Could not parse imageRef from request.")
raise exc.HTTPBadRequest(explanation=msg)
image_href = self._image_uuid_from_href(image_href)
@@ -853,6 +895,8 @@ def _action_rebuild(self, req, id, body):
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
+ except exception.QuotaError as error:
+ raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
@@ -866,11 +910,12 @@ def _action_rebuild(self, req, id, body):
# Add on the admin_password attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
- view['server']['admin_password'] = password
+ view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
+ @extensions.expected_errors((400, 403, 404, 409))
@wsgi.response(202)
@wsgi.action('create_image')
@common.check_snapshots_enabled
@@ -904,10 +949,10 @@ def _action_create_image(self, req, id, body):
bdms):
img = instance['image_ref']
if not img:
- props = bdms.root_metadata(
+ properties = bdms.root_metadata(
context, self.compute_api.image_api,
self.compute_api.volume_api)
- image_meta = {'properties': props}
+ image_meta = {'properties': properties}
else:
image_meta = self.compute_api.image_api.get(context, img)
@@ -939,12 +984,12 @@ def _action_create_image(self, req, id, body):
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
- password = server['admin_password']
+ password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
- raise exc.HTTPBadRequest(explanation=_("Invalid admin_password"))
+ raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
return password
@@ -955,7 +1000,7 @@ def _validate_admin_password(self, password):
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
- 'ip', 'changes_since', 'all_tenants')
+ 'ip', 'changes-since', 'all_tenants')
def _get_instance(self, context, instance_uuid):
try:
diff --git a/nova/api/openstack/compute/plugins/v3/services.py b/nova/api/openstack/compute/plugins/v3/services.py
index 6ef0507ff6..f07966de1d 100644
--- a/nova/api/openstack/compute/plugins/v3/services.py
+++ b/nova/api/openstack/compute/plugins/v3/services.py
@@ -19,7 +19,7 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import servicegroup
from nova import utils
diff --git a/nova/api/openstack/compute/plugins/v3/user_data.py b/nova/api/openstack/compute/plugins/v3/user_data.py
index ab7874c04a..dda968eace 100644
--- a/nova/api/openstack/compute/plugins/v3/user_data.py
+++ b/nova/api/openstack/compute/plugins/v3/user_data.py
@@ -32,5 +32,7 @@ def get_controller_extensions(self):
def get_resources(self):
return []
- def server_create(self, server_dict, create_kwargs):
+ # NOTE(gmann): This function is not supposed to use the
+ # 'body_deprecated_param' parameter; it exists only to handle the
+ # scheduler_hint extension for V2.1.
+ def server_create(self, server_dict, create_kwargs, body_deprecated_param):
create_kwargs['user_data'] = server_dict.get(ATTRIBUTE_NAME)
diff --git a/nova/api/openstack/compute/schemas/v3/admin_password.py b/nova/api/openstack/compute/schemas/v3/admin_password.py
index 04bcad7bbf..a36b70950c 100644
--- a/nova/api/openstack/compute/schemas/v3/admin_password.py
+++ b/nova/api/openstack/compute/schemas/v3/admin_password.py
@@ -18,15 +18,15 @@
change_password = {
'type': 'object',
'properties': {
- 'change_password': {
+ 'changePassword': {
'type': 'object',
'properties': {
- 'admin_password': parameter_types.admin_password,
+ 'adminPass': parameter_types.admin_password,
},
- 'required': ['admin_password'],
+ 'required': ['adminPass'],
'additionalProperties': False,
},
},
- 'required': ['change_password'],
+ 'required': ['changePassword'],
'additionalProperties': False,
}
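Note: with the schema keyed on the v2-compatible names, validation now accepts changePassword/adminPass and rejects the former v3 spellings. A quick check, assuming the jsonschema package is available:

    import jsonschema

    from nova.api.openstack.compute.schemas.v3 import admin_password

    # The v2.1-compatible key names validate cleanly.
    jsonschema.validate({'changePassword': {'adminPass': 'secret'}},
                        admin_password.change_password)

    # The old v3 names are now rejected as additional properties.
    try:
        jsonschema.validate({'change_password': {'admin_password': 'x'}},
                            admin_password.change_password)
    except jsonschema.ValidationError:
        pass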
diff --git a/nova/api/openstack/compute/schemas/v3/attach_interfaces.py b/nova/api/openstack/compute/schemas/v3/attach_interfaces.py
index 471275a02f..921ebc12fc 100644
--- a/nova/api/openstack/compute/schemas/v3/attach_interfaces.py
+++ b/nova/api/openstack/compute/schemas/v3/attach_interfaces.py
@@ -15,7 +15,7 @@
create = {
'type': 'object',
'properties': {
- 'interface_attachment': {
+ 'interfaceAttachment': {
'type': 'object',
'properties': {
'net_id': {
diff --git a/nova/api/openstack/compute/schemas/v3/cells.py b/nova/api/openstack/compute/schemas/v3/cells.py
new file mode 100644
index 0000000000..37a9ed5cc0
--- /dev/null
+++ b/nova/api/openstack/compute/schemas/v3/cells.py
@@ -0,0 +1,99 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.validation import parameter_types
+
+
+create = {
+ 'type': 'object',
+ 'properties': {
+ 'cell': {
+ 'type': 'object',
+ 'properties': {
+ 'name': parameter_types.name,
+ 'type': {
+ 'type': 'string',
+ 'enum': ['parent', 'child'],
+ },
+
+ # NOTE: In unparse_transport_url(), a url consists of the
+ # following parameters:
+ # "qpid://:@:/"
+ # or
+ # "rabiit://:@:/"
+ # Then the url is stored into transport_url of cells table
+ # which is defined with String(255).
+ 'username': {
+ 'type': 'string', 'maxLength': 255,
+ 'pattern': '^[a-zA-Z0-9-_]*$'
+ },
+ 'password': {
+ # Allow any string here, so strong passwords are not rejected.
+ 'type': 'string', 'maxLength': 255,
+ },
+ 'rpc_host': parameter_types.hostname_or_ip_address,
+ 'rpc_port': parameter_types.tcp_udp_port,
+ 'rpc_virtual_host': parameter_types.hostname_or_ip_address,
+ },
+ 'required': ['name'],
+ 'additionalProperties': False,
+ },
+ },
+ 'required': ['cell'],
+ 'additionalProperties': False,
+}
+
+
+update = {
+ 'type': 'object',
+ 'properties': {
+ 'cell': {
+ 'type': 'object',
+ 'properties': {
+ 'name': parameter_types.name,
+ 'type': {
+ 'type': 'string',
+ 'enum': ['parent', 'child'],
+ },
+ 'username': {
+ 'type': 'string', 'maxLength': 255,
+ 'pattern': '^[a-zA-Z0-9-_]*$'
+ },
+ 'password': {
+ 'type': 'string', 'maxLength': 255,
+ },
+ 'rpc_host': parameter_types.hostname_or_ip_address,
+ 'rpc_port': parameter_types.tcp_udp_port,
+ 'rpc_virtual_host': parameter_types.hostname_or_ip_address,
+ },
+ 'additionalProperties': False,
+ },
+ },
+ 'required': ['cell'],
+ 'additionalProperties': False,
+}
+
+
+sync_instances = {
+ 'type': 'object',
+ 'properties': {
+ 'project_id': parameter_types.project_id,
+ 'deleted': parameter_types.boolean,
+ 'updated_since': {
+ 'type': 'string',
+ 'format': 'date-time',
+ },
+ },
+ 'additionalProperties': False,
+}
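Note: the field length caps above exist because the assembled transport URL must fit the String(255) transport_url column. A rough sketch of the composition the NOTE describes (the helper name is illustrative; nova's actual assembler is unparse_transport_url()):

    def build_transport_url(username, password, rpc_host, rpc_port,
                            rpc_virtual_host, scheme='rabbit'):
        # "scheme://<username>:<password>@<rpc_host>:<rpc_port>/<vhost>"
        url = '%s://%s:%s@%s:%s/%s' % (scheme, username, password,
                                       rpc_host, rpc_port,
                                       rpc_virtual_host)
        if len(url) > 255:
            raise ValueError('transport_url exceeds the String(255) column')
        return url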
diff --git a/nova/tests/glance/__init__.py b/nova/api/openstack/compute/schemas/v3/config_drive.py
similarity index 77%
rename from nova/tests/glance/__init__.py
rename to nova/api/openstack/compute/schemas/v3/config_drive.py
index eac840c7ed..659423ea24 100644
--- a/nova/tests/glance/__init__.py
+++ b/nova/api/openstack/compute/schemas/v3/config_drive.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-:mod:`glance` -- Stubs for Glance
-=================================
-"""
+from nova.api.validation import parameter_types
+
+server_create = {
+ 'config_drive': parameter_types.boolean,
+}
diff --git a/nova/api/openstack/compute/schemas/v3/evacuate.py b/nova/api/openstack/compute/schemas/v3/evacuate.py
index c48b45560a..a71e995dc6 100644
--- a/nova/api/openstack/compute/schemas/v3/evacuate.py
+++ b/nova/api/openstack/compute/schemas/v3/evacuate.py
@@ -25,7 +25,7 @@
'on_shared_storage': parameter_types.boolean,
'admin_password': parameter_types.admin_password,
},
- 'required': ['host', 'on_shared_storage'],
+ 'required': ['on_shared_storage'],
'additionalProperties': False,
},
},
diff --git a/nova/api/openstack/compute/schemas/v3/flavor_manage.py b/nova/api/openstack/compute/schemas/v3/flavor_manage.py
index 173dd1783c..48c4fb2d83 100644
--- a/nova/api/openstack/compute/schemas/v3/flavor_manage.py
+++ b/nova/api/openstack/compute/schemas/v3/flavor_manage.py
@@ -62,7 +62,14 @@
},
'flavor-access:is_public': parameter_types.boolean,
},
- 'required': ['name', 'id', 'ram', 'vcpus', 'disk'],
+ # TODO(oomichi): 'id' should be required with v2.1+microversions.
+ # On the v2.0 API, nova-api generates a flavor-id automatically if
+ # 'id' is specified as null or omitted. Ideally a client would have
+ # to specify null as 'id' explicitly to request an auto-generated id.
+ # However, that strict requirement would be a backwards-incompatible
+ # change on v2.1, so the 'id' requirement is relaxed here.
+ 'required': ['name', 'ram', 'vcpus', 'disk'],
'additionalProperties': False,
},
},
diff --git a/nova/api/openstack/compute/schemas/v3/flavors_extraspecs.py b/nova/api/openstack/compute/schemas/v3/flavors_extraspecs.py
new file mode 100644
index 0000000000..0f702b78e1
--- /dev/null
+++ b/nova/api/openstack/compute/schemas/v3/flavors_extraspecs.py
@@ -0,0 +1,33 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from nova.api.validation import parameter_types
+
+create = {
+ 'type': 'object',
+ 'properties': {
+ 'extra_specs': parameter_types.metadata
+ },
+ 'required': ['extra_specs'],
+ 'additionalProperties': False,
+}
+
+
+update = copy.deepcopy(parameter_types.metadata)
+update.update({
+ 'minProperties': 1,
+ 'maxProperties': 1
+})
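Note: since the update API replaces a single extra-spec key, the update schema is just the metadata object pinned to exactly one property. A quick check, again assuming jsonschema:

    import jsonschema

    from nova.api.openstack.compute.schemas.v3 import flavors_extraspecs

    jsonschema.validate({'hw:cpu_policy': 'dedicated'},
                        flavors_extraspecs.update)  # one key: valid

    for bad_body in ({}, {'a': '1', 'b': '2'}):
        try:
            jsonschema.validate(bad_body, flavors_extraspecs.update)
        except jsonschema.ValidationError:
            pass  # zero or two keys violate min/maxProperties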
diff --git a/nova/api/openstack/compute/schemas/v3/hosts.py b/nova/api/openstack/compute/schemas/v3/hosts.py
new file mode 100644
index 0000000000..30ec09f40a
--- /dev/null
+++ b/nova/api/openstack/compute/schemas/v3/hosts.py
@@ -0,0 +1,43 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+update = {
+ 'type': 'object',
+ 'properties': {
+ 'host': {
+ 'type': 'object',
+ 'properties': {
+ 'status': {
+ 'type': 'string',
+ 'enum': ['enable', 'disable',
+ 'Enable', 'Disable',
+ 'ENABLE', 'DISABLE'],
+ },
+ 'maintenance_mode': {
+ 'type': 'string',
+ 'enum': ['enable', 'disable',
+ 'Enable', 'Disable',
+ 'ENABLE', 'DISABLE'],
+ },
+ },
+ 'anyOf': [
+ {'required': ['status']},
+ {'required': ['maintenance_mode']}
+ ],
+ 'additionalProperties': False,
+ },
+ },
+ 'required': ['host'],
+ 'additionalProperties': False,
+}
diff --git a/nova/api/openstack/compute/schemas/v3/keypairs.py b/nova/api/openstack/compute/schemas/v3/keypairs.py
index 08b1961247..8d4c9f2d23 100644
--- a/nova/api/openstack/compute/schemas/v3/keypairs.py
+++ b/nova/api/openstack/compute/schemas/v3/keypairs.py
@@ -31,3 +31,7 @@
'required': ['keypair'],
'additionalProperties': False,
}
+
+server_create = {
+ 'key_name': parameter_types.name,
+}
diff --git a/nova/api/openstack/compute/schemas/v3/reset_server_state.py b/nova/api/openstack/compute/schemas/v3/reset_server_state.py
new file mode 100644
index 0000000000..ca8bd09337
--- /dev/null
+++ b/nova/api/openstack/compute/schemas/v3/reset_server_state.py
@@ -0,0 +1,32 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+reset_state = {
+ 'type': 'object',
+ 'properties': {
+ 'reset_state': {
+ 'type': 'object',
+ 'properties': {
+ 'state': {
+ 'type': 'string',
+ 'enum': ['active', 'error'],
+ },
+ },
+ 'required': ['state'],
+ 'additionalProperties': False,
+ },
+ },
+ 'required': ['reset_state'],
+ 'additionalProperties': False,
+}
diff --git a/nova/api/openstack/compute/schemas/v3/security_groups.py b/nova/api/openstack/compute/schemas/v3/security_groups.py
new file mode 100644
index 0000000000..aafd296e68
--- /dev/null
+++ b/nova/api/openstack/compute/schemas/v3/security_groups.py
@@ -0,0 +1,28 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.validation import parameter_types
+
+server_create = {
+ 'os-security-groups:security_groups': {
+ 'type': 'array',
+ 'items': {
+ 'type': 'object',
+ 'properties': {
+ 'name': parameter_types.name,
+ },
+ 'additionalProperties': False,
+ }
+ },
+}
diff --git a/nova/api/openstack/compute/schemas/v3/servers.py b/nova/api/openstack/compute/schemas/v3/servers.py
new file mode 100644
index 0000000000..a4a91b13c1
--- /dev/null
+++ b/nova/api/openstack/compute/schemas/v3/servers.py
@@ -0,0 +1,36 @@
+# Copyright 2014 NEC Corporation. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+base_create = {
+ 'type': 'object',
+ 'properties': {
+ 'server': {
+ 'type': 'object',
+ 'properties': {
+ # TODO(oomichi): To keep the focus on the schema-extension
+ # mechanism, these properties are not defined yet. They need
+ # to be defined later.
+ # 'name': ...
+ },
+ # TODO(oomichi): After all extension schema patches are merged,
+ # this code should be enabled. If it were enabled before they
+ # merge, API extension parameters would be rejected as bad
+ # parameters.
+ # 'additionalProperties': False,
+ },
+ },
+ 'required': ['server'],
+ # TODO(oomichi): ditto, enable here after all extension schema
+ # patches are merged.
+ # 'additionalProperties': False,
+}
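Note: the server_create fragments defined above (config_drive, keypairs, security_groups) are what _create_extension_schema folds into this skeleton. A sketch of the merged result, mirroring that method's update call:

    import copy

    from nova.api.openstack.compute.schemas.v3 import config_drive
    from nova.api.openstack.compute.schemas.v3 import keypairs
    from nova.api.openstack.compute.schemas.v3 import servers

    schema = copy.deepcopy(servers.base_create)
    for fragment in (config_drive.server_create, keypairs.server_create):
        schema['properties']['server']['properties'].update(fragment)

    assert 'config_drive' in schema['properties']['server']['properties']
    assert 'key_name' in schema['properties']['server']['properties']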
diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py
index 7e11d1184c..68cc188e0d 100644
--- a/nova/api/openstack/compute/server_metadata.py
+++ b/nova/api/openstack/compute/server_metadata.py
@@ -19,7 +19,7 @@
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
class Controller(object):
@@ -134,9 +134,7 @@ def _update_instance_metadata(self, context, server_id, metadata,
explanation=error.format_message())
except exception.QuotaError as error:
- raise exc.HTTPRequestEntityTooLarge(
- explanation=error.format_message(),
- headers={'Retry-After': 0})
+ raise exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index ab4f262ba3..678bb6a7fd 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -33,9 +33,9 @@
from nova import compute
from nova.compute import flavors
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LW
from nova import objects
-from nova.objects import instance as instance_obj
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
@@ -79,8 +79,8 @@ def make_server(elem, detailed=False):
global XML_WARNING
if not XML_WARNING:
- LOG.warning(_('XML support has been deprecated and may be removed '
- 'as early as the Juno release.'))
+ LOG.warn(_LW('XML support has been deprecated and may be removed '
+ 'as early as the Juno release.'))
XML_WARNING = True
if detailed:
@@ -124,7 +124,7 @@ class ServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root, detailed=True)
- return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
@@ -133,7 +133,7 @@ def construct(self):
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
xmlutil.make_links(root, 'servers_links')
- return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
@@ -141,27 +141,27 @@ def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem, detailed=True)
- return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('adminPass')
- return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.SubordinateTemplate(root, 1, nsmap=server_nsmap)
class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('reservation_id')
- return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
+ return xmlutil.MainTemplate(root, 1, nsmap=server_nsmap)
def FullServerTemplate():
- master = ServerTemplate()
- master.attach(ServerAdminPassTemplate())
- return master
+ main = ServerTemplate()
+ main.attach(ServerAdminPassTemplate())
+ return main
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
@@ -534,9 +534,11 @@ def _get_servers(self, req, is_detail):
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
- status = search_opts.pop('status', None)
- if status is not None:
- vm_state, task_state = common.task_and_vm_state_from_status(status)
+ search_opts.pop('status', None)
+ if 'status' in req.GET.keys():
+ statuses = req.GET.getall('status')
+ states = common.task_and_vm_state_from_status(statuses)
+ vm_state, task_state = states
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
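Note: the switch from search_opts.pop('status') to req.GET.getall('status') is what enables the os-server-list-multi-status behavior: a MultiDict keeps every occurrence of a repeated query key. A small illustration with webob:

    import webob

    req = webob.Request.blank('/servers?status=ACTIVE&status=ERROR')
    # Plain item access yields only one value...
    assert req.GET['status'] == 'ERROR'
    # ...while getall() preserves every occurrence for filtering.
    assert req.GET.getall('status') == ['ACTIVE', 'ERROR']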
@@ -600,14 +602,11 @@ def _get_servers(self, req, is_detail):
limit=limit,
marker=marker,
want_objects=True)
- for instance in instance_list:
- instance_obj.add_image_ref(context, instance)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
- log_msg = _("Flavor '%s' could not be found ")
- LOG.debug(log_msg, search_opts['flavor'])
+ LOG.debug("Flavor '%s' could not be found", search_opts['flavor'])
# TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
instance_list = objects.InstanceList(objects=[])
@@ -695,9 +694,9 @@ def _get_requested_networks(self, requested_networks):
"(%s)") % network_uuid
raise exc.HTTPBadRequest(explanation=msg)
- #fixed IP address is optional
- #if the fixed IP address is not provided then
- #it will use one of the available IP address from the network
+ # fixed IP address is optional
+ # if the fixed IP address is not provided then
+ # it will use one of the available IP addresses from the network
address = network.get('fixed_ip', None)
if address is not None and not utils.is_valid_ip_address(
address):
@@ -769,8 +768,7 @@ def show(self, req, id):
context = req.environ['nova.context']
instance = self.compute_api.get(context, id,
want_objects=True)
- req.cache_db_instance(instance_obj.add_image_ref(context,
- instance))
+ req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
@@ -959,7 +957,7 @@ def create(self, req, body):
legacy_bdm=legacy_bdm)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
- raise exc.HTTPRequestEntityTooLarge(
+ raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InvalidMetadataSize as error:
@@ -987,23 +985,18 @@ def create(self, req, body):
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
- exception.InvalidMetadata,
- exception.InvalidRequest,
- exception.MultiplePortsNotApplicable,
- exception.InvalidFixedIpAndMaxCountRequest,
exception.NetworkNotFound,
exception.PortNotFound,
exception.FixedIpAlreadyInUse,
exception.SecurityGroupNotFound,
- exception.InvalidBDM,
- exception.PortRequiresFixedIP,
- exception.NetworkRequiresSubnet,
exception.InstanceUserDataTooLarge,
exception.InstanceUserDataMalformed) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
+ except exception.Invalid as error:
+ raise exc.HTTPBadRequest(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if ret_resv_id:
@@ -1168,7 +1161,7 @@ def _resize(self, req, instance_id, flavor_id, **kwargs):
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
- raise exc.HTTPRequestEntityTooLarge(
+ raise exc.HTTPForbidden(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
@@ -1177,6 +1170,8 @@ def _resize(self, req, instance_id, flavor_id, **kwargs):
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
+ except exception.CannotResizeDisk as e:
+ raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
@@ -1193,7 +1188,8 @@ def _resize(self, req, instance_id, flavor_id, **kwargs):
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
- except exception.NoValidHost as e:
+ except (exception.NoValidHost,
+ exception.AutoDiskConfigDisabledByImage) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
return webob.Response(status_int=202)
@@ -1397,10 +1393,13 @@ def _action_rebuild(self, req, id, body):
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
+ except exception.QuotaError as error:
+ raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
- exception.InvalidMetadata) as error:
+ exception.InvalidMetadata,
+ exception.AutoDiskConfigDisabledByImage) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
@@ -1450,10 +1449,10 @@ def _action_create_image(self, req, id, body):
bdms):
img = instance['image_ref']
if not img:
- props = bdms.root_metadata(
+ properties = bdms.root_metadata(
context, self.compute_api.image_api,
self.compute_api.volume_api)
- image_meta = {'properties': props}
+ image_meta = {'properties': properties}
else:
image_meta = self.compute_api.image_api.get(context, img)
diff --git a/nova/api/openstack/compute/views/images.py b/nova/api/openstack/compute/views/images.py
index b6d735129f..054fcfb1f8 100644
--- a/nova/api/openstack/compute/views/images.py
+++ b/nova/api/openstack/compute/views/images.py
@@ -147,20 +147,3 @@ def _get_progress(image):
"saving": 50,
"active": 100,
}.get(image.get("status"), 0)
-
-
-class ViewBuilderV3(ViewBuilder):
-
- def _get_bookmark_link(self, request, identifier, collection_name):
- """Create a URL that refers to a specific resource."""
- if collection_name == "images":
- glance_url = glance.generate_image_url(identifier)
- return self._update_glance_link_prefix(glance_url)
- else:
- raise NotImplementedError
- # NOTE(cyeoh) The V3 version of _get_bookmark_link should
- # only ever be called with images as the
- # collection_name. The images API has been removed in the
- # V3 API and the V3 version of the view only exists for
- # the servers view to be able to generate the appropriate
- # bookmark link for the image of the instance.
diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py
index f09d4272e2..621e9617c1 100644
--- a/nova/api/openstack/compute/views/servers.py
+++ b/nova/api/openstack/compute/views/servers.py
@@ -21,8 +21,8 @@
from nova.api.openstack.compute.views import flavors as views_flavors
from nova.api.openstack.compute.views import images as views_images
from nova.compute import flavors
+from nova.i18n import _LW
from nova.objects import base as obj_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
@@ -194,8 +194,8 @@ def _get_image(self, request, instance):
def _get_flavor(self, request, instance):
instance_type = flavors.extract_flavor(instance)
if not instance_type:
- LOG.warn(_("Instance has had its instance_type removed "
- "from the DB"), instance=instance)
+ LOG.warn(_LW("Instance has had its instance_type removed "
+ "from the DB"), instance=instance)
return {}
flavor_id = instance_type["flavorid"]
flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
@@ -241,7 +241,9 @@ def __init__(self):
"""Initialize view builder."""
super(ViewBuilderV3, self).__init__()
self._address_builder = views_addresses.ViewBuilderV3()
- self._image_builder = views_images.ViewBuilderV3()
+ # TODO(alex_xu): In the V3 API, we corrected the image bookmark link
+ # to use the glance endpoint. We revert it back to the nova endpoint
+ # for v2.1.
+ self._image_builder = views_images.ViewBuilder()
def show(self, request, instance):
"""Detailed view of a single instance."""
@@ -253,7 +255,10 @@ def show(self, request, instance):
"tenant_id": instance.get("project_id") or "",
"user_id": instance.get("user_id") or "",
"metadata": self._get_metadata(instance),
- "host_id": self._get_host_id(instance) or "",
+ "hostId": self._get_host_id(instance) or "",
+ # TODO(alex_xu): '_get_image' returns {} when image_ref does not
+ # exist in the V3 API; we revert it back to returning "" in
+ # V2.1.
"image": self._get_image(request, instance),
"flavor": self._get_flavor(request, instance),
"created": timeutils.isotime(instance["created_at"]),
@@ -272,7 +277,4 @@ def show(self, request, instance):
if server["server"]["status"] in self._progress_statuses:
server["server"]["progress"] = instance.get("progress", 0)
- # We should modify the "image" to empty dictionary
- if not server["server"]["image"]:
- server["server"]["image"] = {}
return server
diff --git a/nova/api/openstack/compute/views/versions.py b/nova/api/openstack/compute/views/versions.py
index 242d93f1ad..572d73bd64 100644
--- a/nova/api/openstack/compute/views/versions.py
+++ b/nova/api/openstack/compute/views/versions.py
@@ -32,7 +32,7 @@ def __init__(self, base_url):
def build_choices(self, VERSIONS, req):
version_objs = []
- for version in VERSIONS:
+ for version in sorted(VERSIONS):
version = VERSIONS[version]
version_objs.append({
"id": version['id'],
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
index 5ea32160b6..cb23a67605 100644
--- a/nova/api/openstack/extensions.py
+++ b/nova/api/openstack/extensions.py
@@ -26,7 +26,8 @@
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.i18n import _LW
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
import nova.policy
@@ -275,8 +276,8 @@ def _load_extensions(self):
try:
self.load_extension(ext_factory)
except Exception as exc:
- LOG.warn(_('Failed to load extension %(ext_factory)s: '
- '%(exc)s'),
+ LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
+ '%(exc)s'),
{'ext_factory': ext_factory, 'exc': exc})
@@ -394,8 +395,8 @@ def extension_authorizer(api_name, extension_name):
return core_authorizer('%s_extension' % api_name, extension_name)
-def soft_extension_authorizer(api_name, extension_name):
- hard_authorize = extension_authorizer(api_name, extension_name)
+def soft_authorizer(hard_authorizer, api_name, extension_name):
+ hard_authorize = hard_authorizer(api_name, extension_name)
def authorize(context, action=None):
try:
@@ -406,6 +407,14 @@ def authorize(context, action=None):
return authorize
+def soft_extension_authorizer(api_name, extension_name):
+ return soft_authorizer(extension_authorizer, api_name, extension_name)
+
+
+def soft_core_authorizer(api_name, extension_name):
+ return soft_authorizer(core_authorizer, api_name, extension_name)
+
+
def check_compute_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
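Note: soft_authorizer() factors the existing try/except wrapper out of soft_extension_authorizer so the same soft semantics can sit on top of either the extension or the core authorizer. A sketch of the wrapper's behavior, assuming the denial surfaces as nova's Forbidden exception:

    from nova import exception


    def soft_authorize_sketch(hard_authorize):
        # Converts a raising authorizer into one that returns True/False,
        # so callers can branch instead of handling the denial exception.
        def authorize(context, action=None):
            try:
                hard_authorize(context, action=action)
                return True
            except exception.Forbidden:  # assumed denial exception type
                return False
        return authorize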
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index d8458af2f5..9a0b892bae 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -25,8 +25,10 @@
from nova.api.openstack import xmlutil
from nova import exception
-from nova.openstack.common import gettextutils
-from nova.openstack.common.gettextutils import _
+from nova import i18n
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LI
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
@@ -193,7 +195,7 @@ def best_match_language(self):
if not self.accept_language:
return None
return self.accept_language.best_match(
- gettextutils.get_available_languages('nova'))
+ i18n.get_available_languages())
class ActionDispatcher(object):
@@ -373,7 +375,7 @@ def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toxml('UTF-8')
- #NOTE (ameade): the has_atom should be removed after all of the
+ # NOTE (ameade): the has_atom should be removed after all of the
# xml serializers and view builders have been updated to the current
# spec that required all responses include the xmlns:atom, the has_atom
# flag is to prevent current tests from breaking
@@ -393,7 +395,7 @@ def _to_xml_node(self, doc, metadata, nodename, data):
if xmlns:
result.setAttribute('xmlns', xmlns)
- #TODO(bcwaldon): accomplish this without a type-check
+ # TODO(bcwaldon): accomplish this without a type-check
if isinstance(data, list):
collections = metadata.get('list_collections', {})
if nodename in collections:
@@ -412,7 +414,7 @@ def _to_xml_node(self, doc, metadata, nodename, data):
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
- #TODO(bcwaldon): accomplish this without a type-check
+ # TODO(bcwaldon): accomplish this without a type-check
elif isinstance(data, dict):
collections = metadata.get('dict_collections', {})
if nodename in collections:
@@ -582,7 +584,7 @@ def preserialize(self, content_type, default_serializers=None):
self.serializer = serializer()
def attach(self, **kwargs):
- """Attach slave templates to serializers."""
+ """Attach subordinate templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
@@ -679,14 +681,14 @@ def __exit__(self, ex_type, ex_value, ex_traceback):
# http://bugs.python.org/issue7853
elif issubclass(ex_type, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
- LOG.error(_('Exception handling resource: %s') % ex_value,
- exc_info=exc_info)
+ LOG.error(_LE('Exception handling resource: %s'), ex_value,
+ exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
- LOG.info(_("Fault thrown: %s"), unicode(ex_value))
+ LOG.info(_LI("Fault thrown: %s"), unicode(ex_value))
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
- LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
+ LOG.info(_LI("HTTP exception thrown: %s"), unicode(ex_value))
raise Fault(ex_value)
# We didn't handle the exception
@@ -937,7 +939,7 @@ def _process_stack(self, request, action, action_args,
try:
contents = {}
if self._should_have_body(request):
- #allow empty body with PUT and POST
+ # allow empty body with PUT and POST
if request.content_length == 0:
contents = {'body': None}
else:
@@ -1197,8 +1199,7 @@ def __call__(self, req):
LOG.debug("Returning %(code)s to user: %(explanation)s",
{'code': code, 'explanation': explanation})
- explanation = gettextutils.translate(explanation,
- user_locale)
+ explanation = i18n.translate(explanation, user_locale)
fault_data = {
fault_name: {
'code': code,
@@ -1261,13 +1262,9 @@ def __call__(self, request):
metadata = {"attributes": {"overLimit": ["code", "retryAfter"]}}
self.content['overLimit']['message'] = \
- gettextutils.translate(
- self.content['overLimit']['message'],
- user_locale)
+ i18n.translate(self.content['overLimit']['message'], user_locale)
self.content['overLimit']['details'] = \
- gettextutils.translate(
- self.content['overLimit']['details'],
- user_locale)
+ i18n.translate(self.content['overLimit']['details'], user_locale)
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
serializer = {
diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py
index 68e50f82ea..e2ae2acd85 100644
--- a/nova/api/openstack/xmlutil.py
+++ b/nova/api/openstack/xmlutil.py
@@ -23,7 +23,7 @@
import six
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import utils
@@ -672,13 +672,13 @@ def wrap(self):
# We are a template
return self
- def apply(self, master):
- """Hook method for determining slave applicability.
+ def apply(self, main):
+ """Hook method for determining subordinate applicability.
An overridable hook method used to determine if this template
- is applicable as a slave to a given master template.
+ is applicable as a subordinate to a given main template.
- :param master: The master template to test.
+ :param main: The main template to test.
"""
return True
@@ -693,17 +693,17 @@ def tree(self):
return "%r: %s" % (self, self.root.tree())
-class MasterTemplate(Template):
- """Represent a master template.
+class MainTemplate(Template):
+ """Represent a main template.
- Master templates are versioned derivatives of templates that
- additionally allow slave templates to be attached. Slave
+ Main templates are versioned derivatives of templates that
+ additionally allow subordinate templates to be attached. Subordinate
templates allow modification of the serialized result without
- directly changing the master.
+ directly changing the main.
"""
def __init__(self, root, version, nsmap=None):
- """Initialize a master template.
+ """Initialize a main template.
:param root: The root element of the template.
:param version: The version number of the template.
@@ -712,9 +712,9 @@ def __init__(self, root, version, nsmap=None):
template.
"""
- super(MasterTemplate, self).__init__(root, nsmap)
+ super(MainTemplate, self).__init__(root, nsmap)
self.version = version
- self.slaves = []
+ self.subordinates = []
def __repr__(self):
"""Return string representation of the template."""
@@ -728,88 +728,88 @@ def _siblings(self):
An overridable hook method to return the siblings of the root
element. This is the root element plus the root elements of
- all the slave templates.
+ all the subordinate templates.
"""
- return [self.root] + [slave.root for slave in self.slaves]
+ return [self.root] + [subordinate.root for subordinate in self.subordinates]
def _nsmap(self):
"""Hook method for computing the namespace dictionary.
An overridable hook method to return the namespace dictionary.
- The namespace dictionary is computed by taking the master
+ The namespace dictionary is computed by taking the main
template's namespace dictionary and updating it from all the
- slave templates.
+ subordinate templates.
"""
nsmap = self.nsmap.copy()
- for slave in self.slaves:
- nsmap.update(slave._nsmap())
+ for subordinate in self.subordinates:
+ nsmap.update(subordinate._nsmap())
return nsmap
- def attach(self, *slaves):
- """Attach one or more slave templates.
+ def attach(self, *subordinates):
+ """Attach one or more subordinate templates.
- Attaches one or more slave templates to the master template.
- Slave templates must have a root element with the same tag as
- the master template. The slave template's apply() method will
- be called to determine if the slave should be applied to this
- master; if it returns False, that slave will be skipped.
- (This allows filtering of slaves based on the version of the
- master template.)
+ Attaches one or more subordinate templates to the main template.
+ Subordinate templates must have a root element with the same tag as
+ the main template. The subordinate template's apply() method will
+ be called to determine if the subordinate should be applied to this
+ main; if it returns False, that subordinate will be skipped.
+ (This allows filtering of subordinates based on the version of the
+ main template.)
"""
- slave_list = []
- for slave in slaves:
- slave = slave.wrap()
+ subordinate_list = []
+ for subordinate in subordinates:
+ subordinate = subordinate.wrap()
# Make sure we have a tree match
- if slave.root.tag != self.root.tag:
- msg = _("Template tree mismatch; adding slave %(slavetag)s to "
- "master %(mastertag)s") % {'slavetag': slave.root.tag,
- 'mastertag': self.root.tag}
+ if subordinate.root.tag != self.root.tag:
+ msg = _("Template tree mismatch; adding subordinate %(subordinatetag)s to "
+ "main %(maintag)s") % {'subordinatetag': subordinate.root.tag,
+ 'maintag': self.root.tag}
raise ValueError(msg)
- # Make sure slave applies to this template
- if not slave.apply(self):
+ # Make sure subordinate applies to this template
+ if not subordinate.apply(self):
continue
- slave_list.append(slave)
+ subordinate_list.append(subordinate)
- # Add the slaves
- self.slaves.extend(slave_list)
+ # Add the subordinates
+ self.subordinates.extend(subordinate_list)
def copy(self):
- """Return a copy of this master template."""
+ """Return a copy of this main template."""
- # Return a copy of the MasterTemplate
+ # Return a copy of the MainTemplate
tmp = self.__class__(self.root, self.version, self.nsmap)
- tmp.slaves = self.slaves[:]
+ tmp.subordinates = self.subordinates[:]
return tmp
-class SlaveTemplate(Template):
- """Represent a slave template.
+class SubordinateTemplate(Template):
+ """Represent a subordinate template.
- Slave templates are versioned derivatives of templates. Each
- slave has a minimum version and optional maximum version of the
- master template to which they can be attached.
+ Subordinate templates are versioned derivatives of templates. Each
+ subordinate has a minimum version and optional maximum version of the
+ main template to which they can be attached.
"""
def __init__(self, root, min_vers, max_vers=None, nsmap=None):
- """Initialize a slave template.
+ """Initialize a subordinate template.
:param root: The root element of the template.
- :param min_vers: The minimum permissible version of the master
- template for this slave template to apply.
- :param max_vers: An optional upper bound for the master
+ :param min_vers: The minimum permissible version of the main
+ template for this subordinate template to apply.
+ :param max_vers: An optional upper bound for the main
template version.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
- super(SlaveTemplate, self).__init__(root, nsmap)
+ super(SubordinateTemplate, self).__init__(root, nsmap)
self.min_vers = min_vers
self.max_vers = max_vers
@@ -820,23 +820,23 @@ def __repr__(self):
(self.__class__.__module__, self.__class__.__name__,
self.min_vers, self.max_vers, id(self)))
- def apply(self, master):
- """Hook method for determining slave applicability.
+ def apply(self, main):
+ """Hook method for determining subordinate applicability.
An overridable hook method used to determine if this template
- is applicable as a slave to a given master template. This
- version requires the master template to have a version number
+ is applicable as a subordinate to a given main template. This
+ version requires the main template to have a version number
between min_vers and max_vers.
- :param master: The master template to test.
+ :param main: The main template to test.
"""
- # Does the master meet our minimum version requirement?
- if master.version < self.min_vers:
+ # Does the main meet our minimum version requirement?
+ if main.version < self.min_vers:
return False
# How about our maximum version requirement?
- if self.max_vers is not None and master.version > self.max_vers:
+ if self.max_vers is not None and main.version > self.max_vers:
return False
return True
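Note: attach() plus apply() give version-gated composition: a subordinate outside the main template's version window is skipped rather than raising. A minimal sketch using the renamed classes:

    from nova.api.openstack import xmlutil

    main = xmlutil.MainTemplate(xmlutil.TemplateElement('server'), 2)
    sub_root = xmlutil.TemplateElement('server')
    sub_root.set('adminPass')
    # apply() requires 1 <= main.version <= 1, so the version-2 main
    # silently skips this subordinate instead of attaching it.
    main.attach(xmlutil.SubordinateTemplate(sub_root, 1, max_vers=1))
    assert main.subordinates == []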
@@ -990,7 +990,7 @@ def safe_minidom_parse_string(xml_string):
return minidom.parseString(xml_string, parser=ProtectedExpatParser())
except (sax.SAXParseException, ValueError,
expat.ExpatError, LookupError) as e:
- #NOTE(Vijaya Erukala): XML input such as
+ # NOTE(Vijaya Erukala): XML input such as
# <?xml version="1.0" encoding="TF-8"?>
# raises LookupError: unknown encoding: TF-8
# raises LookupError: unknown encoding: TF-8
raise exception.MalformedRequestBody(reason=str(e))
diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py
index 0248cbacb0..aa5c42e6aa 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/sizelimit.py
@@ -20,11 +20,11 @@
import webob.dec
import webob.exc
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import wsgi
-#default request size is 112k
+# default request size is 112k
max_request_body_size_opt = cfg.IntOpt('osapi_max_request_body_size',
default=114688,
help='The maximum body size '
diff --git a/nova/api/validation/parameter_types.py b/nova/api/validation/parameter_types.py
index 8c425df43e..fc2d491a9a 100644
--- a/nova/api/validation/parameter_types.py
+++ b/nova/api/validation/parameter_types.py
@@ -49,7 +49,13 @@
'type': 'string', 'minLength': 1, 'maxLength': 255,
# NOTE: Allow to some spaces in middle of name.
'pattern': '^(?! )[a-zA-Z0-9. _-]+(?<! )$',
diff --git a/nova/cells/state.py b/nova/cells/state.py
--- a/nova/cells/state.py
+++ b/nova/cells/state.py
+ attempts = 0
+ while True:
+ try:
+ self._cell_data_sync(force=True)
+ break
+ except db_exc.DBError as e:
+ attempts += 1
+ if attempts > 120:
+ raise
+ LOG.exception(_('DB error: %s') % e)
+ time.sleep(30)
my_cell_capabs = {}
for cap in CONF.cells.capabilities:
diff --git a/nova/cells/weights/mute_child.py b/nova/cells/weights/mute_child.py
index ff42d2673f..cc5c0a8c44 100644
--- a/nova/cells/weights/mute_child.py
+++ b/nova/cells/weights/mute_child.py
@@ -21,7 +21,7 @@
from oslo.config import cfg
from nova.cells import weights
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 8ac164cbb3..8962924b69 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -31,8 +31,8 @@
from nova import crypto
from nova import db
from nova import exception
+from nova.i18n import _
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import paths
from nova import utils
diff --git a/nova/cmd/__init__.py b/nova/cmd/__init__.py
index cbe5ae351e..5f1129d9c1 100644
--- a/nova/cmd/__init__.py
+++ b/nova/cmd/__init__.py
@@ -16,13 +16,18 @@
# TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR
import os
import sys
+import traceback
# NOTE(mikal): All of this is because if dnspython is present in your
# environment then eventlet monkeypatches socket.getaddrinfo() with an
# implementation which doesn't work for IPv6. What we're checking here is
# that the magic environment variable was set when the import happened.
+# NOTE(dims): Prevent this code from kicking in under docs generation
+# as it leads to spurious errors/warnings.
+stack = traceback.extract_stack()
if ('eventlet' in sys.modules and
- os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
+ os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes' and
+ (len(stack) < 2 or 'sphinx' not in stack[-2][0])):
raise ImportError('eventlet imported before nova/cmd/__init__ '
'(env var set to %s)'
% os.environ.get('EVENTLET_NO_GREENDNS'))
diff --git a/nova/cmd/all.py b/nova/cmd/all.py
index 75946cd29c..7d7c9d43b1 100644
--- a/nova/cmd/all.py
+++ b/nova/cmd/all.py
@@ -29,9 +29,9 @@
from oslo.config import cfg
from nova import config
+from nova.i18n import _
from nova import objects
from nova.objectstore import s3server
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import service
from nova import utils
diff --git a/nova/cmd/baremetal_deploy_helper.py b/nova/cmd/baremetal_deploy_helper.py
index fd997ce8e1..b4f546d1d9 100644
--- a/nova/cmd/baremetal_deploy_helper.py
+++ b/nova/cmd/baremetal_deploy_helper.py
@@ -29,9 +29,9 @@
from nova import config
from nova import context as nova_context
+from nova.i18n import _
from nova import objects
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import units
@@ -134,7 +134,7 @@ def mkswap(dev, label='swap1'):
def mkfs_ephemeral(dev, label="ephemeral0"):
- #TODO(jogo) support non-default mkfs options as well
+ # TODO(jogo) support non-default mkfs options as well
disk.mkfs("default", label, dev)
diff --git a/nova/cmd/baremetal_manage.py b/nova/cmd/baremetal_manage.py
index b62f744b56..e8283221ec 100644
--- a/nova/cmd/baremetal_manage.py
+++ b/nova/cmd/baremetal_manage.py
@@ -58,9 +58,9 @@
import six
from nova import config
+from nova.i18n import _
from nova import objects
from nova.openstack.common import cliutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import version
from nova.virt.baremetal.db import migration as bmdb_migration
diff --git a/nova/cmd/compute.py b/nova/cmd/compute.py
index 52f3c93ab9..bad4cf76d7 100644
--- a/nova/cmd/compute.py
+++ b/nova/cmd/compute.py
@@ -25,9 +25,9 @@
from nova import config
import nova.db.api
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.objects import base as objects_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
diff --git a/nova/cmd/dhcpbridge.py b/nova/cmd/dhcpbridge.py
index 1045020065..114b7484a2 100644
--- a/nova/cmd/dhcpbridge.py
+++ b/nova/cmd/dhcpbridge.py
@@ -31,11 +31,10 @@
from nova import context
import nova.db.api
from nova import exception
+from nova.i18n import _
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as objects_base
-from nova.objects import network as network_obj
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -72,7 +71,7 @@ def del_lease(mac, ip_address):
def init_leases(network_id):
"""Get the list of hosts for a network."""
ctxt = context.get_admin_context()
- network = network_obj.Network.get_by_id(ctxt, network_id)
+ network = objects.Network.get_by_id(ctxt, network_id)
network_manager = importutils.import_object(CONF.network_manager)
return network_manager.get_dhcp_leases(ctxt, network)
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index 5f75cb0223..125439c452 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -72,10 +72,10 @@
from nova import db
from nova.db import migration
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.openstack.common import cliutils
from nova.openstack.common.db import exception as db_exc
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import quota
@@ -614,10 +614,10 @@ def modify(self, fixed_range, project=None, host=None,
admin_context = context.get_admin_context()
network = db.network_get_by_cidr(admin_context, fixed_range)
net = {}
- #User can choose the following actions each for project and host.
- #1) Associate (set not None value given by project/host parameter)
- #2) Disassociate (set None by disassociate parameter)
- #3) Keep unchanged (project/host key is not added to 'net')
+ # User can choose the following actions each for project and host.
+ # 1) Associate (set not None value given by project/host parameter)
+ # 2) Disassociate (set None by disassociate parameter)
+ # 3) Keep unchanged (project/host key is not added to 'net')
if dis_project:
net['project_id'] = None
if dis_host:
diff --git a/nova/cmd/network.py b/nova/cmd/network.py
index 73d5c89bf2..490097aa71 100644
--- a/nova/cmd/network.py
+++ b/nova/cmd/network.py
@@ -25,9 +25,9 @@
from nova import config
import nova.db.api
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.objects import base as objects_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 612e1c1da2..9ae5f1d262 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -43,6 +43,8 @@
from nova.db import base
from nova import exception
from nova import hooks
+from nova.i18n import _
+from nova.i18n import _LE
from nova import image
from nova import network
from nova.network import model as network_model
@@ -54,7 +56,6 @@
from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
@@ -330,8 +331,9 @@ def _check_num_instances_quota(self, context, instance_type, min_count,
# Check the quota
try:
- reservations = QUOTAS.reserve(context, instances=max_count,
- cores=req_cores, ram=req_ram)
+ quotas = objects.Quotas(context)
+ quotas.reserve(context, instances=max_count,
+ cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
@@ -387,7 +389,7 @@ def _check_num_instances_quota(self, context, instance_type, min_count,
used=used, allowed=total_allowed,
resource=resource)
- return max_count, reservations
+ return max_count, quotas
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
@@ -400,10 +402,6 @@ def _check_metadata_properties_quota(self, context, metadata=None):
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
- LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
- "%(num_metadata)s metadata properties"),
- {'pid': context.project_id,
- 'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
@@ -411,16 +409,14 @@ def _check_metadata_properties_quota(self, context, metadata=None):
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
- if not isinstance(k, six.string_types):
- msg = _("Metadata property key '%s' is not a string.") % k
- raise exception.InvalidMetadata(reason=msg)
- if not isinstance(v, six.string_types):
- msg = (_("Metadata property value '%(v)s' for key '%(k)s' is "
- "not a string.") % {'v': v, 'k': k})
- raise exception.InvalidMetadata(reason=msg)
- if len(k) == 0:
- msg = _("Metadata property key blank")
- raise exception.InvalidMetadata(reason=msg)
+ try:
+ utils.check_string_length(v)
+ utils.check_string_length(k, min_length=1)
+ except exception.InvalidInput as e:
+ raise exception.InvalidMetadata(reason=e.format_message())
+
+            # For backward compatibility we need to raise
+            # HTTPRequestEntityTooLarge, so the InvalidMetadataSize
+            # exception is kept here
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
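
(A hypothetical sketch of what the utils.check_string_length helper used above consolidates; the signature is inferred from the two calls in this hunk and is an assumption, not the actual implementation:)

    import six

    def check_string_length(value, min_length=0, max_length=None):
        # Assumed behavior: raise InvalidInput on any violation, so callers
        # can re-wrap it (here as InvalidMetadata).
        if not isinstance(value, six.string_types):
            raise exception.InvalidInput(reason='%r is not a string' % value)
        if len(value) < min_length:
            raise exception.InvalidInput(
                reason='string length < %d' % min_length)
        if max_length is not None and len(value) > max_length:
            raise exception.InvalidInput(
                reason='string length > %d' % max_length)
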
@@ -562,8 +558,8 @@ def _apply_instance_name_template(self, context, instance, index):
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
- LOG.exception(_('Failed to set instance name using '
- 'multi_instance_display_name_template.'))
+ LOG.exception(_LE('Failed to set instance name using '
+ 'multi_instance_display_name_template.'))
new_name = instance['display_name']
instance.display_name = new_name
if not instance.get('hostname', None):
@@ -747,8 +743,9 @@ def _validate_and_build_base_options(self, context, instance_type,
key_name)
key_data = key_pair.public_key
- root_device_name = block_device.properties_root_device_name(
- boot_meta.get('properties', {}))
+ root_device_name = block_device.prepend_dev(
+ block_device.properties_root_device_name(
+ boot_meta.get('properties', {})))
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
@@ -803,9 +800,9 @@ def _build_filter_properties(self, context, scheduler_hints, forced_host,
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
- block_device_mapping):
+ block_device_mapping, shutdown_terminate):
# Reserve quotas
- num_instances, quota_reservations = self._check_num_instances_quota(
+ num_instances, quotas = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug("Going to run %s instances..." % num_instances)
instances = []
@@ -816,7 +813,7 @@ def _provision_instances(self, context, instance_type, min_count,
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
- num_instances, i)
+ num_instances, i, shutdown_terminate)
instances.append(instance)
# send a state update notification for the initial create to
@@ -835,10 +832,10 @@ def _provision_instances(self, context, instance_type, min_count,
except exception.ObjectActionError:
pass
finally:
- QUOTAS.rollback(context, quota_reservations)
+ quotas.rollback()
# Commit the reservations
- QUOTAS.commit(context, quota_reservations)
+ quotas.commit()
return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
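
(The quota hunks in this file all follow the same refactor: a bare reservations list from QUOTAS.reserve() becomes an objects.Quotas instance that remembers its own reservations and project/user ids. A sketch of the resulting call pattern, with the API assumed from the calls shown here:)

    quotas = objects.Quotas(context)
    quotas.reserve(context, instances=1, cores=2, ram=2048)
    try:
        provision()          # hypothetical work guarded by the reservation
        quotas.commit()      # consume the reservations
    except Exception:
        quotas.rollback()    # release them; no context/ids threaded through
        raise
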
@@ -927,7 +924,7 @@ def _create_instance(self, context, instance_type,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None,
- legacy_bdm=True):
+ legacy_bdm=True, shutdown_terminate=False):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
@@ -987,7 +984,7 @@ def _create_instance(self, context, instance_type,
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
- block_device_mapping)
+ block_device_mapping, shutdown_terminate)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host, forced_node, instance_type)
@@ -1153,15 +1150,6 @@ def _subsequent_list(l):
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
- def _populate_instance_shutdown_terminate(self, instance, image,
- block_device_mapping):
- """Populate instance shutdown_terminate information."""
- image_properties = image.get('properties', {})
- if (block_device_mapping or
- image_properties.get('mappings') or
- image_properties.get('block_device_mapping')):
- instance.shutdown_terminate = False
-
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
@@ -1224,11 +1212,11 @@ def _populate_instance_for_create(self, instance, image,
security_groups)
return instance
- #NOTE(bcwaldon): No policy check since this is only used by scheduler and
+ # NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
- index):
+ index, shutdown_terminate=False):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
@@ -1241,8 +1229,7 @@ def create_db_entry_for_new_instance(self, context, instance_type, image,
self._populate_instance_names(instance, num_instances)
- self._populate_instance_shutdown_terminate(instance, image,
- block_device_mapping)
+ instance.shutdown_terminate = shutdown_terminate
self.security_group_api.ensure_default(context)
instance.create(context)
@@ -1322,7 +1309,8 @@ def create(self, context, instance_type,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
- auto_disk_config=None, scheduler_hints=None, legacy_bdm=True):
+ auto_disk_config=None, scheduler_hints=None, legacy_bdm=True,
+ shutdown_terminate=False):
"""Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
@@ -1351,7 +1339,8 @@ def create(self, context, instance_type,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints,
- legacy_bdm=legacy_bdm)
+ legacy_bdm=legacy_bdm,
+ shutdown_terminate=shutdown_terminate)
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
@@ -1419,7 +1408,6 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
host = instance['host']
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
- reservations = None
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
@@ -1438,12 +1426,12 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
"from shelved instance (%s)."),
exc.format_message(), instance=instance)
except Exception as exc:
- LOG.exception(_("Something wrong happened when trying to "
- "delete snapshot from shelved instance."),
+ LOG.exception(_LE("Something wrong happened when trying to "
+ "delete snapshot from shelved instance."),
instance=instance)
original_task_state = instance.task_state
-
+ quotas = None
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
@@ -1453,10 +1441,10 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
- reservations = self._create_reservations(context,
- instance,
- original_task_state,
- project_id, user_id)
+ quotas = self._create_reservations(context,
+ instance,
+ original_task_state,
+ project_id, user_id)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
@@ -1465,11 +1453,7 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
- if reservations:
- QUOTAS.commit(context,
- reservations,
- project_id=project_id,
- user_id=user_id)
+ quotas.commit()
return
if not host:
@@ -1482,11 +1466,7 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
self.notifier, context, instance,
"%s.end" % delete_type,
system_metadata=instance.system_metadata)
- if reservations:
- QUOTAS.commit(context,
- reservations,
- project_id=project_id,
- user_id=user_id)
+ quotas.commit()
return
except exception.ObjectActionError:
instance.refresh()
@@ -1505,42 +1485,30 @@ def _delete(self, context, instance, delete_type, cb, **instance_attrs):
task_states.SOFT_DELETING):
LOG.info(_('Instance is already in deleting state, '
'ignoring this request'), instance=instance)
- if reservations:
- QUOTAS.rollback(context, reservations,
- project_id=project_id,
- user_id=user_id)
+ quotas.rollback()
return
self._record_action_start(context, instance,
instance_actions.DELETE)
- cb(context, instance, bdms, reservations=reservations)
+ cb(context, instance, bdms,
+ reservations=quotas.reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms, delete_type, cb)
- if reservations:
- QUOTAS.commit(context,
- reservations,
- project_id=project_id,
- user_id=user_id)
- reservations = None
+ quotas.commit()
+
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
- if reservations:
- QUOTAS.rollback(context,
- reservations,
- project_id=project_id,
- user_id=user_id)
+ if quotas:
+ quotas.rollback()
except Exception:
with excutils.save_and_reraise_exception():
- if reservations:
- QUOTAS.rollback(context,
- reservations,
- project_id=project_id,
- user_id=user_id)
+ if quotas:
+ quotas.rollback()
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
@@ -1619,15 +1587,17 @@ def _create_reservations(self, context, instance, original_task_state,
vram_mb = int(old_inst_type.get('extra_specs',
{}).get(VIDEO_RAM, 0))
instance_memory_mb = (old_inst_type['memory_mb'] + vram_mb)
- LOG.debug("going to delete a resizing instance")
+ LOG.debug("going to delete a resizing instance",
+ instance=instance)
- reservations = QUOTAS.reserve(context,
- project_id=project_id,
- user_id=user_id,
- instances=-1,
- cores=-instance_vcpus,
- ram=-instance_memory_mb)
- return reservations
+ quotas = objects.Quotas(context)
+ quotas.reserve(context,
+ project_id=project_id,
+ user_id=user_id,
+ instances=-1,
+ cores=-instance_vcpus,
+ ram=-instance_memory_mb)
+ return quotas
def _local_delete(self, context, instance, bdms, delete_type, cb):
LOG.warning(_("instance's host %s is down, deleting from "
@@ -1725,7 +1695,7 @@ def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
flavor = instance.get_flavor()
- num_instances, quota_reservations = self._check_num_instances_quota(
+ num_instances, quotas = self._check_num_instances_quota(
context, flavor, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
@@ -1742,10 +1712,10 @@ def restore(self, context, instance):
instance.deleted_at = None
instance.save(expected_task_state=[None])
- QUOTAS.commit(context, quota_reservations)
+ quotas.commit()
except Exception:
with excutils.save_and_reraise_exception():
- QUOTAS.rollback(context, quota_reservations)
+ quotas.rollback()
@wrap_check_policy
@check_instance_lock
@@ -1769,8 +1739,7 @@ def force_stop(self, context, instance, do_cast=True):
@check_instance_lock
@check_instance_host
@check_instance_cell
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
- vm_states.ERROR])
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast)
@@ -1834,7 +1803,7 @@ def get_all(self, context, search_opts=None, sort_key='created_at',
parameter.
"""
- #TODO(bcwaldon): determine the best argument for target here
+ # TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
@@ -2080,7 +2049,7 @@ def snapshot_volume_backed(self, context, instance, image_meta, name,
properties['block_device_mapping'] = mapping
properties['bdm_v2'] = True
- for attr in ('status', 'location', 'id'):
+ for attr in ('status', 'location', 'id', 'owner'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
@@ -2204,11 +2173,12 @@ def _reset_image_metadata():
self._record_action_start(context, instance, instance_actions.REBUILD)
- self.compute_rpcapi.rebuild_instance(context, instance=instance,
+ self.compute_task_api.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
- preserve_ephemeral=preserve_ephemeral, kwargs=kwargs)
+ preserve_ephemeral=preserve_ephemeral, host=instance.host,
+ kwargs=kwargs)
@wrap_check_policy
@check_instance_lock
@@ -2398,6 +2368,9 @@ def resize(self, context, instance, flavor_id=None,
reason = _('Resize to zero disk flavor is not allowed.')
raise exception.CannotResizeDisk(reason=reason)
+ if not new_instance_type:
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
+
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug("Old instance type %(current_instance_type_name)s, "
@@ -2406,9 +2379,6 @@ def resize(self, context, instance, flavor_id=None,
'new_instance_type_name': new_instance_type_name},
instance=instance)
- if not new_instance_type:
- raise exception.FlavorNotFound(flavor_id=flavor_id)
-
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
@@ -2542,7 +2512,7 @@ def remove_fixed_ip(self, context, instance, address):
@wrap_check_policy
@check_instance_lock
@check_instance_cell
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
+ @check_instance_state(vm_state=[vm_states.ACTIVE])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
@@ -2575,7 +2545,7 @@ def get_instance_diagnostics(self, context, instance):
@wrap_check_policy
@check_instance_lock
@check_instance_cell
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
+ @check_instance_state(vm_state=[vm_states.ACTIVE])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
@@ -2880,6 +2850,9 @@ def swap_volume(self, context, instance, old_volume, new_volume):
@wrap_check_policy
@check_instance_lock
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
+ vm_states.STOPPED],
+ task_state=[None])
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
@@ -2889,6 +2862,9 @@ def attach_interface(self, context, instance, network_id, port_id,
@wrap_check_policy
@check_instance_lock
+ @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
+ vm_states.STOPPED],
+ task_state=[None])
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
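
(The two hunks above gate interface hot-plug on instance state. A simplified, illustrative sketch of what a check_instance_state decorator of this shape enforces; the real decorator carries more detail:)

    import functools

    def check_instance_state(vm_state=None, task_state=(None,)):
        def outer(f):
            @functools.wraps(f)
            def inner(self, context, instance, *args, **kwargs):
                if vm_state is not None and instance.vm_state not in vm_state:
                    raise exception.InstanceInvalidState(
                        attr='vm_state', instance_uuid=instance.uuid,
                        state=instance.vm_state, method=f.__name__)
                if (task_state is not None and
                        instance.task_state not in task_state):
                    raise exception.InstanceInvalidState(
                        attr='task_state', instance_uuid=instance.uuid,
                        state=instance.task_state, method=f.__name__)
                return f(self, context, instance, *args, **kwargs)
            return inner
        return outer
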
@@ -3052,8 +3028,14 @@ def evacuate(self, context, instance, host, on_shared_storage,
Checking vm compute host state, if the host not in expected_state,
raising an exception.
+
+ :param instance: The instance to evacuate
+        :param host: Target host. If not set, the scheduler will pick one
+        :param on_shared_storage: True if the instance files are on shared
+                                  storage
+        :param admin_password: Password to set on the rebuilt instance
+
"""
- LOG.debug('vm evacuation scheduled')
+ LOG.debug('vm evacuation scheduled', instance=instance)
inst_host = instance.host
service = objects.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
@@ -3066,17 +3048,17 @@ def evacuate(self, context, instance, host, on_shared_storage,
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.EVACUATE)
- return self.compute_rpcapi.rebuild_instance(context,
- instance=instance,
- new_pass=admin_password,
- injected_files=None,
- image_ref=None,
- orig_image_ref=None,
- orig_sys_metadata=None,
- bdms=None,
- recreate=True,
- on_shared_storage=on_shared_storage,
- host=host)
+ return self.compute_task_api.rebuild_instance(context,
+ instance=instance,
+ new_pass=admin_password,
+ injected_files=None,
+ image_ref=None,
+ orig_image_ref=None,
+ orig_sys_metadata=None,
+ bdms=None,
+ recreate=True,
+ on_shared_storage=on_shared_storage,
+ host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
@@ -3125,6 +3107,9 @@ def external_instance_event(self, context, instances, events):
events_by_host[host] = events_on_host
for host in instances_by_host:
+ # TODO(salv-orlando): Handle exceptions raised by the rpc api layer
+ # in order to ensure that a failure in processing events on a host
+ # will not prevent processing events on other hosts
self.compute_rpcapi.external_instance_event(
context, instances_by_host[host], events_by_host[host])
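
(One possible shape of the per-host error isolation the TODO above describes; hypothetical, not part of this change:)

    for host in instances_by_host:
        try:
            self.compute_rpcapi.external_instance_event(
                context, instances_by_host[host], events_by_host[host])
        except Exception:
            # keep going so one failing host does not block the others
            LOG.exception(_LE('Failed to process events for host %s'), host)
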
@@ -3460,7 +3445,7 @@ def add_host_to_aggregate(self, context, aggregate_id, host_name):
aggregate.add_host(context, host_name)
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
- #NOTE(jogo): Send message to host to support resource pools
+ # NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
@@ -3517,9 +3502,11 @@ def _validate_new_key_pair(self, context, user_id, key_name):
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
- if not 0 < len(key_name) < 256:
+ try:
+ utils.check_string_length(key_name, min_length=1, max_length=255)
+ except exception.InvalidInput:
raise exception.InvalidKeypair(
- reason=_('Keypair name must be between '
+                reason=_('Keypair name must be a string and between '
'1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
@@ -3609,9 +3596,8 @@ def validate_property(self, value, property, allowed):
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
- if not val:
- msg = _("Security group %s cannot be empty.") % property
- self.raise_invalid_property(msg)
+
+ utils.check_string_length(val, min_length=1, max_length=255)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
@@ -3623,10 +3609,6 @@ def validate_property(self, value, property, allowed):
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
- if len(val) > 255:
- msg = _("Security group %s should not be greater "
- "than 255 characters.") % property
- self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
@@ -3747,8 +3729,8 @@ def destroy(self, context, security_group):
quotas.reserve(context, project_id=quota_project,
user_id=quota_user, security_groups=-1)
except Exception:
- LOG.exception(_("Failed to update usages deallocating "
- "security group"))
+ LOG.exception(_LE("Failed to update usages deallocating "
+ "security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
@@ -3784,7 +3766,7 @@ def add_to_instance(self, context, instance, security_group_name):
instance_uuid = instance['uuid']
- #check if the security group is associated with the server
+ # check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
@@ -3807,7 +3789,7 @@ def remove_from_instance(self, context, instance, security_group_name):
instance_uuid = instance['uuid']
- #check if the security group is associated with the server
+ # check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
diff --git a/nova/compute/arch.py b/nova/compute/arch.py
new file mode 100644
index 0000000000..12e9f05151
--- /dev/null
+++ b/nova/compute/arch.py
@@ -0,0 +1,155 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Constants and helper APIs for dealing with CPU architectures
+
+The constants provide the standard names for all known processor
+architectures. Many have multiple variants to deal with big-endian
+vs little-endian modes, as well as 32 vs 64 bit word sizes. These
+names are chosen to be identical to the architecture names expected
+by libvirt, so when adding new ones, ensure they match libvirt's
+expectations.
+"""
+
+import os
+
+from nova import exception
+
+ALPHA = "alpha"
+ARMV6 = "armv6"
+ARMV7 = "armv7l"
+ARMV7B = "armv7b"
+
+AARCH64 = "aarch64"
+CRIS = "cris"
+I686 = "i686"
+IA64 = "ia64"
+LM32 = "lm32"
+
+M68K = "m68k"
+MICROBLAZE = "microblaze"
+MICROBLAZEEL = "microblazeel"
+MIPS = "mips"
+MIPSEL = "mipsel"
+
+MIPS64 = "mips64"
+MIPS64EL = "mips64el"
+OPENRISC = "openrisc"
+PARISC = "parisc"
+PARISC64 = "parisc64"
+
+PPC = "ppc"
+PPCLE = "ppcle"
+PPC64 = "ppc64"
+PPC64LE = "ppc64le"
+PPCEMB = "ppcemb"
+
+S390 = "s390"
+S390X = "s390x"
+SH4 = "sh4"
+SH4EB = "sh4eb"
+SPARC = "sparc"
+
+SPARC64 = "sparc64"
+UNICORE32 = "unicore32"
+X86_64 = "x86_64"
+XTENSA = "xtensa"
+XTENSAEB = "xtensaeb"
+
+
+ALL = [
+ ALPHA,
+ ARMV6,
+ ARMV7,
+ ARMV7B,
+
+ AARCH64,
+ CRIS,
+ I686,
+ IA64,
+ LM32,
+
+ M68K,
+ MICROBLAZE,
+ MICROBLAZEEL,
+ MIPS,
+ MIPSEL,
+
+ MIPS64,
+ MIPS64EL,
+ OPENRISC,
+ PARISC,
+ PARISC64,
+
+ PPC,
+ PPCLE,
+ PPC64,
+ PPC64LE,
+ PPCEMB,
+
+ S390,
+ S390X,
+ SH4,
+ SH4EB,
+ SPARC,
+
+ SPARC64,
+ UNICORE32,
+ X86_64,
+ XTENSA,
+ XTENSAEB,
+]
+
+
+def from_host():
+ """Get the architecture of the host OS
+
+ :returns: the canonicalized host architecture
+ """
+
+ return canonicalize(os.uname()[4])
+
+
+def is_valid(name):
+ """Check if a string is a valid architecture
+
+ :param name: architecture name to validate
+
+ :returns: True if @name is valid
+ """
+
+ return name in ALL
+
+
+def canonicalize(name):
+ """Canonicalize the architecture name
+
+ :param name: architecture name to canonicalize
+
+ :returns: a canonical architecture name
+ """
+
+ if name is None:
+ return None
+
+ newname = name.lower()
+
+ if newname in ("i386", "i486", "i586"):
+ newname = I686
+
+ if not is_valid(newname):
+ raise exception.InvalidArchitectureName(arch=name)
+
+ return newname
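
(Example usage of the new module, following the behavior defined above:)

    from nova.compute import arch

    arch.canonicalize('X86_64')   # -> 'x86_64' (lower-cased, validated)
    arch.canonicalize('i486')     # -> 'i686'; i386/i486/i586 fold into I686
    arch.is_valid('armv7l')       # -> True
    arch.from_host()              # canonicalized os.uname()[4] of this host
    # arch.canonicalize('itanic') raises exception.InvalidArchitectureName
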
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index 4416ffacea..4abbd3625c 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -49,8 +49,7 @@ class ComputeRPCAPIRedirect(object):
'unpause_instance', 'revert_resize',
'confirm_resize', 'reset_network',
'inject_network_info',
- 'backup_instance', 'snapshot_instance',
- 'rebuild_instance']
+ 'backup_instance', 'snapshot_instance']
def __init__(self, cells_rpcapi):
self.cells_rpcapi = cells_rpcapi
@@ -70,7 +69,7 @@ class ConductorTaskRPCAPIRedirect(object):
# is for transitioning to a common interface where we can just
# swap out the compute_task_rpcapi class with the cells_rpcapi class.
cells_compatible = ['build_instances', 'resize_instance',
- 'live_migrate_instance']
+ 'live_migrate_instance', 'rebuild_instance']
def __init__(self, cells_rpcapi_obj):
self.cells_rpcapi = cells_rpcapi_obj
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index 27d8c0bc7b..4f5356ce78 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -18,8 +18,8 @@
"""
from nova import exception
+from nova.i18n import _
from nova.objects import base as obj_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.pci import pci_request
@@ -42,10 +42,6 @@ def disk_gb(self):
def memory_mb(self):
return 0
- @property
- def vcpus(self):
- return 0
-
def __enter__(self):
return self
@@ -57,8 +53,8 @@ def abort(self):
pass
def __str__(self):
- return "[Claim: %d MB memory, %d GB disk, %d VCPUS]" % (self.memory_mb,
- self.disk_gb, self.vcpus)
+ return "[Claim: %d MB memory, %d GB disk]" % (self.memory_mb,
+ self.disk_gb)
class Claim(NopClaim):
@@ -102,10 +98,6 @@ def disk_gb(self):
def memory_mb(self):
return self.instance['memory_mb'] + self.overhead['memory_mb']
- @property
- def vcpus(self):
- return self.instance['vcpus']
-
def abort(self):
"""Compute operation requiring claimed resources has failed or
been aborted.
@@ -130,18 +122,16 @@ def _claim_test(self, resources, limits=None):
# unlimited:
memory_mb_limit = limits.get('memory_mb')
disk_gb_limit = limits.get('disk_gb')
- vcpu_limit = limits.get('vcpu')
msg = _("Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d "
- "GB, VCPUs %(vcpus)d")
- params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb,
- 'vcpus': self.vcpus}
+ "GB")
+ params = {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb}
LOG.audit(msg % params, instance=self.instance)
reasons = [self._test_memory(resources, memory_mb_limit),
self._test_disk(resources, disk_gb_limit),
- self._test_cpu(resources, vcpu_limit),
self._test_pci()]
+ reasons = reasons + self._test_ext_resources(limits)
reasons = [r for r in reasons if r is not None]
if len(reasons) > 0:
raise exception.ComputeResourcesUnavailable(reason=
@@ -176,14 +166,9 @@ def _test_pci(self):
if not can_claim:
return _('Claim pci failed.')
- def _test_cpu(self, resources, limit):
- type_ = _("CPUs")
- unit = "VCPUs"
- total = resources['vcpus']
- used = resources['vcpus_used']
- requested = self.vcpus
-
- return self._test(type_, unit, total, used, requested, limit)
+ def _test_ext_resources(self, limits):
+ return self.tracker.ext_resources_handler.test_resources(
+ self.instance, limits)
def _test(self, type_, unit, total, used, requested, limit):
"""Test if the given type of resource needed for a claim can be safely
@@ -235,10 +220,6 @@ def disk_gb(self):
def memory_mb(self):
return self.instance_type['memory_mb'] + self.overhead['memory_mb']
- @property
- def vcpus(self):
- return self.instance_type['vcpus']
-
def _test_pci(self):
pci_requests = pci_request.get_instance_pci_requests(
self.instance, 'new_')
@@ -248,6 +229,10 @@ def _test_pci(self):
if not claim:
return _('Claim pci failed.')
+ def _test_ext_resources(self, limits):
+ return self.tracker.ext_resources_handler.test_resources(
+ self.instance_type, limits)
+
def abort(self):
"""Compute operation requiring claimed resources has failed or
been aborted.
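
(The removed per-claim vcpu test moves behind the tracker's pluggable resource handler; test_resources() is expected to return a list of failure-reason strings, with None entries for checks that pass, which the claim merges into its own reasons list above. An illustrative handler under that assumed contract:)

    class VCPUTestHandler(object):
        """Hypothetical handler mirroring the removed _test_cpu check."""

        def test_resources(self, usage, limits):
            limit = (limits or {}).get('vcpu')
            if limit is None:
                return [None]            # unlimited; check passes
            requested = usage['vcpus']
            if requested > limit:
                return ['Claim vcpu failed: requested %(req)d > limit '
                        '%(lim)d' % {'req': requested, 'lim': limit}]
            return [None]
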
diff --git a/nova/compute/flavors.py b/nova/compute/flavors.py
index e391943158..1d1e0d60b7 100644
--- a/nova/compute/flavors.py
+++ b/nova/compute/flavors.py
@@ -27,8 +27,9 @@
from nova import context
from nova import db
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LE
from nova.openstack.common.db import exception as db_exc
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.pci import pci_request
@@ -163,7 +164,7 @@ def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None,
try:
return db.flavor_create(context.get_admin_context(), kwargs)
except db_exc.DBError as e:
- LOG.exception(_('DB error: %s') % e)
+ LOG.exception(_LE('DB error: %s'), e)
raise exception.FlavorCreateFailed()
@@ -174,7 +175,7 @@ def destroy(name):
raise ValueError()
db.flavor_destroy(context.get_admin_context(), name)
except (ValueError, exception.NotFound):
- LOG.exception(_('Instance type %s not found for deletion') % name)
+ LOG.exception(_LE('Instance type %s not found for deletion'), name)
raise exception.FlavorNotFoundByName(flavor_name=name)
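
(Note the logging change above also defers string interpolation to the logger:)

    LOG.exception(_LE('DB error: %s'), e)   # formatted only if the record
                                            # is actually emitted
    # rather than eagerly: LOG.exception(_LE('DB error: %s') % e)
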
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index f71f7e4c84..f41ee75747 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -34,6 +34,7 @@
import traceback
import uuid
+from cinderclient import exceptions as cinder_exception
import eventlet.event
from eventlet import greenthread
import eventlet.timeout
@@ -57,6 +58,10 @@
import nova.context
from nova import exception
from nova import hooks
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LI
+from nova.i18n import _LW
from nova import image
from nova.image import glance
from nova import manager
@@ -68,10 +73,6 @@
from nova.objects import instance as instance_obj
from nova.objects import quotas as quotas_obj
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LE
-from nova.openstack.common.gettextutils import _LI
-from nova.openstack.common.gettextutils import _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
@@ -192,6 +193,10 @@
default=0,
help="Automatically confirm resizes after N seconds. "
"Set to 0 to disable."),
+ cfg.IntOpt("shutdown_timeout",
+ default=60,
+ help="Total amount of time to wait in seconds for an instance "
+ "to perform a clean shutdown."),
]
running_deleted_opts = [
@@ -362,8 +367,9 @@ def decorated_function(self, context, image_id, instance,
try:
self.image_api.delete(context, image_id)
except Exception:
- LOG.exception(_("Error while trying to clean up image %s")
- % image_id, instance=instance)
+ LOG.exception(_LE("Error while trying to clean up "
+ "image %s"), image_id,
+ instance=instance)
return decorated_function
@@ -529,13 +535,14 @@ def wait_for_instance_event(self, instance, event_names, deadline=300,
waiting for the rest of the events, False to stop processing,
or raise an exception which will bubble up to the waiter.
- :param:instance: The instance for which an event is expected
- :param:event_names: A list of event names. Each element can be a
+ :param instance: The instance for which an event is expected
+ :param event_names: A list of event names. Each element can be a
string event name or tuple of strings to
indicate (name, tag).
- :param:deadline: Maximum number of seconds we should wait for all
+ :param deadline: Maximum number of seconds we should wait for all
of the specified events to arrive.
- :param:error_callback: A function to be called if an event arrives
+ :param error_callback: A function to be called if an event arrives
+
"""
if error_callback is None:
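
(A hedged usage sketch of the helper documented above, assuming it is used as a context manager around the operation that triggers the awaited events; the event name, vif_id, on_error and plug_vif are illustrative:)

    events = [('network-vif-plugged', vif_id)]
    with self.virtapi.wait_for_instance_event(instance, events,
                                              deadline=300,
                                              error_callback=on_error):
        plug_vif(instance)   # hypothetical operation emitting the event
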
@@ -565,6 +572,11 @@ class ComputeManager(manager.Manager):
target = messaging.Target(version='3.32')
+ # How long to wait in seconds before re-issuing a shutdown
+    # signal to an instance during power off. The overall
+ # time to wait is set by CONF.shutdown_timeout.
+ SHUTDOWN_RETRY_INTERVAL = 10
+
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.virtapi = ComputeVirtAPI(self)
@@ -658,7 +670,7 @@ def _get_instances_on_driver(self, context, filters=None):
driver_uuids = self.driver.list_instance_uuids()
filters['uuid'] = driver_uuids
local_instances = objects.InstanceList.get_by_filters(
                 context, filters, use_slave=True)
return local_instances
except NotImplementedError:
pass
@@ -667,7 +679,7 @@ def _get_instances_on_driver(self, context, filters=None):
# to brute force.
driver_instances = self.driver.list_instances()
instances = objects.InstanceList.get_by_filters(context, filters,
                                                         use_slave=True)
name_map = dict((instance.name, instance) for instance in instances)
local_instances = []
for driver_instance in driver_instances:
@@ -684,21 +696,28 @@ def _destroy_evacuated_instances(self, context):
evacuated to another host. Check that the instances reported
by the driver are still associated with this host. If they are
not, destroy them, with the exception of instances which are in
- the MIGRATING state.
+    the MIGRATING, RESIZE_MIGRATING, RESIZE_MIGRATED, or RESIZE_FINISH
+    task states, or the RESIZED vm state.
"""
our_host = self.host
filters = {'deleted': False}
local_instances = self._get_instances_on_driver(context, filters)
for instance in local_instances:
if instance.host != our_host:
- if instance.task_state in [task_states.MIGRATING]:
+ if (instance.task_state in [task_states.MIGRATING,
+ task_states.RESIZE_MIGRATING,
+ task_states.RESIZE_MIGRATED,
+ task_states.RESIZE_FINISH]
+ or instance.vm_state in [vm_states.RESIZED]):
LOG.debug('Will not delete instance as its host ('
'%(instance_host)s) is not equal to our '
- 'host (%(our_host)s) but its state is '
- '(%(task_state)s)',
+ 'host (%(our_host)s) but its task state is '
+ '(%(task_state)s) and vm state is '
+ '(%(vm_state)s)',
{'instance_host': instance.host,
'our_host': our_host,
- 'task_state': instance.task_state},
+ 'task_state': instance.task_state,
+ 'vm_state': instance.vm_state},
instance=instance)
continue
LOG.info(_('Deleting instance as its host ('
@@ -742,7 +761,7 @@ def _is_instance_storage_shared(self, context, instance):
instance=instance)
shared_storage = False
except Exception:
- LOG.exception(_('Failed to check if instance shared'),
+ LOG.exception(_LE('Failed to check if instance shared'),
instance=instance)
finally:
if data:
@@ -808,7 +827,7 @@ def _init_instance(self, context, instance):
self._complete_partial_deletion(context, instance)
except Exception:
# we don't want that an exception blocks the init_host
- msg = _('Failed to complete a deletion')
+ msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
return
@@ -836,6 +855,12 @@ def _init_instance(self, context, instance):
LOG.debug("Instance in transitional state %s at start-up "
"clearing task state",
instance['task_state'], instance=instance)
+ try:
+ self._post_interrupted_snapshot_cleanup(context, instance)
+ except Exception:
+                # we don't want an exception to block init_host
+ msg = _LE('Failed to cleanup snapshot.')
+ LOG.exception(msg, instance=instance)
instance.task_state = None
instance.save()
@@ -859,7 +884,7 @@ def _init_instance(self, context, instance):
self._delete_instance(context, instance, bdms, quotas)
except Exception:
# we don't want that an exception blocks the init_host
- msg = _('Failed to complete a deletion')
+ msg = _LE('Failed to complete a deletion')
LOG.exception(msg, instance=instance)
self._set_instance_error_state(context, instance)
return
@@ -900,7 +925,7 @@ def _init_instance(self, context, instance):
self.stop_instance(context, instance)
except Exception:
# we don't want that an exception blocks the init_host
- msg = _('Failed to stop instance')
+ msg = _LE('Failed to stop instance')
LOG.exception(msg, instance=instance)
return
@@ -912,7 +937,7 @@ def _init_instance(self, context, instance):
self.start_instance(context, instance)
except Exception:
# we don't want that an exception blocks the init_host
- msg = _('Failed to start instance')
+ msg = _LE('Failed to start instance')
LOG.exception(msg, instance=instance)
return
@@ -937,7 +962,7 @@ def _init_instance(self, context, instance):
instance, net_info, block_dev_info, power_on)
except Exception as e:
- LOG.exception(_('Failed to revert crashed migration'),
+ LOG.exception(_LE('Failed to revert crashed migration'),
instance=instance)
finally:
LOG.info(_('Instance found in migrating state during '
@@ -1088,7 +1113,7 @@ def get_console_topic(self, context):
Currently this is just set in the flags for each compute host.
"""
- #TODO(mdragon): perhaps make this variable by console_type?
+ # TODO(mdragon): perhaps make this variable by console_type?
return '%s.%s' % (CONF.console_topic, CONF.console_host)
def get_console_pool_info(self, context, console_type):
@@ -1137,7 +1162,7 @@ def refresh_provider_fw_rules(self, context):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
     def _get_instance_nw_info(self, context, instance, use_slave=False):
"""Get a list of dictionaries of network data of an instance."""
if (not hasattr(instance, 'system_metadata') or
len(instance['system_metadata']) == 0):
@@ -1148,7 +1173,7 @@ def _get_instance_nw_info(self, context, instance, use_slave=False):
# succeed.
instance = objects.Instance.get_by_uuid(context,
instance['uuid'],
                                                     use_slave=use_slave)
network_info = self.network_api.get_instance_nw_info(context,
instance)
@@ -1248,7 +1273,7 @@ def _prebuild_instance(self, context, instance):
exception.UnexpectedDeletingTaskStateError):
msg = _("Instance disappeared before we could start it")
# Quickly bail out of here
- raise exception.BuildAbortException(instance_uuid=instance['uuid'],
+ raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=msg)
def _validate_instance_group_policy(self, context, instance,
@@ -1271,11 +1296,11 @@ def _do_validation(context, instance, group_hint):
if 'anti-affinity' not in group.policies:
return
- group_hosts = group.get_hosts(context, exclude=[instance['uuid']])
+ group_hosts = group.get_hosts(context, exclude=[instance.uuid])
if self.host in group_hosts:
msg = _("Anti-affinity instance group policy was violated.")
raise exception.RescheduledException(
- instance_uuid=instance['uuid'],
+ instance_uuid=instance.uuid,
reason=msg)
_do_validation(context, instance, group_hint)
@@ -1283,6 +1308,7 @@ def _do_validation(context, instance, group_hint):
def _build_instance(self, context, request_spec, filter_properties,
requested_networks, injected_files, admin_password, is_first_time,
node, instance, image_meta, legacy_bdm_in_spec):
+ original_context = context
context = context.elevated()
# If neutron security groups pass requested security
@@ -1298,7 +1324,7 @@ def _build_instance(self, context, request_spec, filter_properties,
network_info = None
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
- context, instance['uuid'])
+ context, instance.uuid)
# b64 decode the files to inject:
injected_files_orig = injected_files
@@ -1316,14 +1342,13 @@ def _build_instance(self, context, request_spec, filter_properties,
macs = self.driver.macs_for_instance(instance)
dhcp_options = self.driver.dhcp_options_for_instance(instance)
- network_info = self._allocate_network(context, instance,
- requested_networks, macs, security_groups,
+ network_info = self._allocate_network(original_context,
+ instance, requested_networks, macs, security_groups,
dhcp_options)
- self._instance_update(
- context, instance['uuid'],
- vm_state=vm_states.BUILDING,
- task_state=task_states.BLOCK_DEVICE_MAPPING)
+ instance.vm_state = vm_states.BUILDING
+ instance.task_state = task_states.BLOCK_DEVICE_MAPPING
+ instance.save()
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
@@ -1334,8 +1359,8 @@ def _build_instance(self, context, request_spec, filter_properties,
context, instance, bdms)
set_access_ip = (is_first_time and
- not instance['access_ip_v4'] and
- not instance['access_ip_v6'])
+ not instance.access_ip_v4 and
+ not instance.access_ip_v6)
instance = self._spawn(context, instance, image_meta,
network_info, block_device_info,
@@ -1350,11 +1375,11 @@ def _build_instance(self, context, request_spec, filter_properties,
try:
self._deallocate_network(context, instance)
except Exception:
- msg = _('Failed to dealloc network '
- 'for deleted instance')
+ msg = _LE('Failed to dealloc network '
+ 'for deleted instance')
LOG.exception(msg, instance=instance)
raise exception.BuildAbortException(
- instance_uuid=instance['uuid'],
+ instance_uuid=instance.uuid,
reason=_("Instance disappeared during build"))
except (exception.UnexpectedTaskStateError,
exception.VirtualInterfaceCreateException) as e:
@@ -1371,8 +1396,8 @@ def _build_instance(self, context, request_spec, filter_properties,
try:
self._deallocate_network(context, instance)
except Exception:
- msg = _('Failed to dealloc network '
- 'for failed instance')
+ msg = _LE('Failed to dealloc network '
+ 'for failed instance')
LOG.exception(msg, instance=instance)
except Exception:
exc_info = sys.exc_info()
@@ -1386,9 +1411,9 @@ def _build_instance(self, context, request_spec, filter_properties,
filter_properties, bdms, legacy_bdm_in_spec)
if rescheduled:
# log the original build error
- self._log_original_error(exc_info, instance['uuid'])
+ self._log_original_error(exc_info, instance.uuid)
raise exception.RescheduledException(
- instance_uuid=instance['uuid'],
+ instance_uuid=instance.uuid,
reason=unicode(exc_info[1]))
else:
# not re-scheduling, go to error:
@@ -1444,13 +1469,13 @@ def _reschedule_or_error(self, context, instance, exc_info,
except Exception:
rescheduled = False
- LOG.exception(_("Error trying to reschedule"),
+ LOG.exception(_LE("Error trying to reschedule"),
instance_uuid=instance_uuid)
return rescheduled
def _reschedule(self, context, request_spec, filter_properties,
- instance, scheduler_method, method_args, task_state,
+ instance, reschedule_method, method_args, task_state,
exc_info=None):
"""Attempt to re-schedule a compute operation."""
@@ -1470,7 +1495,7 @@ def _reschedule(self, context, request_spec, filter_properties,
request_spec['instance_uuids'] = [instance_uuid]
LOG.debug("Re-scheduling %(method)s: attempt %(num)d",
- {'method': scheduler_method.func_name,
+ {'method': reschedule_method.func_name,
'num': retry['num_attempts']}, instance_uuid=instance_uuid)
# reset the task state:
@@ -1481,7 +1506,7 @@ def _reschedule(self, context, request_spec, filter_properties,
retry['exc'] = traceback.format_exception_only(exc_info[0],
exc_info[1])
- scheduler_method(context, *method_args)
+ reschedule_method(context, *method_args)
return True
@periodic_task.periodic_task
@@ -1495,7 +1520,7 @@ def _check_instance_build_time(self, context):
'host': self.host}
building_insts = objects.InstanceList.get_by_filters(context,
                 filters, expected_attrs=[], use_slave=True)
for instance in building_insts:
if timeutils.is_older_than(instance['created_at'], timeout):
@@ -1512,7 +1537,7 @@ def _start_building(self, context, instance):
"""Save the host and launched_on fields and log appropriately."""
LOG.audit(_('Starting instance...'), context=context,
instance=instance)
- self._instance_update(context, instance['uuid'],
+ self._instance_update(context, instance.uuid,
vm_state=vm_states.BUILDING,
task_state=None,
expected_task_state=(task_states.SCHEDULING,
@@ -1553,8 +1578,8 @@ def _allocate_network_async(self, context, instance, requested_networks,
log_info = {'attempt': attempt,
'attempts': attempts}
if attempt == attempts:
- LOG.exception(_('Instance failed network setup '
- 'after %(attempts)d attempt(s)'),
+ LOG.exception(_LE('Instance failed network setup '
+ 'after %(attempts)d attempt(s)'),
log_info)
raise exc_info[0], exc_info[1], exc_info[2]
LOG.warn(_('Instance failed network setup '
@@ -1661,10 +1686,10 @@ def _default_block_device_names(self, context, instance,
if root_bdm.device_name:
root_device_name = root_bdm.device_name
- instance['root_device_name'] = root_device_name
+ instance.root_device_name = root_device_name
update_instance = True
- elif instance['root_device_name']:
- root_device_name = instance['root_device_name']
+ elif instance.root_device_name:
+ root_device_name = instance.root_device_name
root_bdm.device_name = root_device_name
update_root_bdm = True
else:
@@ -1672,25 +1697,21 @@ def _default_block_device_names(self, context, instance,
image_meta,
root_bdm)
- instance['root_device_name'] = root_device_name
+ instance.root_device_name = root_device_name
root_bdm.device_name = root_device_name
update_instance = update_root_bdm = True
if update_instance:
- self._instance_update(context, instance['uuid'],
- root_device_name=root_device_name)
+ instance.save()
if update_root_bdm:
root_bdm.save()
- def _is_mapping(bdm):
- return (bdm.source_type in ('image', 'volume', 'snapshot') and
- driver_block_device.is_implemented(bdm))
-
ephemerals = filter(block_device.new_format_is_ephemeral,
block_devices)
swap = filter(block_device.new_format_is_swap,
block_devices)
- block_device_mapping = filter(_is_mapping, block_devices)
+ block_device_mapping = filter(
+ driver_block_device.is_block_device_mapping, block_devices)
self._default_device_names_for_instance(instance,
root_device_name,
@@ -1698,7 +1719,8 @@ def _is_mapping(bdm):
swap,
block_device_mapping)
- def _prep_block_device(self, context, instance, bdms):
+ def _prep_block_device(self, context, instance, bdms,
+ do_check_attach=True):
"""Set up the block device for an instance with error logging."""
try:
block_device_info = {
@@ -1709,15 +1731,22 @@ def _prep_block_device(self, context, instance, bdms):
driver_block_device.attach_block_devices(
driver_block_device.convert_volumes(bdms),
context, instance, self.volume_api,
- self.driver) +
+ self.driver, do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_snapshots(bdms),
context, instance, self.volume_api,
- self.driver, self._await_block_device_map_created) +
+ self.driver, self._await_block_device_map_created,
+ do_check_attach=do_check_attach) +
driver_block_device.attach_block_devices(
driver_block_device.convert_images(bdms),
context, instance, self.volume_api,
- self.driver, self._await_block_device_map_created))
+ self.driver, self._await_block_device_map_created,
+ do_check_attach=do_check_attach) +
+ driver_block_device.attach_block_devices(
+ driver_block_device.convert_blanks(bdms),
+ context, instance, self.volume_api,
+ self.driver, self._await_block_device_map_created,
+ do_check_attach=do_check_attach))
}
if self.use_legacy_block_device_info:
@@ -1732,13 +1761,13 @@ def _prep_block_device(self, context, instance, bdms):
return block_device_info
except exception.OverQuota:
- msg = ('Failed to create block device for instance due to being '
- 'over volume resource quota')
- LOG.debug(msg, instance=instance)
+ msg = _LW('Failed to create block device for instance due to '
+ 'being over volume resource quota')
+ LOG.warn(msg, instance=instance)
raise exception.InvalidBDM()
except Exception:
- LOG.exception(_('Instance failed block device setup'),
+ LOG.exception(_LE('Instance failed block device setup'),
instance=instance)
raise exception.InvalidBDM()
@@ -1758,7 +1787,8 @@ def _spawn(self, context, instance, image_meta, network_info,
block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_('Instance failed to spawn'), instance=instance)
+ LOG.exception(_LE('Instance failed to spawn'),
+ instance=instance)
current_power_state = self._get_power_state(context, instance)
@@ -1905,16 +1935,16 @@ def do_build_and_run_instance(context, instance, image, request_spec,
retry = filter_properties.get('retry', None)
if not retry:
# no retry information, do not reschedule.
- LOG.debug("Retry info not present, will not reschedule")
+ LOG.debug("Retry info not present, will not reschedule",
+ instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
self._set_instance_error_state(context, instance.uuid)
return
retry['exc'] = traceback.format_exception(*sys.exc_info())
- # The MAC address for this instance is tied to the host so if
- # we're going to reschedule we have to free the network details
- # and reallocate on the next host.
- if self.driver.macs_for_instance(instance):
+ # NOTE(comstud): Deallocate networks if the driver wants
+ # us to do so.
+ if self.driver.deallocate_networks_on_reschedule(instance):
self._cleanup_allocated_networks(context, instance,
requested_networks)
@@ -1935,13 +1965,17 @@ def do_build_and_run_instance(context, instance, image, request_spec,
LOG.exception(e.format_message(), instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
+ self._cleanup_volumes(context, instance.uuid,
+ block_device_mapping, raise_exc=False)
self._set_instance_error_state(context, instance)
except Exception:
# Should not reach here.
- msg = _('Unexpected build failure, not rescheduling build.')
+ msg = _LE('Unexpected build failure, not rescheduling build.')
LOG.exception(msg, instance=instance)
self._cleanup_allocated_networks(context, instance,
requested_networks)
+ self._cleanup_volumes(context, instance.uuid,
+ block_device_mapping, raise_exc=False)
self._set_instance_error_state(context, instance)
do_build_and_run_instance(context, instance, image, request_spec,
@@ -1977,10 +2011,6 @@ def _build_and_run_instance(self, context, instance, image, injected_files,
injected_files, admin_password,
network_info=network_info,
block_device_info=block_device_info)
- self._notify_about_instance_usage(context, instance,
- 'create.end',
- extra_usage_info={'message': _('Success')},
- network_info=network_info)
except (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError) as e:
with excutils.save_and_reraise_exception():
@@ -2009,7 +2039,7 @@ def _build_and_run_instance(self, context, instance, image, injected_files,
reason=msg)
except (exception.VirtualInterfaceCreateException,
exception.VirtualInterfaceMacAddressException) as e:
- LOG.exception(_('Failed to allocate network(s)'),
+ LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
@@ -2028,7 +2058,7 @@ def _build_and_run_instance(self, context, instance, image, injected_files,
self._notify_about_instance_usage(context, instance,
'create.error', fault=e)
raise exception.RescheduledException(
- instance_uuid=instance.uuid, reason=str(e))
+ instance_uuid=instance.uuid, reason=six.text_type(e))
# NOTE(alaski): This is only useful during reschedules, remove it now.
instance.system_metadata.pop('network_allocated', None)
@@ -2037,7 +2067,18 @@ def _build_and_run_instance(self, context, instance, image, injected_files,
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.launched_at = timeutils.utcnow()
- instance.save(expected_task_state=task_states.SPAWNING)
+
+ try:
+ instance.save(expected_task_state=task_states.SPAWNING)
+ except (exception.InstanceNotFound,
+ exception.UnexpectedDeletingTaskStateError) as e:
+ with excutils.save_and_reraise_exception():
+ self._notify_about_instance_usage(context, instance,
+ 'create.end', fault=e)
+
+ self._notify_about_instance_usage(context, instance, 'create.end',
+ extra_usage_info={'message': _('Success')},
+ network_info=network_info)
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
@@ -2057,7 +2098,7 @@ def _build_resources(self, context, instance, requested_networks,
except Exception:
# Because this allocation is async any failures are likely to occur
# when the driver accesses network_info during spawn().
- LOG.exception(_('Failed to allocate network(s)'),
+ LOG.exception(_LE('Failed to allocate network(s)'),
instance=instance)
msg = _('Failed to allocate the network(s), not rescheduling.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
@@ -2083,7 +2124,7 @@ def _build_resources(self, context, instance, requested_networks,
raise exception.BuildAbortException(instance_uuid=instance.uuid,
reason=e.format_message())
except Exception:
- LOG.exception(_('Failure prepping block device'),
+ LOG.exception(_LE('Failure prepping block device'),
instance=instance)
msg = _('Failure prepping block device.')
raise exception.BuildAbortException(instance_uuid=instance.uuid,
@@ -2095,7 +2136,7 @@ def _build_resources(self, context, instance, requested_networks,
with excutils.save_and_reraise_exception() as ctxt:
if not isinstance(exc, (exception.InstanceNotFound,
exception.UnexpectedDeletingTaskStateError)):
- LOG.exception(_('Instance failed to spawn'),
+ LOG.exception(_LE('Instance failed to spawn'),
instance=instance)
# Make sure the async call finishes
if network_info is not None:
@@ -2104,8 +2145,6 @@ def _build_resources(self, context, instance, requested_networks,
self._shutdown_instance(context, instance,
block_device_mapping, requested_networks,
try_deallocate_networks=False)
- self._cleanup_build_resources(context, instance,
- block_device_mapping)
except Exception:
ctxt.reraise = False
msg = _('Could not clean up failed build,'
@@ -2118,7 +2157,7 @@ def _cleanup_allocated_networks(self, context, instance,
try:
self._deallocate_network(context, instance, requested_networks)
except Exception:
- msg = _('Failed to deallocate networks')
+ msg = _LE('Failed to deallocate networks')
LOG.exception(msg, instance=instance)
return
@@ -2131,18 +2170,6 @@ def _cleanup_allocated_networks(self, context, instance,
# exception will be raised by instance.save()
pass
- def _cleanup_build_resources(self, context, instance,
- block_device_mapping):
- # Don't clean up networks here in case we reschedule
- try:
- self._cleanup_volumes(context, instance.uuid,
- block_device_mapping)
- except Exception:
- with excutils.save_and_reraise_exception():
- msg = _('Failed to cleanup volumes for failed build,'
- ' not rescheduling')
- LOG.exception(msg, instance=instance)
-
@object_compat
@messaging.expected_exceptions(exception.BuildAbortException,
exception.UnexpectedTaskStateError,
@@ -2182,6 +2209,25 @@ def _try_deallocate_network(self, context, instance,
instance=instance)
self._set_instance_error_state(context, instance)
+ def _get_power_off_values(self, context, instance, clean_shutdown):
+ """Get the timing configuration for powering down this instance."""
+ if clean_shutdown:
+ timeout = compute_utils.get_value_from_system_metadata(instance,
+ key='image_os_shutdown_timeout', type=int,
+ default=CONF.shutdown_timeout)
+ retry_interval = self.SHUTDOWN_RETRY_INTERVAL
+ else:
+ timeout = 0
+ retry_interval = 0
+
+ return timeout, retry_interval
+
+ def _power_off_instance(self, context, instance, clean_shutdown=True):
+ """Power off an instance on this host."""
+ timeout, retry_interval = self._get_power_off_values(context,
+ instance, clean_shutdown)
+ self.driver.power_off(instance, timeout, retry_interval)
+
def _shutdown_instance(self, context, instance,
bdms, requested_networks=None, notify=True,
try_deallocate_networks=True):
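The helper pair added above centralizes the clean-versus-forced power-off decision. A minimal, standalone sketch of that timing selection, assuming the image_os_shutdown_timeout system-metadata key shown above and a hypothetical 60-second CONF.shutdown_timeout default:

    # Hedged sketch only; the real method reads the value through
    # compute_utils.get_value_from_system_metadata() and CONF.shutdown_timeout.
    SHUTDOWN_RETRY_INTERVAL = 10  # assumed value of the manager's constant

    def get_power_off_values(system_metadata, clean_shutdown,
                             default_timeout=60):
        """Return (timeout, retry_interval) for powering off an instance."""
        if not clean_shutdown:
            # Forced power-off: no graceful wait at all.
            return 0, 0
        try:
            timeout = int(system_metadata.get('image_os_shutdown_timeout',
                                              default_timeout))
        except (TypeError, ValueError):
            timeout = default_timeout
        return timeout, SHUTDOWN_RETRY_INTERVAL

    # get_power_off_values({'image_os_shutdown_timeout': '120'}, True) -> (120, 10)
    # get_power_off_values({}, False) -> (0, 0)

The same values feed driver.power_off() here and migrate_disk_and_power_off() in the resize path below.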
@@ -2243,11 +2289,14 @@ def _shutdown_instance(self, context, instance,
connector)
self.volume_api.detach(context, bdm.volume_id)
except exception.DiskNotFound as exc:
- LOG.warn(_('Ignoring DiskNotFound: %s') % exc,
- instance=instance)
+ LOG.debug('Ignoring DiskNotFound: %s', exc,
+ instance=instance)
except exception.VolumeNotFound as exc:
- LOG.warn(_('Ignoring VolumeNotFound: %s') % exc,
- instance=instance)
+ LOG.debug('Ignoring VolumeNotFound: %s', exc,
+ instance=instance)
+ except cinder_exception.EndpointNotFound as exc:
+ LOG.warn(_LW('Ignoring EndpointNotFound: %s'), exc,
+ instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
@@ -2313,6 +2362,7 @@ def _delete_instance(self, context, instance, bdms, quotas):
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
+ self._update_resource_tracker(context, instance)
system_meta = instance.system_metadata
instance.destroy()
except Exception:
@@ -2356,7 +2406,7 @@ def do_terminate_instance(instance, bdms):
# As we're trying to delete always go to Error if something
# goes wrong that _delete_instance can't handle.
with excutils.save_and_reraise_exception():
- LOG.exception(_('Setting instance vm_state to ERROR'),
+ LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
self._set_instance_error_state(context, instance)
@@ -2369,14 +2419,14 @@ def do_terminate_instance(instance, bdms):
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
- def stop_instance(self, context, instance):
+ def stop_instance(self, context, instance, clean_shutdown=True):
"""Stopping an instance on this host."""
@utils.synchronized(instance.uuid)
def do_stop_instance():
self._notify_about_instance_usage(context, instance,
"power_off.start")
- self.driver.power_off(instance)
+ self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
instance.power_state = current_power_state
instance.vm_state = vm_states.STOPPED
@@ -2568,7 +2618,7 @@ def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.NotFound:
- LOG.exception(_('Failed to get compute_info for %s') %
+ LOG.exception(_LE('Failed to get compute_info for %s'),
self.host)
finally:
instance.host = self.host
@@ -2583,7 +2633,7 @@ def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
# to point to the new one... we have to override it.
- #TODO(jaypipes): Move generate_image_url() into the nova.image.api
+ # TODO(jaypipes): Move generate_image_url() into the nova.image.api
orig_image_ref_url = glance.generate_image_url(orig_image_ref)
extra_usage_info = {'image_ref_url': orig_image_ref_url}
self.conductor_api.notify_usage_exists(context,
@@ -2644,8 +2694,9 @@ def detach_block_devices(context, bdms):
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=[task_states.REBUILD_SPAWNING])
- LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
if orig_vm_state == vm_states.STOPPED:
+ LOG.info(_LI("bringing vm to original state: '%s'"),
+ orig_vm_state, instance=instance)
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
@@ -2890,6 +2941,9 @@ def update_task_state(task_state,
msg = _("Image not found during snapshot")
LOG.warn(msg, instance=instance)
+ def _post_interrupted_snapshot_cleanup(self, context, instance):
+ self.driver.post_interrupted_snapshot_cleanup(context, instance)
+
@object_compat
@messaging.expected_exceptions(NotImplementedError)
def volume_snapshot_create(self, context, instance, volume_id,
@@ -2994,7 +3048,7 @@ def set_admin_password(self, context, instance, new_pass):
raise
except Exception as e:
# Catch all here because this could be anything.
- LOG.exception(_('set_admin_password failed: %s') % e,
+ LOG.exception(_LE('set_admin_password failed: %s'), e,
instance=instance)
self._set_instance_obj_error_state(context, instance)
# We create a new exception here so that we won't
@@ -3056,7 +3110,7 @@ def _get_rescue_image(self, context, instance, rescue_image_ref=None):
@wrap_instance_event
@wrap_instance_fault
def rescue_instance(self, context, instance, rescue_password,
- rescue_image_ref=None):
+ rescue_image_ref=None, clean_shutdown=True):
context = context.elevated()
LOG.audit(_('Rescuing'), context=context, instance=instance)
@@ -3075,11 +3129,13 @@ def rescue_instance(self, context, instance, rescue_password,
network_info=network_info)
try:
+ self._power_off_instance(context, instance, clean_shutdown)
+
self.driver.rescue(context, instance,
network_info,
rescue_image_meta, admin_password)
except Exception as e:
- LOG.exception(_("Error trying to Rescue Instance"),
+ LOG.exception(_LE("Error trying to Rescue Instance"),
instance=instance)
raise exception.InstanceNotRescuable(
instance_id=instance.uuid,
@@ -3141,20 +3197,23 @@ def _cleanup_stored_instance_types(self, migration, instance,
instance's system_metadata. Optionally update the "current"
instance_type to the saved old one first.
- Returns the updated system_metadata as a dict, as well as the
- post-cleanup current instance type.
+ Returns the updated system_metadata as a dict, the
+ post-cleanup current instance type and the to-be dropped
+ instance type.
"""
sys_meta = instance.system_metadata
if restore_old:
instance_type = flavors.extract_flavor(instance, 'old_')
+ drop_instance_type = flavors.extract_flavor(instance)
sys_meta = flavors.save_flavor_info(sys_meta, instance_type)
else:
instance_type = flavors.extract_flavor(instance)
+ drop_instance_type = flavors.extract_flavor(instance, 'old_')
flavors.delete_flavor_info(sys_meta, 'old_')
flavors.delete_flavor_info(sys_meta, 'new_')
- return sys_meta, instance_type
+ return sys_meta, instance_type, drop_instance_type
@wrap_exception()
@wrap_instance_event
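To make the stashed-flavor bookkeeping above concrete: the current flavor lives unprefixed in system_metadata, while the resize source and target are stashed under 'old_' and 'new_' prefixes. A simplified sketch, with plain dicts standing in for the flavors.extract_flavor()/save_flavor_info() helpers, of what confirm (restore_old=False) versus revert (restore_old=True) keeps and drops:

    # Illustrative model only, not the real helper.
    def cleanup_stored_instance_types(stash, restore_old=False):
        if restore_old:
            keep = stash['old_']     # revert: the old flavor is current again
            drop = stash['current']
        else:
            keep = stash['current']  # confirm: the new flavor stays current
            drop = stash['old_']
        # Either way, the stashed copies are purged.
        stash.pop('old_', None)
        stash.pop('new_', None)
        return keep, drop

    keep, drop = cleanup_stored_instance_types(
        {'current': {'id': 2}, 'old_': {'id': 1}, 'new_': {'id': 2}})
    assert (keep['id'], drop['id']) == (2, 1)  # confirm drops the old flavor

The dropped flavor is what _confirm_resize and finish_revert_resize now hand to the resource tracker's drop_resize_claim().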
@@ -3218,8 +3277,8 @@ def _confirm_resize(self, context, instance, quotas,
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# NOTE(danms): delete stashed migration information
- sys_meta, instance_type = self._cleanup_stored_instance_types(
- migration, instance)
+ sys_meta, instance_type, old_instance_type = (
+ self._cleanup_stored_instance_types(migration, instance))
sys_meta.pop('old_vm_state', None)
instance.system_metadata = sys_meta
@@ -3237,7 +3296,7 @@ def _confirm_resize(self, context, instance, quotas,
migration.save(context.elevated())
rt = self._get_resource_tracker(migration.source_node)
- rt.drop_resize_claim(instance, prefix='old_')
+ rt.drop_resize_claim(instance, old_instance_type)
# NOTE(mriedem): The old_vm_state could be STOPPED but the user
# might have manually powered up the instance to confirm the
@@ -3342,8 +3401,8 @@ def finish_revert_resize(self, context, instance, reservations, migration):
self._notify_about_instance_usage(
context, instance, "resize.revert.start")
- sys_meta, instance_type = self._cleanup_stored_instance_types(
- migration, instance, True)
+ sys_meta, instance_type, drop_instance_type = (
+ self._cleanup_stored_instance_types(migration, instance, True))
# NOTE(mriedem): delete stashed old_vm_state information; we
# default to ACTIVE for backwards compatibility if old_vm_state
@@ -3463,6 +3522,11 @@ def prep_resize(self, context, image, instance, instance_type,
instance_type, quotas,
request_spec, filter_properties,
node)
+ # NOTE(dgenin): This is thrown in LibvirtDriver when the
+ # instance to be migrated is backed by LVM.
+ # Remove when LVM migration is implemented.
+ except exception.MigrationPreCheckError:
+ raise
except Exception:
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
@@ -3492,21 +3556,18 @@ def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
instance_uuid = instance['uuid']
try:
- # NOTE(comstud): remove the scheduler RPCAPI method when
- # this is adjusted to send to conductor... and then
- # deprecate the scheduler manager method.
- scheduler_method = self.scheduler_rpcapi.prep_resize
- instance_p = obj_base.obj_to_primitive(instance)
- method_args = (instance_p, instance_type, image, request_spec,
- filter_properties, quotas.reservations)
+ reschedule_method = self.compute_task_api.resize_instance
+ scheduler_hint = dict(filter_properties=filter_properties)
+ method_args = (instance, None, scheduler_hint, instance_type,
+ quotas.reservations)
task_state = task_states.RESIZE_PREP
rescheduled = self._reschedule(context, request_spec,
- filter_properties, instance, scheduler_method,
+ filter_properties, instance, reschedule_method,
method_args, task_state, exc_info)
except Exception as error:
rescheduled = False
- LOG.exception(_("Error trying to reschedule"),
+ LOG.exception(_LE("Error trying to reschedule"),
instance_uuid=instance_uuid)
compute_utils.add_instance_fault_from_exc(context,
instance, error,
@@ -3530,7 +3591,8 @@ def _reschedule_resize_or_reraise(self, context, image, instance, exc_info,
@errors_out_migration
@wrap_instance_fault
def resize_instance(self, context, instance, image,
- reservations, migration, instance_type):
+ reservations, migration, instance_type,
+ clean_shutdown=True):
"""Starts the migration of a running instance to another host."""
quotas = quotas_obj.Quotas.from_reservations(context,
@@ -3558,10 +3620,13 @@ def resize_instance(self, context, instance, image,
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
+ timeout, retry_interval = self._get_power_off_values(context,
+ instance, clean_shutdown)
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
- block_device_info)
+ block_device_info,
+ timeout, retry_interval)
self._terminate_volume_connections(context, instance, bdms)
@@ -3594,6 +3659,17 @@ def _terminate_volume_connections(self, context, instance, bdms):
self.volume_api.terminate_connection(context, bdm.volume_id,
connector)
+ @staticmethod
+ def _save_instance_info(instance, instance_type, sys_meta):
+ flavors.save_flavor_info(sys_meta, instance_type)
+ instance.instance_type_id = instance_type['id']
+ instance.memory_mb = instance_type['memory_mb']
+ instance.vcpus = instance_type['vcpus']
+ instance.root_gb = instance_type['root_gb']
+ instance.ephemeral_gb = instance_type['ephemeral_gb']
+ instance.system_metadata = sys_meta
+ instance.save()
+
def _finish_resize(self, context, instance, migration, disk_info,
image):
resize_instance = False
@@ -3611,14 +3687,7 @@ def _finish_resize(self, context, instance, migration, disk_info,
if old_instance_type_id != new_instance_type_id:
instance_type = flavors.extract_flavor(instance, prefix='new_')
- flavors.save_flavor_info(sys_meta, instance_type)
- instance.instance_type_id = instance_type['id']
- instance.memory_mb = instance_type['memory_mb']
- instance.vcpus = instance_type['vcpus']
- instance.root_gb = instance_type['root_gb']
- instance.ephemeral_gb = instance_type['ephemeral_gb']
- instance.system_metadata = sys_meta
- instance.save()
+ self._save_instance_info(instance, instance_type, sys_meta)
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
@@ -3647,11 +3716,18 @@ def _finish_resize(self, context, instance, migration, disk_info,
# NOTE(mriedem): If the original vm_state was STOPPED, we don't
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
- self.driver.finish_migration(context, migration, instance,
- disk_info,
- network_info,
- image, resize_instance,
- block_device_info, power_on)
+
+ try:
+ self.driver.finish_migration(context, migration, instance,
+ disk_info,
+ network_info,
+ image, resize_instance,
+ block_device_info, power_on)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ if resize_instance:
+ self._save_instance_info(instance,
+ old_instance_type, sys_meta)
migration.status = 'finished'
migration.save(context.elevated())
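The try/except added above follows a save-then-restore pattern: persist the new flavor's accounting before calling the driver, and put the old flavor back if finish_migration fails, so the instance record never advertises resources it does not hold. A hedged sketch of that shape, with dicts standing in for the instance and flavors:

    def save_instance_info(instance, flavor):
        # Stand-in for the _save_instance_info() helper above.
        instance['memory_mb'] = flavor['memory_mb']
        instance['vcpus'] = flavor['vcpus']

    def finish_with_rollback(instance, new_flavor, old_flavor, driver_op):
        save_instance_info(instance, new_flavor)
        try:
            driver_op()  # e.g. the driver's finish_migration call
        except Exception:
            save_instance_info(instance, old_flavor)  # undo the accounting
            raise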
@@ -3686,14 +3762,14 @@ def finish_resize(self, context, disk_info, image, instance,
disk_info, image)
quotas.commit()
except Exception:
- LOG.exception(_('Setting instance vm_state to ERROR'),
+ LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
with excutils.save_and_reraise_exception():
try:
quotas.rollback()
except Exception as qr_error:
- LOG.exception(_("Failed to rollback quota for failed "
- "finish_resize: %s"),
+ LOG.exception(_LE("Failed to rollback quota for failed "
+ "finish_resize: %s"),
qr_error, instance=instance)
self._set_instance_error_state(context, instance)
@@ -3882,7 +3958,8 @@ def resume_instance(self, context, instance):
@reverts_task_state
@wrap_instance_event
@wrap_instance_fault
- def shelve_instance(self, context, instance, image_id):
+ def shelve_instance(self, context, instance, image_id,
+ clean_shutdown=True):
"""Shelve an instance.
This should be used when you want to take a snapshot of the instance.
@@ -3910,7 +3987,7 @@ def update_task_state(task_state, expected_state=task_states.SHELVING):
instance.task_state = task_state
instance.save(expected_task_state=expected_state)
- self.driver.power_off(instance)
+ self._power_off_instance(context, instance, clean_shutdown)
current_power_state = self._get_power_state(context, instance)
self.driver.snapshot(context, instance, image_id, update_task_state)
@@ -4010,10 +4087,10 @@ def _unshelve_instance(self, context, instance, image, filter_properties,
instance.task_state = task_states.SPAWNING
instance.save()
- network_info = self._get_instance_nw_info(context, instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
- block_device_info = self._prep_block_device(context, instance, bdms)
+ block_device_info = self._prep_block_device(context, instance, bdms,
+ do_check_attach=False)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
if node is None:
@@ -4028,6 +4105,9 @@ def _unshelve_instance(self, context, instance, image, filter_properties,
shelved_image_ref = instance.image_ref
instance.image_ref = image['id']
+ self.network_api.migrate_instance_finish(context, instance,
+ {'source_compute': '', 'dest_compute': self.host})
+ network_info = self._get_instance_nw_info(context, instance)
try:
with rt.instance_claim(context, instance, limits):
self.driver.spawn(context, instance, image, injected_files=[],
@@ -4036,7 +4116,8 @@ def _unshelve_instance(self, context, instance, image, filter_properties,
block_device_info=block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_('Instance failed to spawn'), instance=instance)
+ LOG.exception(_LE('Instance failed to spawn'),
+ instance=instance)
if image:
instance.image_ref = shelved_image_ref
@@ -4287,8 +4368,8 @@ def _attach_volume(self, context, instance, bdm):
do_check_attach=False, do_driver_attach=True)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to attach %(volume_id)s "
- "at %(mountpoint)s"),
+ LOG.exception(_LE("Failed to attach %(volume_id)s "
+ "at %(mountpoint)s"),
{'volume_id': bdm.volume_id,
'mountpoint': bdm['mount_device']},
context=context, instance=instance)
@@ -4326,8 +4407,8 @@ def _detach_volume(self, context, instance, bdm):
encryption=encryption)
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
- LOG.exception(_('Failed to detach volume %(volume_id)s '
- 'from %(mp)s'),
+ LOG.exception(_LE('Failed to detach volume %(volume_id)s '
+ 'from %(mp)s'),
{'volume_id': volume_id, 'mp': mp},
context=context, instance=instance)
self.volume_api.roll_detaching(context, volume_id)
@@ -4387,6 +4468,7 @@ def _swap_volume(self, context, instance, bdm, connector, old_volume_id,
mountpoint = bdm['device_name']
failed = False
new_cinfo = None
+ resize_to = 0
try:
old_cinfo, new_cinfo = self._init_volume_connection(context,
new_volume_id,
@@ -4394,22 +4476,27 @@ def _swap_volume(self, context, instance, bdm, connector, old_volume_id,
connector,
instance,
bdm)
- self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint)
+ old_vol_size = self.volume_api.get(context, old_volume_id)['size']
+ new_vol_size = self.volume_api.get(context, new_volume_id)['size']
+ if new_vol_size > old_vol_size:
+ resize_to = new_vol_size
+ self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
+ resize_to)
except Exception: # pylint: disable=W0702
failed = True
with excutils.save_and_reraise_exception():
if new_cinfo:
- msg = _("Failed to swap volume %(old_volume_id)s "
- "for %(new_volume_id)s")
- LOG.exception(msg % {'old_volume_id': old_volume_id,
- 'new_volume_id': new_volume_id},
+ msg = _LE("Failed to swap volume %(old_volume_id)s "
+ "for %(new_volume_id)s")
+ LOG.exception(msg, {'old_volume_id': old_volume_id,
+ 'new_volume_id': new_volume_id},
context=context,
instance=instance)
else:
- msg = _("Failed to connect to volume %(volume_id)s "
- "with volume at %(mountpoint)s")
- LOG.exception(msg % {'volume_id': new_volume_id,
- 'mountpoint': bdm['device_name']},
+ msg = _LE("Failed to connect to volume %(volume_id)s "
+ "with volume at %(mountpoint)s")
+ LOG.exception(msg, {'volume_id': new_volume_id,
+ 'mountpoint': bdm['device_name']},
context=context,
instance=instance)
self.volume_api.roll_detaching(context, old_volume_id)
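The resize_to computation introduced above reduces to a comparison of the two volumes' integer 'size' fields (GiB) from the Cinder API, with 0 meaning "do not resize":

    def compute_resize_to(old_vol_size, new_vol_size):
        # Only growing is ever requested; swapping onto a smaller or
        # equal-sized volume leaves the virtual disk alone.
        return new_vol_size if new_vol_size > old_vol_size else 0

    assert compute_resize_to(10, 20) == 20
    assert compute_resize_to(20, 20) == 0
    assert compute_resize_to(20, 10) == 0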
@@ -4493,6 +4580,9 @@ def remove_volume_connection(self, context, volume_id, instance):
pass
@object_compat
+ @wrap_exception()
+ @reverts_task_state
+ @wrap_instance_fault
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
@@ -4501,7 +4591,8 @@ def attach_interface(self, context, instance, network_id, port_id,
if len(network_info) != 1:
LOG.error(_('allocate_port_for_instance returned %(ports)s ports')
% dict(ports=len(network_info)))
- raise exception.InterfaceAttachFailed(instance=instance)
+ raise exception.InterfaceAttachFailed(
+ instance_uuid=instance.uuid)
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
@@ -4510,6 +4601,9 @@ def attach_interface(self, context, instance, network_id, port_id,
return network_info[0]
@object_compat
+ @wrap_exception()
+ @reverts_task_state
+ @wrap_instance_fault
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
network_info = instance.info_cache.network_info
@@ -4608,8 +4702,9 @@ def pre_live_migration(self, context, instance, block_migration, disk,
:param context: security context
:param instance: dict of instance data
:param block_migration: if true, prepare for block migration
- :param migrate_data : if not None, it is a dict which holds data
- required for live migration without shared storage.
+ :param migrate_data: if not None, it is a dict which holds data
+ required for live migration without shared
+ storage.
"""
block_device_info = self._get_instance_block_device_info(
@@ -4685,7 +4780,7 @@ def live_migration(self, context, dest, instance, block_migration,
except Exception:
with excutils.save_and_reraise_exception():
- LOG.exception(_('Pre live migration failed at %s'),
+ LOG.exception(_LE('Pre live migration failed at %s'),
dest, instance=instance)
self._rollback_live_migration(context, instance, dest,
block_migration, migrate_data)
@@ -4756,7 +4851,7 @@ def _post_live_migration(self, ctxt, instance,
# Cleanup source host post live-migration
block_device_info = self._get_instance_block_device_info(
- ctxt, instance, bdms)
+ ctxt, instance, bdms=bdms)
self.driver.post_live_migration(ctxt, instance, block_device_info,
migrate_data)
@@ -4791,6 +4886,17 @@ def _post_live_migration(self, ctxt, instance,
instance,
migration)
+ destroy_vifs = False
+ try:
+ self.driver.post_live_migration_at_source(ctxt, instance,
+ network_info)
+ except NotImplementedError as ex:
+ LOG.debug(ex, instance=instance)
+ # For hypervisors other than libvirt, it is possible that VIFs are
+ # instead unplugged from the source node in the cleanup method
+ destroy_vifs = True
+
# Define domain at destination host, without doing it,
# pause/suspend/terminate do not work.
self.compute_rpcapi.post_live_migration_at_destination(ctxt,
@@ -4802,16 +4908,9 @@ def _post_live_migration(self, ctxt, instance,
if do_cleanup:
self.driver.cleanup(ctxt, instance, network_info,
destroy_disks=destroy_disks,
- migrate_data=migrate_data)
- else:
- # self.driver.cleanup() usually performs vif unplugging
- # but we must do it explicitly here when block_migration
- # is false, as the network devices at the source must be
- # torn down
- try:
- self.driver.unplug_vifs(instance, network_info)
- except NotImplementedError as e:
- LOG.debug(e, instance=instance)
+ migrate_data=migrate_data,
+ destroy_vifs=destroy_vifs)
+
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(ctxt, instance,
self.host, teardown=True)
@@ -4879,7 +4978,7 @@ def post_live_migration_at_destination(self, context, instance,
compute_node = self._get_compute_info(context, self.host)
node_name = compute_node.hypervisor_hostname
except exception.NotFound:
- LOG.exception(_('Failed to get compute_info for %s') % self.host)
+ LOG.exception(_LE('Failed to get compute_info for %s'), self.host)
finally:
instance.host = self.host
instance.power_state = current_power_state
@@ -4995,7 +5094,7 @@ def _heal_instance_info_cache(self, context):
# The list of instances to heal is empty so rebuild it
LOG.debug('Rebuilding the list of instances to heal')
db_instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=[], use_slave=True)
for inst in db_instances:
# We don't want to refresh the cache for instances
# which are building or deleting so don't put them
@@ -5025,7 +5124,7 @@ def _heal_instance_info_cache(self, context):
inst = objects.Instance.get_by_uuid(
context, instance_uuids.pop(0),
expected_attrs=['system_metadata', 'info_cache'],
use_slave=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
@@ -5048,7 +5147,7 @@ def _heal_instance_info_cache(self, context):
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
self._get_instance_nw_info(context, instance, use_slave=True)
LOG.debug('Updated the network info_cache for instance',
instance=instance)
except Exception:
@@ -5064,7 +5163,7 @@ def _poll_rebooting_instances(self, context):
filters = {'task_state': task_states.REBOOTING,
'host': self.host}
rebooting = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
to_poll = []
for instance in rebooting:
@@ -5081,7 +5180,7 @@ def _poll_rescued_instances(self, context):
'host': self.host}
rescued_instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=["system_metadata"],
use_slave=True)
to_unrescue = []
for instance in rescued_instances:
@@ -5099,7 +5198,7 @@ def _poll_unconfirmed_resizes(self, context):
migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
use_slave=True)
migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
@@ -5127,7 +5226,7 @@ def _set_migration_to_error(migration, reason, **kwargs):
try:
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
use_slave=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
@@ -5138,6 +5237,17 @@ def _set_migration_to_error(migration, reason, **kwargs):
_set_migration_to_error(migration, reason,
instance=instance)
continue
+ # race condition: an instance in the DELETING task state should not
+ # have its migration set to error, otherwise the soon-to-be-deleted
+ # instance, which is still in the RESIZED vm_state, would never be
+ # able to confirm the resize
+ if instance.task_state in [task_states.DELETING,
+ task_states.SOFT_DELETING]:
+ msg = ("Instance being deleted or soft deleted during resize "
+ "confirmation. Skipping.")
+ LOG.debug(msg, instance=instance)
+ continue
+
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state != vm_states.RESIZED or task_state is not None:
@@ -5166,7 +5276,7 @@ def _poll_shelved_instances(self, context):
'host': self.host}
shelved_instances = objects.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
to_gc = []
for instance in shelved_instances:
@@ -5181,7 +5291,7 @@ def _poll_shelved_instances(self, context):
instance.save()
self.shelve_offload_instance(context, instance)
except Exception:
- LOG.exception(_('Periodic task failed to offload instance.'),
+ LOG.exception(_LE('Periodic task failed to offload instance.'),
instance=instance)
@periodic_task.periodic_task
@@ -5197,7 +5307,8 @@ def _instance_usage_audit(self, context):
begin, end = utils.last_completed_audit_period()
instances = objects.InstanceList.get_active_by_window_joined(
context, begin, end, host=self.host,
- expected_attrs=['system_metadata', 'info_cache', 'metadata'])
+ expected_attrs=['system_metadata', 'info_cache', 'metadata'],
+ use_slave=True)
num_instances = len(instances)
errors = 0
successes = 0
@@ -5221,9 +5332,9 @@ def _instance_usage_audit(self, context):
ignore_missing_network_data=False)
successes += 1
except Exception:
- LOG.exception(_('Failed to generate usage '
- 'audit for instance '
- 'on host %s') % self.host,
+ LOG.exception(_LE('Failed to generate usage '
+ 'audit for instance '
+ 'on host %s'), self.host,
instance=instance)
errors += 1
compute_utils.finish_instance_usage_audit(context,
@@ -5262,7 +5373,7 @@ def _poll_bandwidth_usage(self, context):
instances = objects.InstanceList.get_by_host(context,
self.host,
use_slave=True)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
@@ -5285,7 +5396,7 @@ def _poll_bandwidth_usage(self, context):
last_ctr_in = None
last_ctr_out = None
# TODO(geekinutah): Once bw_usage_cache object is created
# need to revisit this and slaveify.
usage = self.conductor_api.bw_usage_get(context,
bw_ctr['uuid'],
start_time,
@@ -5296,7 +5407,7 @@ def _poll_bandwidth_usage(self, context):
last_ctr_in = usage['last_ctr_in']
last_ctr_out = usage['last_ctr_out']
else:
# TODO(geekinutah): Same here, pls slaveify
usage = self.conductor_api.bw_usage_get(
context, bw_ctr['uuid'], prev_time,
bw_ctr['mac_address'])
@@ -5329,13 +5440,13 @@ def _poll_bandwidth_usage(self, context):
last_refreshed=refreshed,
update_cells=update_cells)
- def _get_host_volume_bdms(self, context):
+ def _get_host_volume_bdms(self, context, use_slave=False):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = objects.InstanceList.get_by_host(context, self.host)
for instance in instances:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
- context, instance.uuid)
+ context, instance.uuid, use_slave=use_slave)
instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
@@ -5362,7 +5473,8 @@ def _poll_volume_usage(self, context, start_time=None):
if not start_time:
start_time = utils.last_completed_audit_period()[1]
- compute_host_bdms = self._get_host_volume_bdms(context)
+ compute_host_bdms = self._get_host_volume_bdms(context,
+ use_slave=True)
if not compute_host_bdms:
return
@@ -5389,7 +5501,7 @@ def _sync_power_states(self, context):
"""
db_instances = objects.InstanceList.get_by_host(context,
self.host,
use_slave=True)
num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
@@ -5401,9 +5513,9 @@ def _sync_power_states(self, context):
'num_vm_instances': num_vm_instances})
for db_instance in db_instances:
- #NOTE(melwitt): This must be synchronized as we query state from
- # two separate sources, the driver and the database.
- # They are set (in stop_instance) and read, in sync.
+ # NOTE(melwitt): This must be synchronized as we query state from
+ # two separate sources, the driver and the database.
+ # They are set (in stop_instance) and read, in sync.
@utils.synchronized(db_instance.uuid)
def query_driver_power_state_and_sync():
self._query_driver_power_state_and_sync(context, db_instance)
@@ -5433,14 +5545,14 @@ def _query_driver_power_state_and_sync(self, context, db_instance):
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore.
pass
def _sync_instance_power_state(self, context, db_instance, vm_power_state,
use_slave=False):
"""Align instance power state between the database and hypervisor.
If the instance is not found on the hypervisor, but is in the database,
@@ -5449,7 +5561,7 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state,
# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
db_instance.refresh(use_slave=use_slave)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state
@@ -5505,14 +5617,17 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state,
# Note(maoy): here we call the API instead of
# brutally updating the vm_state in the database
# to allow all the hooks and checks to be performed.
- self.compute_api.stop(context, db_instance)
+ if db_instance.shutdown_terminate:
+ self.compute_api.delete(context, db_instance)
+ else:
+ self.compute_api.stop(context, db_instance)
except Exception:
# Note(maoy): there is no need to propagate the error
# because the same power_state will be retrieved next
# time and retried.
# For example, there might be another task scheduled.
- LOG.exception(_("error during stop() in "
- "sync_power_state."),
+ LOG.exception(_LE("error during stop() in "
+ "sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.SUSPENDED:
LOG.warn(_("Instance is suspended unexpectedly. Calling "
@@ -5520,8 +5635,8 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state,
try:
self.compute_api.stop(context, db_instance)
except Exception:
- LOG.exception(_("error during stop() in "
- "sync_power_state."),
+ LOG.exception(_LE("error during stop() in "
+ "sync_power_state."),
instance=db_instance)
elif vm_power_state == power_state.PAUSED:
# Note(maoy): a VM may get into the paused state not only
@@ -5551,8 +5666,8 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state,
# instance.
self.compute_api.force_stop(context, db_instance)
except Exception:
- LOG.exception(_("error during stop() in "
- "sync_power_state."),
+ LOG.exception(_LE("error during stop() in "
+ "sync_power_state."),
instance=db_instance)
elif vm_state == vm_states.PAUSED:
if vm_power_state in (power_state.SHUTDOWN,
@@ -5562,8 +5677,8 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state,
try:
self.compute_api.force_stop(context, db_instance)
except Exception:
- LOG.exception(_("error during stop() in "
- "sync_power_state."),
+ LOG.exception(_LE("error during stop() in "
+ "sync_power_state."),
instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
@@ -5582,11 +5697,12 @@ def _reclaim_queued_deletes(self, context):
LOG.debug("CONF.reclaim_instance_interval <= 0, skipping...")
return
- # FIXME(comstud): Dummy quota object for now. See bug 1296414.
- # We have potential for inconsistency if we decide here to not
- # update quotas. _delete_instance() should determine whether or
- # not to update quotas based on if instance is in a SOFT_DELETED
- # state or not.
+ # TODO(comstud, jichenjc): Dummy quota object for now. See bug 1296414.
+ # The only case where the quota might become inconsistent is if the
+ # compute node dies between setting the instance state to SOFT_DELETED
+ # and committing the quota reservation to the DB. When the compute node
+ # restarts, it has no way of knowing whether the reservation was
+ # committed or has expired. Since this is a rare case, it is left as
+ # a TODO.
quotas = quotas_obj.Quotas.from_reservations(context, None)
filters = {'vm_state': vm_states.SOFT_DELETED,
@@ -5595,7 +5711,7 @@ def _reclaim_queued_deletes(self, context):
instances = objects.InstanceList.get_by_filters(
context, filters,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
@@ -5674,7 +5790,7 @@ def _cleanup_running_deleted_instances(self, context):
with utils.temporary_mutation(context, read_deleted="yes"):
for instance in self._running_deleted_instances(context):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=True)
if action == "log":
LOG.warning(_("Detected instance with name label "
@@ -5764,7 +5880,7 @@ def _error_out_instance_on_exception(self, context, instance,
task_state=None)
raise error.inner_exception
except Exception:
- LOG.exception(_('Setting instance vm_state to ERROR'),
+ LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance_uuid=instance_uuid)
with excutils.save_and_reraise_exception():
if quotas:
@@ -5773,11 +5889,11 @@ def _error_out_instance_on_exception(self, context, instance,
@aggregate_object_compat
@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'add_aggregate_host')
@@ -5790,11 +5906,11 @@ def add_aggregate_host(self, context, aggregate, host, slave_info):
@aggregate_object_compat
@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'remove_aggregate_host')
@@ -5852,7 +5968,7 @@ def _run_image_cache_manager_pass(self, context):
'soft_deleted': True,
'host': nodes}
filtered_instances = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
self.driver.manage_image_cache(context, filtered_instances)
@@ -5870,7 +5986,7 @@ def _run_pending_deletes(self, context):
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
- context, filters, expected_attrs=attrs)
+ context, filters, expected_attrs=attrs, use_slave=True)
LOG.debug('There are %d instances to clean', len(instances))
for instance in instances:
diff --git a/nova/compute/monitors/__init__.py b/nova/compute/monitors/__init__.py
index 511738599f..fab30ada13 100644
--- a/nova/compute/monitors/__init__.py
+++ b/nova/compute/monitors/__init__.py
@@ -26,8 +26,8 @@
from oslo.config import cfg
import six
+from nova.i18n import _
from nova import loadables
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/compute/monitors/virt/cpu_monitor.py b/nova/compute/monitors/virt/cpu_monitor.py
index 1cc92db370..9295120769 100644
--- a/nova/compute/monitors/virt/cpu_monitor.py
+++ b/nova/compute/monitors/virt/cpu_monitor.py
@@ -22,7 +22,7 @@
from nova.compute import monitors
from nova.compute.monitors import cpu_monitor as monitor
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index aaf0eb3890..fb54a925e7 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -24,14 +24,15 @@
from nova.compute import claims
from nova.compute import flavors
from nova.compute import monitors
+from nova.compute import resources as ext_resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import conductor
from nova import context
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -46,7 +47,10 @@
help='Amount of memory in MB to reserve for the host'),
cfg.StrOpt('compute_stats_class',
default='nova.compute.stats.Stats',
- help='Class that will manage stats for the local compute host')
+ help='Class that will manage stats for the local compute host'),
+ cfg.ListOpt('compute_resources',
+ default=['vcpu'],
+ help='The names of the extra resources to track.'),
]
CONF = cfg.CONF
@@ -75,7 +79,10 @@ def __init__(self, host, driver, nodename):
self.conductor_api = conductor.API()
monitor_handler = monitors.ResourceMonitorHandler()
self.monitors = monitor_handler.choose_monitors(self)
+ self.ext_resources_handler = \
+ ext_resources.ResourceHandler(CONF.compute_resources)
self.notifier = rpc.get_notifier()
+ self.old_resources = {}
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance_ref, limits=None):
@@ -138,10 +145,10 @@ def resize_claim(self, context, instance, instance_type, limits=None):
:param instance: instance object to reserve resources for
:param instance_type: new instance_type being resized to
:param limits: Dict of oversubscription limits for memory, disk,
- and CPUs.
+ and CPUs
:returns: A Claim ticket representing the reserved resources. This
- should be turned into finalize a resource claim or free
- resources after the compute operation is finished.
+ should be used to finalize a resource claim or free
+ resources after the compute operation is finished.
"""
if self.disabled:
# compute_driver doesn't support resource tracking, just
@@ -228,12 +235,10 @@ def drop_resize_claim(self, instance, instance_type=None, prefix='new_'):
instance_type = self._get_instance_type(ctxt, instance, prefix)
if instance_type['id'] == itype['id']:
- self.stats.update_stats_for_migration(itype, sign=-1)
if self.pci_tracker:
self.pci_tracker.update_pci_for_migration(instance,
sign=-1)
self._update_usage(self.compute_node, itype, sign=-1)
- self.compute_node['stats'] = jsonutils.dumps(self.stats)
ctxt = context.get_admin_context()
self._update(ctxt, self.compute_node)
@@ -376,9 +381,20 @@ def _sync_compute_node(self, context, resources):
LOG.info(_('Compute_service record updated for %(host)s:%(node)s')
% {'host': self.host, 'node': self.nodename})
+ def _write_ext_resources(self, resources):
+ resources['stats'] = {}
+ resources['stats'].update(self.stats)
+ self.ext_resources_handler.write_resources(resources)
+
def _create(self, context, values):
"""Create the compute node in the DB."""
# initialize load stats from existing instances:
+ self._write_ext_resources(values)
+ # NOTE(pmurray): the stats field is stored as a json string. The
+ # json conversion will be done automatically by the ComputeNode object
+ # so this can be removed when using ComputeNode.
+ values['stats'] = jsonutils.dumps(values['stats'])
+
self.compute_node = self.conductor_api.compute_node_create(context,
values)
@@ -422,25 +438,45 @@ def _report_hypervisor_resource_view(self, resources):
LOG.debug("Hypervisor: no assignable PCI devices")
def _report_final_resource_view(self, resources):
- """Report final calculate of free memory, disk, CPUs, and PCI devices,
+ """Report final calculate of physical memory, used virtual memory,
+ disk, usable vCPUs, used virtual CPUs and PCI devices,
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
- LOG.audit(_("Free ram (MB): %s") % resources['free_ram_mb'])
+ LOG.audit(_("Total physical ram (MB): %(pram)s, "
+ "total allocated virtual ram (MB): %(vram)s"),
+ {'pram': resources['memory_mb'],
+ 'vram': resources['memory_mb_used']})
LOG.audit(_("Free disk (GB): %s") % resources['free_disk_gb'])
vcpus = resources['vcpus']
if vcpus:
- free_vcpus = vcpus - resources['vcpus_used']
- LOG.audit(_("Free VCPUS: %s") % free_vcpus)
+ LOG.audit(_("Total usable vcpus: %(tcpu)s, "
+ "total allocated vcpus: %(ucpu)s"),
+ {'tcpu': vcpus, 'ucpu': resources['vcpus_used']})
else:
LOG.audit(_("Free VCPU information unavailable"))
if 'pci_stats' in resources:
LOG.audit(_("PCI stats: %s"), resources['pci_stats'])
+ def _resource_change(self, resources):
+ """Check to see if any resouces have changed."""
+ if cmp(resources, self.old_resources) != 0:
+ self.old_resources = resources
+ return True
+ return False
+
def _update(self, context, values):
"""Persist the compute node updates to the DB."""
+ self._write_ext_resources(values)
+ # NOTE(pmurray): the stats field is stored as a json string. The
+ # json conversion will be done automatically by the ComputeNode object
+ # so this can be removed when using ComputeNode.
+ values['stats'] = jsonutils.dumps(values['stats'])
+
+ if not self._resource_change(values):
+ return
if "service" in self.compute_node:
del self.compute_node['service']
self.compute_node = self.conductor_api.compute_node_update(
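_resource_change above leans on Python 2's cmp() for the dict comparison; a hedged, Python-3-friendly equivalent of the same skip-unchanged-updates idea is a plain inequality test:

    def resource_change(new_resources, old_snapshot):
        # Report True (and refresh the snapshot) only when something
        # differs, so unchanged periodic updates skip the DB write.
        if new_resources != old_snapshot:
            old_snapshot.clear()
            old_snapshot.update(new_resources)
            return True
        return False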
@@ -465,7 +501,7 @@ def _update_usage(self, resources, usage, sign=1):
resources['local_gb_used'])
resources['running_vms'] = self.stats.num_instances
- resources['vcpus_used'] = self.stats.num_vcpus_used
+ self.ext_resources_handler.update_from_instance(usage, sign)
def _update_usage_from_migration(self, context, instance, resources,
migration):
@@ -508,11 +544,9 @@ def _update_usage_from_migration(self, context, instance, resources,
migration['old_instance_type_id'])
if itype:
- self.stats.update_stats_for_migration(itype)
if self.pci_tracker:
self.pci_tracker.update_pci_for_migration(instance)
self._update_usage(resources, itype)
- resources['stats'] = jsonutils.dumps(self.stats)
if self.pci_tracker:
resources['pci_stats'] = jsonutils.dumps(
self.pci_tracker.stats)
@@ -585,7 +619,6 @@ def _update_usage_from_instance(self, resources, instance):
self._update_usage(resources, instance, sign=sign)
resources['current_workload'] = self.stats.calculate_workload()
- resources['stats'] = jsonutils.dumps(self.stats)
if self.pci_tracker:
resources['pci_stats'] = jsonutils.dumps(self.pci_tracker.stats)
else:
@@ -599,13 +632,13 @@ def _update_usage_from_instances(self, resources, instances):
"""
self.tracked_instances.clear()
- # purge old stats
+ # purge old stats and init with anything passed in by the driver
self.stats.clear()
+ self.stats.digest_stats(resources.get('stats'))
# set some initial values, reserve room for host/hypervisor:
resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
resources['memory_mb_used'] = CONF.reserved_host_memory_mb
- resources['vcpus_used'] = 0
resources['free_ram_mb'] = (resources['memory_mb'] -
resources['memory_mb_used'])
resources['free_disk_gb'] = (resources['local_gb'] -
@@ -613,10 +646,11 @@ def _update_usage_from_instances(self, resources, instances):
resources['current_workload'] = 0
resources['running_vms'] = 0
+ # Reset values for extended resources
+ self.ext_resources_handler.reset_resources(resources, self.driver)
+
for instance in instances:
- if instance['vm_state'] == vm_states.DELETED:
- continue
- else:
+ if instance['vm_state'] != vm_states.DELETED:
self._update_usage_from_instance(resources, instance)
def _find_orphaned_instances(self):
diff --git a/nova/compute/resources/__init__.py b/nova/compute/resources/__init__.py
new file mode 100644
index 0000000000..cb023ea523
--- /dev/null
+++ b/nova/compute/resources/__init__.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stevedore
+
+from nova.i18n import _LW
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+RESOURCE_NAMESPACE = 'nova.compute.resources'
+
+
+class ResourceHandler():
+
+ def _log_missing_plugins(self, names):
+ for name in names:
+ if name not in self._mgr.names():
+ LOG.warn(_LW('Compute resource plugin %s was not loaded'),
+ name)
+
+ def __init__(self, names, propagate_map_exceptions=False):
+ """Initialise the resource handler by loading the plugins.
+
+ The ResourceHandler uses stevedore to load the resource plugins.
+ The handler can handle and report exceptions raised in the plugins
+ depending on the value of the propagate_map_exceptions parameter.
+ It is useful in testing to propagate exceptions so they are exposed
+ as part of the test. If exceptions are not propagated they are
+ logged at error level.
+
+ Any named plugins that are not located are logged.
+
+ :param names: the list of plugins to load by name
+ :param propagate_map_exceptions: True indicates exceptions in the
+ plugins should be raised, False indicates they should be handled and
+ logged.
+ """
+ self._mgr = stevedore.NamedExtensionManager(
+ namespace=RESOURCE_NAMESPACE,
+ names=names,
+ propagate_map_exceptions=propagate_map_exceptions,
+ invoke_on_load=True)
+ self._log_missing_plugins(names)
+
+ def reset_resources(self, resources, driver):
+ """Reset the resources to their initial state.
+
+ Each plugin is called to reset its state. The resources data provided
+ is the initial state gathered from the hypervisor. The driver is also
+ provided in case the plugin needs to obtain additional information
+ from the driver, for example, the memory calculation obtains
+ the memory overhead from the driver.
+
+ :param resources: the resources reported by the hypervisor
+ :param driver: the driver for the hypervisor
+
+ :returns: None
+ """
+ if self._mgr.extensions:
+ self._mgr.map_method('reset', resources, driver)
+
+ def test_resources(self, usage, limits):
+ """Test the ability to support the given instance.
+
+ Each resource plugin is called to determine if its resource is able
+ to support the additional requirements of a new instance. The
+ plugins either return None to indicate they have sufficient resource
+ available, or a human readable string to indicate why they cannot.
+
+ :param usage: the additional resource usage
+ :param limits: limits used for the calculation
+
+ :returns: a list of return values from the plugins
+ """
+ if not self._mgr.extensions:
+ return []
+
+ reasons = self._mgr.map_method('test', usage, limits)
+ return reasons
+
+ def update_from_instance(self, usage, sign=1):
+ """Update the resource information to reflect the allocation for
+ an instance with the given resource usage.
+
+ :param usage: the resource usage of the instance
+ :param sign: has value 1 or -1. 1 indicates the instance is being
+ added to the current usage, -1 indicates the instance is being removed.
+
+ :returns: None
+ """
+ if not self._mgr.extensions:
+ return
+
+ if sign == 1:
+ self._mgr.map_method('add_instance', usage)
+ else:
+ self._mgr.map_method('remove_instance', usage)
+
+ def write_resources(self, resources):
+ """Write the resource data to populate the resources.
+
+ Each resource plugin is called to write its resource data to
+ resources.
+
+ :param resources: the compute node resources
+
+ :returns: None
+ """
+ if self._mgr.extensions:
+ self._mgr.map_method('write', resources)
+
+ def report_free_resources(self):
+ """Each resource plugin is called to log free resource information.
+
+ :returns: None
+ """
+ if not self._mgr.extensions:
+ return
+
+ self._mgr.map_method('report_free')
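For orientation, a hedged sketch of how a plugin reaches this handler: stevedore resolves names against the nova.compute.resources entry-point namespace, so a plugin such as the VCPU tracker below is registered by its package and then listed in CONF.compute_resources. The setup.cfg fragment is an assumption about packaging, not part of this patch:

    # Assumed setup.cfg registration:
    #
    #   [entry_points]
    #   nova.compute.resources =
    #       vcpu = nova.compute.resources.vcpu:VCPU
    #
    # With that installed, and compute_resources = vcpu in nova.conf,
    # the resource tracker drives the plugins roughly like this:
    from nova.compute import resources as ext_resources

    handler = ext_resources.ResourceHandler(['vcpu'])
    resources = {'vcpus': 8}                         # initial hypervisor view
    handler.reset_resources(resources, driver=None)  # VCPU ignores the driver
    reasons = handler.test_resources({'vcpus': 2}, {'vcpu': 8})
    if not any(reasons):                 # every plugin returned None: it fits
        handler.update_from_instance({'vcpus': 2}, sign=1)
    handler.write_resources(resources)   # resources['vcpus_used'] is now 2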
diff --git a/nova/compute/resources/base.py b/nova/compute/resources/base.py
new file mode 100644
index 0000000000..a04fc9f97f
--- /dev/null
+++ b/nova/compute/resources/base.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import abc
+
+import six
+
+
+@six.add_metaclass(abc.ABCMeta)
+class Resource(object):
+ """This base class defines the interface used for compute resource
+ plugins. It is not necessary to use this base class, but all compute
+ resource plugins must implement the abstract methods found here.
+ An instance of the plugin object is instantiated when it is loaded
+ by calling __init__() with no parameters.
+ """
+
+ @abc.abstractmethod
+ def reset(self, resources, driver):
+ """Set the resource to an initial state based on the resource
+ view discovered from the hypervisor.
+ """
+ pass
+
+ @abc.abstractmethod
+ def test(self, usage, limits):
+ """Test to see if we have sufficient resources to allocate for
+ an instance with the given resource usage.
+
+ :param usage: the resource usage of the instances
+ :param limits: limits to apply
+
+ :returns: None if the test passes or a string describing the reason
+ why the test failed
+ """
+ pass
+
+ @abc.abstractmethod
+ def add_instance(self, usage):
+ """Update resource information adding allocation according to the
+ given resource usage.
+
+ :param usage: the resource usage of the instance being added
+
+ :returns: None
+ """
+ pass
+
+ @abc.abstractmethod
+ def remove_instance(self, usage):
+ """Update resource information removing allocation according to the
+ given resource usage.
+
+ :param usage: the resource usage of the instance being removed
+
+ :returns: None
+
+ """
+ pass
+
+ @abc.abstractmethod
+ def write(self, resources):
+ """Write resource data to populate resources.
+
+ :param resources: the resources data to be populated
+
+ :returns: None
+ """
+ pass
+
+ @abc.abstractmethod
+ def report_free(self):
+ """Log free resources.
+
+ This method logs how much free resource is held by
+ the resource plugin.
+
+ :returns: None
+ """
+ pass
diff --git a/nova/compute/resources/vcpu.py b/nova/compute/resources/vcpu.py
new file mode 100644
index 0000000000..e7290a3e1a
--- /dev/null
+++ b/nova/compute/resources/vcpu.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute.resources import base
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+class VCPU(base.Resource):
+ """VCPU compute resource plugin.
+
+ This is effectively a simple counter based on the vcpu requirement of each
+ instance.
+ """
+ def __init__(self):
+ # initialize to a 'zero' resource.
+ # reset will be called to set real resource values
+ self._total = 0
+ self._used = 0
+
+ def reset(self, resources, driver):
+ # total vcpu is reset to the value taken from resources.
+ self._total = int(resources['vcpus'])
+ self._used = 0
+
+ def _get_requested(self, usage):
+ return int(usage.get('vcpus', 0))
+
+ def _get_limit(self, limits):
+ if limits and 'vcpu' in limits:
+ return int(limits.get('vcpu'))
+
+ def test(self, usage, limits):
+ requested = self._get_requested(usage)
+ limit = self._get_limit(limits)
+
+ LOG.debug('Total CPUs: %(total)d VCPUs, used: %(used).02f VCPUs' %
+ {'total': self._total, 'used': self._used})
+
+ if limit is None:
+ # treat resource as unlimited:
+ LOG.debug('CPUs limit not specified, defaulting to unlimited')
+ return
+
+ free = limit - self._used
+
+ # Oversubscribed resource policy info:
+ LOG.debug('CPUs limit: %(limit).02f VCPUs, free: %(free).02f VCPUs' %
+ {'limit': limit, 'free': free})
+
+ if requested > free:
+ return ('Free CPUs %(free).02f VCPUs < '
+ 'requested %(requested)d VCPUs' %
+ {'free': free, 'requested': requested})
+
+ def add_instance(self, usage):
+ requested = int(usage.get('vcpus', 0))
+ self._used += requested
+
+ def remove_instance(self, usage):
+ requested = int(usage.get('vcpus', 0))
+ self._used -= requested
+
+ def write(self, resources):
+ resources['vcpus'] = self._total
+ resources['vcpus_used'] = self._used
+
+ def report_free(self):
+ free_vcpus = self._total - self._used
+ LOG.debug('Free VCPUs: %s' % free_vcpus)
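A short worked example of the limit semantics: the limit, not the physical total, bounds admission, so a limit of 16 on an 8-vCPU host (for instance, from a 2.0 allocation ratio applied by the scheduler upstream) admits up to 16 vCPUs:

    from nova.compute.resources import vcpu

    plugin = vcpu.VCPU()
    plugin.reset({'vcpus': 8}, driver=None)
    assert plugin.test({'vcpus': 4}, {'vcpu': 16}) is None       # fits
    plugin.add_instance({'vcpus': 4})
    assert plugin.test({'vcpus': 13}, {'vcpu': 16}) is not None  # 4 + 13 > 16
    assert plugin.test({'vcpus': 13}, None) is None   # no limit -> unlimited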
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 7dc4543c50..9f7a602e3a 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -21,8 +21,8 @@
from nova import block_device
from nova import exception
+from nova.i18n import _
from nova.objects import base as objects_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova import rpc
@@ -67,199 +67,207 @@ class ComputeAPI(object):
API version history:
- 1.0 - Initial version.
- 1.1 - Adds get_host_uptime()
- 1.2 - Adds check_can_live_migrate_[destination|source]
- 1.3 - Adds change_instance_metadata()
- 1.4 - Remove instance_uuid, add instance argument to reboot_instance()
- 1.5 - Remove instance_uuid, add instance argument to pause_instance(),
- unpause_instance()
- 1.6 - Remove instance_uuid, add instance argument to suspend_instance()
- 1.7 - Remove instance_uuid, add instance argument to
- get_console_output()
- 1.8 - Remove instance_uuid, add instance argument to
- add_fixed_ip_to_instance()
- 1.9 - Remove instance_uuid, add instance argument to attach_volume()
- 1.10 - Remove instance_id, add instance argument to
- check_can_live_migrate_destination()
- 1.11 - Remove instance_id, add instance argument to
- check_can_live_migrate_source()
- 1.12 - Remove instance_uuid, add instance argument to confirm_resize()
- 1.13 - Remove instance_uuid, add instance argument to detach_volume()
- 1.14 - Remove instance_uuid, add instance argument to finish_resize()
- 1.15 - Remove instance_uuid, add instance argument to
- finish_revert_resize()
- 1.16 - Remove instance_uuid, add instance argument to get_diagnostics()
- 1.17 - Remove instance_uuid, add instance argument to get_vnc_console()
- 1.18 - Remove instance_uuid, add instance argument to inject_file()
- 1.19 - Remove instance_uuid, add instance argument to
- inject_network_info()
- 1.20 - Remove instance_id, add instance argument to
- post_live_migration_at_destination()
- 1.21 - Remove instance_uuid, add instance argument to
- power_off_instance() and stop_instance()
- 1.22 - Remove instance_uuid, add instance argument to
- power_on_instance() and start_instance()
- 1.23 - Remove instance_id, add instance argument to
- pre_live_migration()
- 1.24 - Remove instance_uuid, add instance argument to
- rebuild_instance()
- 1.25 - Remove instance_uuid, add instance argument to
- remove_fixed_ip_from_instance()
- 1.26 - Remove instance_id, add instance argument to
- remove_volume_connection()
- 1.27 - Remove instance_uuid, add instance argument to
- rescue_instance()
- 1.28 - Remove instance_uuid, add instance argument to reset_network()
- 1.29 - Remove instance_uuid, add instance argument to resize_instance()
- 1.30 - Remove instance_uuid, add instance argument to resume_instance()
- 1.31 - Remove instance_uuid, add instance argument to revert_resize()
- 1.32 - Remove instance_id, add instance argument to
- rollback_live_migration_at_destination()
- 1.33 - Remove instance_uuid, add instance argument to
- set_admin_password()
- 1.34 - Remove instance_uuid, add instance argument to
- snapshot_instance()
- 1.35 - Remove instance_uuid, add instance argument to
- unrescue_instance()
- 1.36 - Remove instance_uuid, add instance argument to
- change_instance_metadata()
- 1.37 - Remove instance_uuid, add instance argument to
- terminate_instance()
- 1.38 - Changes to prep_resize():
- - remove instance_uuid, add instance
- - remove instance_type_id, add instance_type
- - remove topic, it was unused
- 1.39 - Remove instance_uuid, add instance argument to run_instance()
- 1.40 - Remove instance_id, add instance argument to live_migration()
- 1.41 - Adds refresh_instance_security_rules()
- 1.42 - Add reservations arg to prep_resize(), resize_instance(),
- finish_resize(), confirm_resize(), revert_resize() and
- finish_revert_resize()
- 1.43 - Add migrate_data to live_migration()
- 1.44 - Adds reserve_block_device_name()
-
- 2.0 - Remove 1.x backwards compat
- 2.1 - Adds orig_sys_metadata to rebuild_instance()
- 2.2 - Adds slave_info parameter to add_aggregate_host() and
- remove_aggregate_host()
- 2.3 - Adds volume_id to reserve_block_device_name()
- 2.4 - Add bdms to terminate_instance
- 2.5 - Add block device and network info to reboot_instance
- 2.6 - Remove migration_id, add migration to resize_instance
- 2.7 - Remove migration_id, add migration to confirm_resize
- 2.8 - Remove migration_id, add migration to finish_resize
- 2.9 - Add publish_service_capabilities()
- 2.10 - Adds filter_properties and request_spec to prep_resize()
- 2.11 - Adds soft_delete_instance() and restore_instance()
- 2.12 - Remove migration_id, add migration to revert_resize
- 2.13 - Remove migration_id, add migration to finish_revert_resize
- 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
- 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
- 2.16 - Add instance_type to resize_instance
- 2.17 - Add get_backdoor_port()
- 2.18 - Add bdms to rebuild_instance
- 2.19 - Add node to run_instance
- 2.20 - Add node to prep_resize
- 2.21 - Add migrate_data dict param to pre_live_migration()
- 2.22 - Add recreate, on_shared_storage and host arguments to
- rebuild_instance()
- 2.23 - Remove network_info from reboot_instance
- 2.24 - Added get_spice_console method
- 2.25 - Add attach_interface() and detach_interface()
- 2.26 - Add validate_console_port to ensure the service connects to
- vnc on the correct port
- 2.27 - Adds 'reservations' to terminate_instance() and
- soft_delete_instance()
+ * 1.0 - Initial version.
+ * 1.1 - Adds get_host_uptime()
+ * 1.2 - Adds check_can_live_migrate_[destination|source]
+ * 1.3 - Adds change_instance_metadata()
+ * 1.4 - Remove instance_uuid, add instance argument to
+ reboot_instance()
+ * 1.5 - Remove instance_uuid, add instance argument to
+ pause_instance(), unpause_instance()
+ * 1.6 - Remove instance_uuid, add instance argument to
+ suspend_instance()
+ * 1.7 - Remove instance_uuid, add instance argument to
+ get_console_output()
+ * 1.8 - Remove instance_uuid, add instance argument to
+ add_fixed_ip_to_instance()
+ * 1.9 - Remove instance_uuid, add instance argument to attach_volume()
+ * 1.10 - Remove instance_id, add instance argument to
+ check_can_live_migrate_destination()
+ * 1.11 - Remove instance_id, add instance argument to
+ check_can_live_migrate_source()
+ * 1.12 - Remove instance_uuid, add instance argument to
+ confirm_resize()
+ * 1.13 - Remove instance_uuid, add instance argument to detach_volume()
+ * 1.14 - Remove instance_uuid, add instance argument to finish_resize()
+ * 1.15 - Remove instance_uuid, add instance argument to
+ finish_revert_resize()
+ * 1.16 - Remove instance_uuid, add instance argument to
+ get_diagnostics()
+ * 1.17 - Remove instance_uuid, add instance argument to
+ get_vnc_console()
+ * 1.18 - Remove instance_uuid, add instance argument to inject_file()
+ * 1.19 - Remove instance_uuid, add instance argument to
+ inject_network_info()
+ * 1.20 - Remove instance_id, add instance argument to
+ post_live_migration_at_destination()
+ * 1.21 - Remove instance_uuid, add instance argument to
+ power_off_instance() and stop_instance()
+ * 1.22 - Remove instance_uuid, add instance argument to
+ power_on_instance() and start_instance()
+ * 1.23 - Remove instance_id, add instance argument to
+ pre_live_migration()
+ * 1.24 - Remove instance_uuid, add instance argument to
+ rebuild_instance()
+ * 1.25 - Remove instance_uuid, add instance argument to
+ remove_fixed_ip_from_instance()
+ * 1.26 - Remove instance_id, add instance argument to
+ remove_volume_connection()
+ * 1.27 - Remove instance_uuid, add instance argument to
+ rescue_instance()
+ * 1.28 - Remove instance_uuid, add instance argument to reset_network()
+ * 1.29 - Remove instance_uuid, add instance argument to
+ resize_instance()
+ * 1.30 - Remove instance_uuid, add instance argument to
+ resume_instance()
+ * 1.31 - Remove instance_uuid, add instance argument to revert_resize()
+ * 1.32 - Remove instance_id, add instance argument to
+ rollback_live_migration_at_destination()
+ * 1.33 - Remove instance_uuid, add instance argument to
+ set_admin_password()
+ * 1.34 - Remove instance_uuid, add instance argument to
+ snapshot_instance()
+ * 1.35 - Remove instance_uuid, add instance argument to
+ unrescue_instance()
+ * 1.36 - Remove instance_uuid, add instance argument to
+ change_instance_metadata()
+ * 1.37 - Remove instance_uuid, add instance argument to
+ terminate_instance()
+ * 1.38 - Changes to prep_resize():
+ * remove instance_uuid, add instance
+ * remove instance_type_id, add instance_type
+ * remove topic, it was unused
+ * 1.39 - Remove instance_uuid, add instance argument to run_instance()
+ * 1.40 - Remove instance_id, add instance argument to live_migration()
+ * 1.41 - Adds refresh_instance_security_rules()
+ * 1.42 - Add reservations arg to prep_resize(), resize_instance(),
+ finish_resize(), confirm_resize(), revert_resize() and
+ finish_revert_resize()
+ * 1.43 - Add migrate_data to live_migration()
+ * 1.44 - Adds reserve_block_device_name()
+
+ * 2.0 - Remove 1.x backwards compat
+ * 2.1 - Adds orig_sys_metadata to rebuild_instance()
+      * 2.2 - Adds slave_info parameter to add_aggregate_host() and
+ remove_aggregate_host()
+ * 2.3 - Adds volume_id to reserve_block_device_name()
+ * 2.4 - Add bdms to terminate_instance
+ * 2.5 - Add block device and network info to reboot_instance
+ * 2.6 - Remove migration_id, add migration to resize_instance
+ * 2.7 - Remove migration_id, add migration to confirm_resize
+ * 2.8 - Remove migration_id, add migration to finish_resize
+ * 2.9 - Add publish_service_capabilities()
+ * 2.10 - Adds filter_properties and request_spec to prep_resize()
+ * 2.11 - Adds soft_delete_instance() and restore_instance()
+ * 2.12 - Remove migration_id, add migration to revert_resize
+ * 2.13 - Remove migration_id, add migration to finish_revert_resize
+ * 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
+ * 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
+ * 2.16 - Add instance_type to resize_instance
+ * 2.17 - Add get_backdoor_port()
+ * 2.18 - Add bdms to rebuild_instance
+ * 2.19 - Add node to run_instance
+ * 2.20 - Add node to prep_resize
+ * 2.21 - Add migrate_data dict param to pre_live_migration()
+ * 2.22 - Add recreate, on_shared_storage and host arguments to
+ rebuild_instance()
+ * 2.23 - Remove network_info from reboot_instance
+ * 2.24 - Added get_spice_console method
+ * 2.25 - Add attach_interface() and detach_interface()
+ * 2.26 - Add validate_console_port to ensure the service connects to
+ vnc on the correct port
+ * 2.27 - Adds 'reservations' to terminate_instance() and
+ soft_delete_instance()
... Grizzly supports message version 2.27. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.27.
- 2.28 - Adds check_instance_shared_storage()
- 2.29 - Made start_instance() and stop_instance() take new-world
- instance objects
- 2.30 - Adds live_snapshot_instance()
- 2.31 - Adds shelve_instance(), shelve_offload_instance, and
- unshelve_instance()
- 2.32 - Make reboot_instance take a new world instance object
- 2.33 - Made suspend_instance() and resume_instance() take new-world
- instance objects
- 2.34 - Added swap_volume()
- 2.35 - Made terminate_instance() and soft_delete_instance() take
- new-world instance objects
- 2.36 - Made pause_instance() and unpause_instance() take new-world
- instance objects
- 2.37 - Added the legacy_bdm_in_spec parameter to run_instance
- 2.38 - Made check_can_live_migrate_[destination|source] take
- new-world instance objects
- 2.39 - Made revert_resize() and confirm_resize() take new-world
- instance objects
- 2.40 - Made reset_network() take new-world instance object
- 2.41 - Make inject_network_info take new-world instance object
- 2.42 - Splits snapshot_instance() into snapshot_instance() and
- backup_instance() and makes them take new-world instance
- objects.
- 2.43 - Made prep_resize() take new-world instance object
- 2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
- 2.45 - Made resize_instance() take new-world objects
- 2.46 - Made finish_resize() take new-world objects
- 2.47 - Made finish_revert_resize() take new-world objects
+ * 2.28 - Adds check_instance_shared_storage()
+ * 2.29 - Made start_instance() and stop_instance() take new-world
+ instance objects
+ * 2.30 - Adds live_snapshot_instance()
+ * 2.31 - Adds shelve_instance(), shelve_offload_instance, and
+ unshelve_instance()
+ * 2.32 - Make reboot_instance take a new world instance object
+ * 2.33 - Made suspend_instance() and resume_instance() take new-world
+ instance objects
+ * 2.34 - Added swap_volume()
+ * 2.35 - Made terminate_instance() and soft_delete_instance() take
+ new-world instance objects
+ * 2.36 - Made pause_instance() and unpause_instance() take new-world
+ instance objects
+ * 2.37 - Added the legacy_bdm_in_spec parameter to run_instance
+ * 2.38 - Made check_can_live_migrate_[destination|source] take
+ new-world instance objects
+ * 2.39 - Made revert_resize() and confirm_resize() take new-world
+ instance objects
+ * 2.40 - Made reset_network() take new-world instance object
+ * 2.41 - Make inject_network_info take new-world instance object
+ * 2.42 - Splits snapshot_instance() into snapshot_instance() and
+ backup_instance() and makes them take new-world instance
+ objects.
+ * 2.43 - Made prep_resize() take new-world instance object
+ * 2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
+ * 2.45 - Made resize_instance() take new-world objects
+ * 2.46 - Made finish_resize() take new-world objects
+ * 2.47 - Made finish_revert_resize() take new-world objects
... Havana supports message version 2.47. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.47.
- 2.48 - Make add_aggregate_host() and remove_aggregate_host() take
- new-world objects
- ... - Remove live_snapshot() that was never actually used
-
- 3.0 - Remove 2.x compatibility
- 3.1 - Update get_spice_console() to take an instance object
- 3.2 - Update get_vnc_console() to take an instance object
- 3.3 - Update validate_console_port() to take an instance object
- 3.4 - Update rebuild_instance() to take an instance object
- 3.5 - Pass preserve_ephemeral flag to rebuild_instance()
- 3.6 - Make volume_snapshot_{create,delete} use new-world objects
- 3.7 - Update change_instance_metadata() to take an instance object
- 3.8 - Update set_admin_password() to take an instance object
- 3.9 - Update rescue_instance() to take an instance object
- 3.10 - Added get_rdp_console method
- 3.11 - Update unrescue_instance() to take an object
- 3.12 - Update add_fixed_ip_to_instance() to take an object
- 3.13 - Update remove_fixed_ip_from_instance() to take an object
- 3.14 - Update post_live_migration_at_destination() to take an object
- 3.15 - Adds filter_properties and node to unshelve_instance()
- 3.16 - Make reserve_block_device_name and attach_volume use new-world
- objects, and add disk_bus and device_type params to
- reserve_block_device_name, and bdm param to attach_volume
- 3.17 - Update attach_interface and detach_interface to take an object
- 3.18 - Update get_diagnostics() to take an instance object
- ... - Removed inject_file(), as it was unused.
- 3.19 - Update pre_live_migration to take instance object
- 3.20 - Make restore_instance take an instance object
- 3.21 - Made rebuild take new-world BDM objects
- 3.22 - Made terminate_instance take new-world BDM objects
- 3.23 - Added external_instance_event()
- - build_and_run_instance was added in Havana and not used or
- documented.
+ * 2.48 - Make add_aggregate_host() and remove_aggregate_host() take
+ new-world objects
+ * ... - Remove live_snapshot() that was never actually used
+
+ * 3.0 - Remove 2.x compatibility
+ * 3.1 - Update get_spice_console() to take an instance object
+ * 3.2 - Update get_vnc_console() to take an instance object
+ * 3.3 - Update validate_console_port() to take an instance object
+ * 3.4 - Update rebuild_instance() to take an instance object
+ * 3.5 - Pass preserve_ephemeral flag to rebuild_instance()
+ * 3.6 - Make volume_snapshot_{create,delete} use new-world objects
+ * 3.7 - Update change_instance_metadata() to take an instance object
+ * 3.8 - Update set_admin_password() to take an instance object
+ * 3.9 - Update rescue_instance() to take an instance object
+ * 3.10 - Added get_rdp_console method
+ * 3.11 - Update unrescue_instance() to take an object
+ * 3.12 - Update add_fixed_ip_to_instance() to take an object
+ * 3.13 - Update remove_fixed_ip_from_instance() to take an object
+ * 3.14 - Update post_live_migration_at_destination() to take an object
+ * 3.15 - Adds filter_properties and node to unshelve_instance()
+ * 3.16 - Make reserve_block_device_name and attach_volume use new-world
+ objects, and add disk_bus and device_type params to
+ reserve_block_device_name, and bdm param to attach_volume
+ * 3.17 - Update attach_interface and detach_interface to take an object
+ * 3.18 - Update get_diagnostics() to take an instance object
+ * Removed inject_file(), as it was unused.
+ * 3.19 - Update pre_live_migration to take instance object
+ * 3.20 - Make restore_instance take an instance object
+ * 3.21 - Made rebuild take new-world BDM objects
+ * 3.22 - Made terminate_instance take new-world BDM objects
+ * 3.23 - Added external_instance_event()
+ * build_and_run_instance was added in Havana and not used or
+ documented.
... Icehouse supports message version 3.23. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.23.
- 3.24 - Update rescue_instance() to take optional rescue_image_ref
- 3.25 - Make detach_volume take an object
- 3.26 - Make live_migration() and
- rollback_live_migration_at_destination() take an object
- ... - Removed run_instance()
- 3.27 - Make run_instance() accept a new-world object
- 3.28 - Update get_console_output() to accept a new-world object
- 3.29 - Make check_instance_shared_storage accept a new-world object
- 3.30 - Make remove_volume_connection() accept a new-world object
- 3.31 - Add get_instance_diagnostics
- 3.32 - Add destroy_disks and migrate_data optional parameters to
- rollback_live_migration_at_destination()
+ * 3.24 - Update rescue_instance() to take optional rescue_image_ref
+ * 3.25 - Make detach_volume take an object
+ * 3.26 - Make live_migration() and
+ rollback_live_migration_at_destination() take an object
+      * ... - Removed run_instance()
+ * 3.27 - Make run_instance() accept a new-world object
+ * 3.28 - Update get_console_output() to accept a new-world object
+ * 3.29 - Make check_instance_shared_storage accept a new-world object
+ * 3.30 - Make remove_volume_connection() accept a new-world object
+ * 3.31 - Add get_instance_diagnostics
+ * 3.32 - Add destroy_disks and migrate_data optional parameters to
+ rollback_live_migration_at_destination()
+
'''
VERSION_ALIASES = {
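
The reformatted history above is the contract for the version-negotiation
pattern used throughout this file: each RPC method probes the negotiated cap
with can_send_version() before using a newer signature, falling back to
primitives for older computes. A condensed, hypothetical sketch of that
pattern (example_cast is illustrative, not a method added by this patch):

    def example_cast(self, ctxt, instance, host):
        # Prefer the object-based 3.x signature when the cap allows it;
        # otherwise serialize to primitives for older compute services.
        if self.client.can_send_version('3.25'):
            version = '3.25'
        else:
            version = '3.0'
            instance = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'example_cast', instance=instance)
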
@@ -299,7 +307,7 @@ def _check_live_migration_api_version(self, server):
raise exception.LiveMigrationWithOldNovaNotSafe(server=server)
def add_aggregate_host(self, ctxt, aggregate, host_param, host,
- slave_info=None):
+                           slave_info=None):
'''Add aggregate host.
:param ctxt: request context
@@ -320,7 +328,7 @@ def add_aggregate_host(self, ctxt, aggregate, host_param, host,
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate, host=host_param,
- slave_info=slave_info)
+                   slave_info=slave_info)
def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
if self.client.can_send_version('3.12'):
@@ -688,7 +696,7 @@ def refresh_provider_fw_rules(self, ctxt, host):
cctxt.cast(ctxt, 'refresh_provider_fw_rules')
def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
- slave_info=None):
+                          slave_info=None):
'''Remove aggregate host.
:param ctxt: request context
@@ -709,7 +717,7 @@ def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate, host=host_param,
- slave_info=slave_info)
+                   slave_info=slave_info)
def remove_fixed_ip_from_instance(self, ctxt, instance, address):
if self.client.can_send_version('3.13'):
diff --git a/nova/compute/stats.py b/nova/compute/stats.py
index bf183b012c..550bb5355d 100644
--- a/nova/compute/stats.py
+++ b/nova/compute/stats.py
@@ -15,6 +15,8 @@
from nova.compute import task_states
from nova.compute import vm_states
+from nova.i18n import _
+from nova.openstack.common import jsonutils
class Stats(dict):
@@ -31,6 +33,21 @@ def clear(self):
self.states.clear()
+ def digest_stats(self, stats):
+ """Apply stats provided as a dict or a json encoded string."""
+ # NOTE(pmurray): allow json strings as some drivers pass in
+ # stats in that way - they shouldn't really do that.
+ if stats is None:
+ return
+ if isinstance(stats, dict):
+ self.update(stats)
+ return
+ if isinstance(stats, str):
+ _stats_from_json = jsonutils.loads(stats)
+ self.update(_stats_from_json)
+ return
+ raise ValueError(_('Unexpected type adding stats'))
+
@property
def io_workload(self):
"""Calculate an I/O based load by counting I/O heavy operations."""
@@ -73,10 +90,6 @@ def num_os_type(self, os_type):
key = "num_os_type_%s" % os_type
return self.get(key, 0)
- @property
- def num_vcpus_used(self):
- return self.get("num_vcpus_used", 0)
-
def update_stats_for_instance(self, instance):
"""Update stats after an instance is changed."""
@@ -91,14 +104,12 @@ def update_stats_for_instance(self, instance):
self._decrement("num_task_%s" % old_state['task_state'])
self._decrement("num_os_type_%s" % old_state['os_type'])
self._decrement("num_proj_%s" % old_state['project_id'])
- x = self.get("num_vcpus_used", 0)
- self["num_vcpus_used"] = x - old_state['vcpus']
else:
# new instance
self._increment("num_instances")
# Now update stats from the new instance state:
- (vm_state, task_state, os_type, project_id, vcpus) = \
+ (vm_state, task_state, os_type, project_id) = \
self._extract_state_from_instance(instance)
if vm_state == vm_states.DELETED:
@@ -110,16 +121,10 @@ def update_stats_for_instance(self, instance):
self._increment("num_task_%s" % task_state)
self._increment("num_os_type_%s" % os_type)
self._increment("num_proj_%s" % project_id)
- x = self.get("num_vcpus_used", 0)
- self["num_vcpus_used"] = x + vcpus
# save updated I/O workload in stats:
self["io_workload"] = self.io_workload
- def update_stats_for_migration(self, instance_type, sign=1):
- x = self.get("num_vcpus_used", 0)
- self["num_vcpus_used"] = x + (sign * instance_type['vcpus'])
-
def _decrement(self, key):
x = self.get(key, 0)
self[key] = x - 1
@@ -136,10 +141,8 @@ def _extract_state_from_instance(self, instance):
task_state = instance['task_state']
os_type = instance['os_type']
project_id = instance['project_id']
- vcpus = instance['vcpus']
self.states[uuid] = dict(vm_state=vm_state, task_state=task_state,
- os_type=os_type, project_id=project_id,
- vcpus=vcpus)
+ os_type=os_type, project_id=project_id)
- return (vm_state, task_state, os_type, project_id, vcpus)
+ return (vm_state, task_state, os_type, project_id)
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index a0161029f0..6a00c13897 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -25,11 +25,11 @@
from nova.compute import power_state
from nova.compute import task_states
from nova import exception
+from nova.i18n import _LW
from nova.network import model as network_model
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log
from nova import rpc
from nova import utils
@@ -42,7 +42,7 @@
def exception_to_dict(fault):
"""Converts exceptions to a dict for use in notifications."""
- #TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
+ # TODO(johngarbutt) move to nova/exception.py to share with wrap_exception
code = 500
if hasattr(fault, "kwargs"):
@@ -201,7 +201,7 @@ def get_image_metadata(context, image_api, image_id_or_uri, instance):
except (exception.ImageNotAuthorized,
exception.ImageNotFound,
exception.Invalid) as e:
- LOG.warning(_("Can't access image %(image_id)s: %(error)s"),
+ LOG.warning(_LW("Can't access image %(image_id)s: %(error)s"),
{"image_id": image_id_or_uri, "error": e},
instance=instance)
image_system_meta = {}
@@ -219,6 +219,25 @@ def get_image_metadata(context, image_api, image_id_or_uri, instance):
return utils.get_image_from_system_metadata(system_meta)
+def get_value_from_system_metadata(instance, key, type, default):
+ """Get a value of a specified type from image metadata.
+
+ @param instance: The instance object
+ @param key: The name of the property to get
+    @param type: The python type the value is to be returned as
+ @param default: The value to return if key is not set or not the right type
+ """
+ value = instance.system_metadata.get(key, default)
+ try:
+ return type(value)
+ except ValueError:
+ LOG.warning(_LW("Metadata value %(value)s for %(key)s is not of "
+ "type %(type)s. Using default value %(default)s."),
+ {'value': value, 'key': key, 'type': type,
+ 'default': default}, instance=instance)
+ return default
+
+
def notify_usage_exists(notifier, context, instance_ref, current_period=False,
ignore_missing_network_data=True,
system_metadata=None, extra_usage_info=None):
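
The new get_value_from_system_metadata() helper coerces a stored value to the
requested type and falls back to the default (with a warning) when conversion
fails. A usage sketch; the key and values are illustrative, not taken from
the patch:

    # assuming instance.system_metadata == {'image_min_ram': '512'}
    min_ram = get_value_from_system_metadata(instance, 'image_min_ram',
                                             int, 0)
    # -> 512; a value like 'oops' would log a warning and return 0
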
@@ -325,7 +344,7 @@ def notify_about_host_update(context, event_suffix, host_payload):
"""
host_identifier = host_payload.get('host_name')
if not host_identifier:
- LOG.warn(_("No host name specified for the notification of "
+ LOG.warn(_LW("No host name specified for the notification of "
"HostAPI.%s and it will be ignored"), event_suffix)
return
@@ -453,7 +472,7 @@ def periodic_task_spacing_warn(config_option_name):
def wrapper(f):
if (hasattr(f, "_periodic_spacing") and
(f._periodic_spacing == 0 or f._periodic_spacing is None)):
- LOG.warning(_("Value of 0 or None specified for %s."
+ LOG.warning(_LW("Value of 0 or None specified for %s."
" This behaviour will change in meaning in the K release, to"
" mean 'call at the default rate' rather than 'do not call'."
" To keep the 'do not call' behaviour, use a negative value."),
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 576364c198..b7e3257a52 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -20,7 +20,7 @@
from nova import baserpc
from nova.conductor import manager
from nova.conductor import rpcapi
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
@@ -74,9 +74,6 @@ def instance_get_all_by_host_and_node(self, context, host, node):
return self._manager.instance_get_all_by_host(context, host, node,
None)
- def instance_info_cache_delete(self, context, instance):
- return self._manager.instance_info_cache_delete(context, instance)
-
def migration_get_in_progress_by_host_and_node(self, context, host, node):
return self._manager.migration_get_in_progress_by_host_and_node(
context, host, node)
@@ -260,6 +257,24 @@ def unshelve_instance(self, context, instance):
utils.spawn_n(self._manager.unshelve_instance, context,
instance=instance)
+ def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
+ injected_files, new_pass, orig_sys_metadata,
+ bdms, recreate=False, on_shared_storage=False,
+ preserve_ephemeral=False, host=None, kwargs=None):
+ # kwargs unused but required for cell compatibility.
+ utils.spawn_n(self._manager.rebuild_instance, context,
+ instance=instance,
+ new_pass=new_pass,
+ injected_files=injected_files,
+ image_ref=image_ref,
+ orig_image_ref=orig_image_ref,
+ orig_sys_metadata=orig_sys_metadata,
+ bdms=bdms,
+ recreate=recreate,
+ on_shared_storage=on_shared_storage,
+ host=host,
+ preserve_ephemeral=preserve_ephemeral)
+
class API(LocalAPI):
"""Conductor API that does updates via RPC to the ConductorManager."""
@@ -351,3 +366,21 @@ def build_instances(self, context, instances, image, filter_properties,
def unshelve_instance(self, context, instance):
self.conductor_compute_rpcapi.unshelve_instance(context,
instance=instance)
+
+ def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
+ injected_files, new_pass, orig_sys_metadata,
+ bdms, recreate=False, on_shared_storage=False,
+ preserve_ephemeral=False, host=None, kwargs=None):
+ # kwargs unused but required for cell compatibility
+ self.conductor_compute_rpcapi.rebuild_instance(context,
+ instance=instance,
+ new_pass=new_pass,
+ injected_files=injected_files,
+ image_ref=image_ref,
+ orig_image_ref=orig_image_ref,
+ orig_sys_metadata=orig_sys_metadata,
+ bdms=bdms,
+ recreate=recreate,
+ on_shared_storage=on_shared_storage,
+ preserve_ephemeral=preserve_ephemeral,
+ host=host)
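
Both conductor API flavours now expose rebuild_instance() with the same
signature, so callers need not care whether conductor runs locally or over
RPC. A caller-side sketch with hypothetical values (host=None is what lets
conductor pick a target through the scheduler, per the manager change below):

    conductor_api.rebuild_instance(
        context, instance,
        orig_image_ref='old-image-uuid', image_ref='new-image-uuid',
        injected_files=[], new_pass='secret', orig_sys_metadata={},
        bdms=[], recreate=False, on_shared_storage=False,
        preserve_ephemeral=False, host=None)
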
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 99aefc6b2c..345aa53768 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -31,6 +31,7 @@
from nova.conductor.tasks import live_migrate
from nova.db import base
from nova import exception
+from nova.i18n import _, _LW
from nova import image
from nova import manager
from nova import network
@@ -38,9 +39,7 @@
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
-from nova.objects import quotas as quotas_obj
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -219,10 +218,10 @@ def block_device_mapping_get_all_by_instance(self, context, instance,
def instance_get_all_by_filters(self, context, filters, sort_key,
sort_dir, columns_to_join,
- use_slave):
+                                    use_slave):
result = self.db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir,
- columns_to_join=columns_to_join, use_slave=use_slave)
+        columns_to_join=columns_to_join, use_slave=use_slave)
return jsonutils.to_primitive(result)
def instance_get_active_by_window(self, context, begin, end,
@@ -242,9 +241,6 @@ def instance_destroy(self, context, instance):
result = self.db.instance_destroy(context, instance['uuid'])
return jsonutils.to_primitive(result)
- def instance_info_cache_delete(self, context, instance):
- self.db.instance_info_cache_delete(context, instance['uuid'])
-
def instance_fault_create(self, context, values):
result = self.db.instance_fault_create(context, values)
return jsonutils.to_primitive(result)
@@ -452,7 +448,7 @@ class ComputeTaskManager(base.Base):
may involve coordinating activities on multiple compute nodes.
"""
- target = messaging.Target(namespace='compute_task', version='1.7')
+ target = messaging.Target(namespace='compute_task', version='1.8')
def __init__(self):
super(ComputeTaskManager, self).__init__()
@@ -503,9 +499,9 @@ def _cold_migrate(self, context, instance, flavor, filter_properties,
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
- quotas = quotas_obj.Quotas.from_reservations(context,
- reservations,
- instance=instance)
+ quotas = objects.Quotas.from_reservations(context,
+ reservations,
+ instance=instance)
try:
scheduler_utils.populate_retry(filter_properties, instance['uuid'])
hosts = self.scheduler_rpcapi.select_destinations(
@@ -572,7 +568,7 @@ def _live_migrate(self, context, instance, scheduler_hint,
exception.InstanceNotRunning,
exception.MigrationPreCheckError) as ex:
with excutils.save_and_reraise_exception():
- #TODO(johngarbutt) - eventually need instance actions here
+ # TODO(johngarbutt) - eventually need instance actions here
request_spec = {'instance_properties': {
'uuid': instance['uuid'], },
}
@@ -708,3 +704,44 @@ def safe_image_show(ctx, image_id):
del(sys_meta[key])
instance.system_metadata = sys_meta
instance.save()
+
+ def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
+ injected_files, new_pass, orig_sys_metadata,
+ bdms, recreate, on_shared_storage,
+ preserve_ephemeral=False, host=None):
+
+ with compute_utils.EventReporter(context, 'rebuild_server',
+ instance.uuid):
+ if not host:
+ # NOTE(lcostantino): Retrieve scheduler filters for the
+ # instance when the feature is available
+ filter_properties = {'ignore_hosts': [instance.host]}
+ request_spec = scheduler_utils.build_request_spec(context,
+ image_ref,
+ [instance])
+ try:
+ hosts = self.scheduler_rpcapi.select_destinations(context,
+ request_spec,
+ filter_properties)
+ host = hosts.pop(0)['host']
+ except exception.NoValidHost as ex:
+ with excutils.save_and_reraise_exception():
+ self._set_vm_state_and_notify(context,
+ 'rebuild_server',
+ {'vm_state': instance.vm_state,
+ 'task_state': None}, ex, request_spec)
+                    LOG.warning(_LW("No valid host found for rebuild"),
+ instance=instance)
+
+ self.compute_rpcapi.rebuild_instance(context,
+ instance=instance,
+ new_pass=new_pass,
+ injected_files=injected_files,
+ image_ref=image_ref,
+ orig_image_ref=orig_image_ref,
+ orig_sys_metadata=orig_sys_metadata,
+ bdms=bdms,
+ recreate=recreate,
+ on_shared_storage=on_shared_storage,
+ preserve_ephemeral=preserve_ephemeral,
+ host=host)
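
For context, select_destinations() here returns a list of host dicts from
which the rebuild path pops the first entry; roughly (the dict shape is
assumed from the scheduler API of this era, values illustrative):

    hosts = [{'host': 'compute-2', 'nodename': 'compute-2', 'limits': {}}]
    host = hosts.pop(0)['host']   # -> 'compute-2'
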
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 94974582b7..19e6162e06 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -34,121 +34,123 @@ class ConductorAPI(object):
API version history:
- 1.0 - Initial version.
- 1.1 - Added migration_update
- 1.2 - Added instance_get_by_uuid and instance_get_all_by_host
- 1.3 - Added aggregate_host_add and aggregate_host_delete
- 1.4 - Added migration_get
- 1.5 - Added bw_usage_update
- 1.6 - Added get_backdoor_port()
- 1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
- and aggregate_metadata_delete
- 1.8 - Added security_group_get_by_instance and
- security_group_rule_get_by_security_group
- 1.9 - Added provider_fw_rule_get_all
- 1.10 - Added agent_build_get_by_triple
- 1.11 - Added aggregate_get
- 1.12 - Added block_device_mapping_update_or_create
- 1.13 - Added block_device_mapping_get_all_by_instance
- 1.14 - Added block_device_mapping_destroy
- 1.15 - Added instance_get_all_by_filters and
- instance_get_all_hung_in_rebooting and
- instance_get_active_by_window
- Deprecated instance_get_all_by_host
- 1.16 - Added instance_destroy
- 1.17 - Added instance_info_cache_delete
- 1.18 - Added instance_type_get
- 1.19 - Added vol_get_usage_by_time and vol_usage_update
- 1.20 - Added migration_get_unconfirmed_by_dest_compute
- 1.21 - Added service_get_all_by
- 1.22 - Added ping
- 1.23 - Added instance_get_all
- Un-Deprecate instance_get_all_by_host
- 1.24 - Added instance_get
- 1.25 - Added action_event_start and action_event_finish
- 1.26 - Added instance_info_cache_update
- 1.27 - Added service_create
- 1.28 - Added binary arg to service_get_all_by
- 1.29 - Added service_destroy
- 1.30 - Added migration_create
- 1.31 - Added migration_get_in_progress_by_host_and_node
- 1.32 - Added optional node to instance_get_all_by_host
- 1.33 - Added compute_node_create and compute_node_update
- 1.34 - Added service_update
- 1.35 - Added instance_get_active_by_window_joined
- 1.36 - Added instance_fault_create
- 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
- 1.38 - Added service name to instance_update
- 1.39 - Added notify_usage_exists
- 1.40 - Added security_groups_trigger_handler and
- security_groups_trigger_members_refresh
- Remove instance_get_active_by_window
- 1.41 - Added fixed_ip_get_by_instance, network_get,
- instance_floating_address_get_all, quota_commit,
- quota_rollback
- 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
- 1.43 - Added compute_stop
- 1.44 - Added compute_node_delete
- 1.45 - Added project_id to quota_commit and quota_rollback
- 1.46 - Added compute_confirm_resize
- 1.47 - Added columns_to_join to instance_get_all_by_host and
- instance_get_all_by_filters
- 1.48 - Added compute_unrescue
-
- ... Grizzly supports message version 1.48. So, any changes to existing
- methods in 2.x after that point should be done such that they can
- handle the version_cap being set to 1.48.
-
- 1.49 - Added columns_to_join to instance_get_by_uuid
- 1.50 - Added object_action() and object_class_action()
- 1.51 - Added the 'legacy' argument to
- block_device_mapping_get_all_by_instance
- 1.52 - Pass instance objects for compute_confirm_resize
- 1.53 - Added compute_reboot
- 1.54 - Added 'update_cells' argument to bw_usage_update
- 1.55 - Pass instance objects for compute_stop
- 1.56 - Remove compute_confirm_resize and
- migration_get_unconfirmed_by_dest_compute
- 1.57 - Remove migration_create()
- 1.58 - Remove migration_get()
-
- ... Havana supports message version 1.58. So, any changes to existing
- methods in 1.x after that point should be done such that they can
- handle the version_cap being set to 1.58.
-
- 1.59 - Remove instance_info_cache_update()
- 1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete()
- ... - Remove security_group_get_by_instance() and
- security_group_rule_get_by_security_group()
- 1.61 - Return deleted instance from instance_destroy()
- 1.62 - Added object_backport()
- 1.63 - Changed the format of values['stats'] from a dict to a JSON string
- in compute_node_update()
- 1.64 - Added use_slave to instance_get_all_filters()
- ... - Remove instance_type_get()
- ... - Remove aggregate_get()
- ... - Remove aggregate_get_by_host()
- ... - Remove instance_get()
- ... - Remove migration_update()
- ... - Remove block_device_mapping_destroy()
-
- 2.0 - Drop backwards compatibility
- ... - Remove quota_rollback() and quota_commit()
- ... - Remove aggregate_host_add() and aggregate_host_delete()
- ... - Remove network_migrate_instance_start() and
- network_migrate_instance_finish()
-
- ... Icehouse supports message version 2.0. So, any changes to
- existing methods in 2.x after that point should be done such that they
- can handle the version_cap being set to 2.0.
- ... - Remove instance_destroy()
- ... - Remove compute_unrescue()
- ... - Remove instance_get_all_by_filters()
- ... - Remove instance_get_active_by_window_joined()
- ... - Remove instance_fault_create()
- ... - Remove action_event_start() and action_event_finish()
- ... - Remove instance_get_by_uuid()
- ... - Remove agent_build_get_by_triple()
+ * 1.0 - Initial version.
+ * 1.1 - Added migration_update
+ * 1.2 - Added instance_get_by_uuid and instance_get_all_by_host
+ * 1.3 - Added aggregate_host_add and aggregate_host_delete
+ * 1.4 - Added migration_get
+ * 1.5 - Added bw_usage_update
+ * 1.6 - Added get_backdoor_port()
+ * 1.7 - Added aggregate_get_by_host, aggregate_metadata_add,
+ and aggregate_metadata_delete
+ * 1.8 - Added security_group_get_by_instance and
+ security_group_rule_get_by_security_group
+ * 1.9 - Added provider_fw_rule_get_all
+ * 1.10 - Added agent_build_get_by_triple
+ * 1.11 - Added aggregate_get
+ * 1.12 - Added block_device_mapping_update_or_create
+ * 1.13 - Added block_device_mapping_get_all_by_instance
+ * 1.14 - Added block_device_mapping_destroy
+ * 1.15 - Added instance_get_all_by_filters and
+ instance_get_all_hung_in_rebooting and
+ instance_get_active_by_window
+ Deprecated instance_get_all_by_host
+ * 1.16 - Added instance_destroy
+ * 1.17 - Added instance_info_cache_delete
+ * 1.18 - Added instance_type_get
+ * 1.19 - Added vol_get_usage_by_time and vol_usage_update
+ * 1.20 - Added migration_get_unconfirmed_by_dest_compute
+ * 1.21 - Added service_get_all_by
+ * 1.22 - Added ping
+ * 1.23 - Added instance_get_all
+ Un-Deprecate instance_get_all_by_host
+ * 1.24 - Added instance_get
+ * 1.25 - Added action_event_start and action_event_finish
+ * 1.26 - Added instance_info_cache_update
+ * 1.27 - Added service_create
+ * 1.28 - Added binary arg to service_get_all_by
+ * 1.29 - Added service_destroy
+ * 1.30 - Added migration_create
+ * 1.31 - Added migration_get_in_progress_by_host_and_node
+ * 1.32 - Added optional node to instance_get_all_by_host
+ * 1.33 - Added compute_node_create and compute_node_update
+ * 1.34 - Added service_update
+ * 1.35 - Added instance_get_active_by_window_joined
+ * 1.36 - Added instance_fault_create
+ * 1.37 - Added task_log_get, task_log_begin_task, task_log_end_task
+ * 1.38 - Added service name to instance_update
+ * 1.39 - Added notify_usage_exists
+ * 1.40 - Added security_groups_trigger_handler and
+ security_groups_trigger_members_refresh
+ Remove instance_get_active_by_window
+ * 1.41 - Added fixed_ip_get_by_instance, network_get,
+ instance_floating_address_get_all, quota_commit,
+ quota_rollback
+ * 1.42 - Added get_ec2_ids, aggregate_metadata_get_by_host
+ * 1.43 - Added compute_stop
+ * 1.44 - Added compute_node_delete
+ * 1.45 - Added project_id to quota_commit and quota_rollback
+ * 1.46 - Added compute_confirm_resize
+ * 1.47 - Added columns_to_join to instance_get_all_by_host and
+ instance_get_all_by_filters
+ * 1.48 - Added compute_unrescue
+
+ ... Grizzly supports message version 1.48. So, any changes to existing
+      methods in 1.x after that point should be done such that they can
+ handle the version_cap being set to 1.48.
+
+ * 1.49 - Added columns_to_join to instance_get_by_uuid
+ * 1.50 - Added object_action() and object_class_action()
+ * 1.51 - Added the 'legacy' argument to
+ block_device_mapping_get_all_by_instance
+ * 1.52 - Pass instance objects for compute_confirm_resize
+ * 1.53 - Added compute_reboot
+ * 1.54 - Added 'update_cells' argument to bw_usage_update
+ * 1.55 - Pass instance objects for compute_stop
+ * 1.56 - Remove compute_confirm_resize and
+ migration_get_unconfirmed_by_dest_compute
+ * 1.57 - Remove migration_create()
+ * 1.58 - Remove migration_get()
+
+ ... Havana supports message version 1.58. So, any changes to existing
+ methods in 1.x after that point should be done such that they can
+ handle the version_cap being set to 1.58.
+
+ * 1.59 - Remove instance_info_cache_update()
+ * 1.60 - Remove aggregate_metadata_add() and aggregate_metadata_delete()
+ * ... - Remove security_group_get_by_instance() and
+ security_group_rule_get_by_security_group()
+ * 1.61 - Return deleted instance from instance_destroy()
+ * 1.62 - Added object_backport()
+ * 1.63 - Changed the format of values['stats'] from a dict to a JSON string
+ in compute_node_update()
+      * 1.64 - Added use_slave to instance_get_all_filters()
+ - Remove instance_type_get()
+ - Remove aggregate_get()
+ - Remove aggregate_get_by_host()
+ - Remove instance_get()
+ - Remove migration_update()
+ - Remove block_device_mapping_destroy()
+
+ * 2.0 - Drop backwards compatibility
+ - Remove quota_rollback() and quota_commit()
+ - Remove aggregate_host_add() and aggregate_host_delete()
+ - Remove network_migrate_instance_start() and
+ network_migrate_instance_finish()
+
+ ... Icehouse supports message version 2.0. So, any changes to
+ existing methods in 2.x after that point should be done such
+ that they can handle the version_cap being set to 2.0.
+
+ * Remove instance_destroy()
+ * Remove compute_unrescue()
+ * Remove instance_get_all_by_filters()
+ * Remove instance_get_active_by_window_joined()
+ * Remove instance_fault_create()
+ * Remove action_event_start() and action_event_finish()
+ * Remove instance_get_by_uuid()
+ * Remove agent_build_get_by_triple()
+
"""
VERSION_ALIASES = {
@@ -219,11 +221,6 @@ def block_device_mapping_get_all_by_instance(self, context, instance,
return cctxt.call(context, 'block_device_mapping_get_all_by_instance',
instance=instance_p, legacy=legacy)
- def instance_info_cache_delete(self, context, instance):
- instance_p = jsonutils.to_primitive(instance)
- cctxt = self.client.prepare()
- cctxt.call(context, 'instance_info_cache_delete', instance=instance_p)
-
def vol_get_usage_by_time(self, context, start_time):
start_time_p = jsonutils.to_primitive(start_time)
cctxt = self.client.prepare()
@@ -367,6 +364,8 @@ class ComputeTaskAPI(object):
        1.5 - Added the legacy_bdm parameter to build_instances
1.6 - Made migrate_server use instance objects
1.7 - Do not send block_device_mapping and legacy_bdm to build_instances
+ 1.8 - Add rebuild_instance
+
"""
def __init__(self):
@@ -418,3 +417,17 @@ def build_instances(self, context, instances, image, filter_properties,
def unshelve_instance(self, context, instance):
cctxt = self.client.prepare(version='1.3')
cctxt.cast(context, 'unshelve_instance', instance=instance)
+
+ def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
+ image_ref, orig_image_ref, orig_sys_metadata, bdms,
+ recreate=False, on_shared_storage=False, host=None,
+ preserve_ephemeral=False, kwargs=None):
+ cctxt = self.client.prepare(version='1.8')
+ cctxt.cast(ctxt, 'rebuild_instance',
+ instance=instance, new_pass=new_pass,
+ injected_files=injected_files, image_ref=image_ref,
+ orig_image_ref=orig_image_ref,
+ orig_sys_metadata=orig_sys_metadata, bdms=bdms,
+ recreate=recreate, on_shared_storage=on_shared_storage,
+ preserve_ephemeral=preserve_ephemeral,
+ host=host)
diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py
index ec349150fe..7e016fd858 100644
--- a/nova/conductor/tasks/live_migrate.py
+++ b/nova/conductor/tasks/live_migrate.py
@@ -17,8 +17,8 @@
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
+from nova.i18n import _
from nova import image
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
@@ -60,8 +60,8 @@ def execute(self):
else:
self._check_requested_destination()
- #TODO(johngarbutt) need to move complexity out of compute manager
- #TODO(johngarbutt) disk_over_commit?
+ # TODO(johngarbutt) need to move complexity out of compute manager
+ # TODO(johngarbutt) disk_over_commit?
return self.compute_rpcapi.live_migration(self.context,
host=self.source,
instance=self.instance,
@@ -70,7 +70,7 @@ def execute(self):
migrate_data=self.migrate_data)
def rollback(self):
- #TODO(johngarbutt) need to implement the clean up operation
+ # TODO(johngarbutt) need to implement the clean up operation
# but this will make sense only once we pull in the compute
# calls, since this class currently makes no state changes,
# except to call the compute method, that has no matching
@@ -141,7 +141,7 @@ def _call_livem_checks_on_host(self, destination):
destination, self.block_migration, self.disk_over_commit)
def _find_destination(self):
- #TODO(johngarbutt) this retry loop should be shared
+ # TODO(johngarbutt) this retry loop should be shared
attempted_hosts = [self.source]
image = None
if self.instance.image_ref:
@@ -187,5 +187,5 @@ def execute(context, instance, destination,
destination,
block_migration,
disk_over_commit)
- #TODO(johngarbutt) create a superclass that contains a safe_execute call
+ # TODO(johngarbutt) create a superclass that contains a safe_execute call
return task.execute()
diff --git a/nova/config.py b/nova/config.py
index 6cad3485f8..c5600a7025 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -19,6 +19,7 @@
from nova import debugger
from nova.openstack.common.db import options
+from nova.openstack.common import log
from nova import paths
from nova import rpc
from nova import version
@@ -30,6 +31,9 @@ def parse_args(argv, default_config_files=None):
options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='nova.sqlite')
rpc.set_defaults(control_exchange='nova')
+ nova_default_log_levels = (log.DEFAULT_LOG_LEVELS +
+ ["keystonemiddleware=WARN", "routes.middleware=WARN"])
+ log.set_defaults(default_log_levels=nova_default_log_levels)
debugger.register_cli_opts()
cfg.CONF(argv[1:],
project='nova',
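
The effect is that Nova's programmatic default for default_log_levels now
quiets keystonemiddleware and routes.middleware, while an explicit setting in
nova.conf still wins. A quick check one could run after parse_args(), shown
as an assumption about the oslo option rather than a guaranteed API:

    from oslo.config import cfg
    # the oslo log module registers default_log_levels on CONF
    assert 'keystonemiddleware=WARN' in cfg.CONF.default_log_levels
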
diff --git a/nova/console/api.py b/nova/console/api.py
index f3cb9d3b42..5b1dbb7050 100644
--- a/nova/console/api.py
+++ b/nova/console/api.py
@@ -46,11 +46,11 @@ def delete_console(self, context, instance_uuid, console_uuid):
rpcapi.remove_console(context, console['id'])
def create_console(self, context, instance_uuid):
- #NOTE(mdragon): If we wanted to return this the console info
- # here, as we would need to do a call.
- # They can just do an index later to fetch
- # console info. I am not sure which is better
- # here.
+        # NOTE(mdragon): If we wanted to return the console info here,
+        #                we would need to do a call. The caller can just
+        #                do an index later to fetch the console info. I am
+        #                not sure which is better here.
instance = self._get_instance(context, instance_uuid)
topic = self._get_console_topic(context, instance['host'])
server = None
diff --git a/nova/console/manager.py b/nova/console/manager.py
index 2e66320a94..95c07352f7 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -110,9 +110,9 @@ def _get_pool_for_instance_host(self, context, instance_host):
self.host,
console_type)
except exception.NotFound:
- #NOTE(mdragon): Right now, the only place this info exists is the
- # compute worker's flagfile, at least for
- # xenserver. Thus we ned to ask.
+ # NOTE(mdragon): Right now, the only place this info exists is the
+ # compute worker's flagfile, at least for
+            #                xenserver. Thus we need to ask.
if CONF.stub_compute:
pool_info = {'address': '127.0.0.1',
'username': 'test',
diff --git a/nova/console/websocketproxy.py b/nova/console/websocketproxy.py
index d02dcd043b..fbc2be8933 100644
--- a/nova/console/websocketproxy.py
+++ b/nova/console/websocketproxy.py
@@ -25,7 +25,7 @@
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index f55645b2d6..d1790391de 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -23,8 +23,8 @@
from nova import context
from nova import db
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import paths
@@ -40,7 +40,7 @@
help='Generated XVP conf file'),
cfg.StrOpt('console_xvp_pid',
default='/var/run/xvp.pid',
- help='XVP master process pid file'),
+               help='XVP master process pid file'),
cfg.StrOpt('console_xvp_log',
default='/var/log/xvp.log',
help='XVP log file'),
@@ -69,7 +69,7 @@ def console_type(self):
def get_port(self, context):
"""Get available port for consoles that need one."""
- #TODO(mdragon): implement port selection for non multiplex ports,
+ # TODO(mdragon): implement port selection for non multiplex ports,
# we are not using that, but someone else may want
# it.
return CONF.console_xvp_multiplex_port
@@ -131,7 +131,7 @@ def _xvp_stop(self):
try:
os.kill(pid, signal.SIGTERM)
except OSError:
- #if it's already not running, no problem.
+ # if it's already not running, no problem.
pass
def _xvp_start(self):
@@ -196,7 +196,7 @@ def _xvp_encrypt(self, password, is_pool_password=False):
if is_pool_password:
maxlen = 16
flag = '-x'
- #xvp will blow up on passwords that are too long (mdragon)
+ # xvp will blow up on passwords that are too long (mdragon)
password = password[:maxlen]
out, err = utils.execute('xvp', flag, process_input=password)
if err:
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index 966b398a45..ff92f1b579 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -23,9 +23,9 @@
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import rpcapi as compute_rpcapi
+from nova.i18n import _, _LW
from nova import manager
from nova import objects
-from nova.openstack.common.gettextutils import _, _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
diff --git a/nova/consoleauth/rpcapi.py b/nova/consoleauth/rpcapi.py
index fa130a2541..2bba0f8c96 100644
--- a/nova/consoleauth/rpcapi.py
+++ b/nova/consoleauth/rpcapi.py
@@ -33,16 +33,16 @@ class ConsoleAuthAPI(object):
API version history:
- 1.0 - Initial version.
- 1.1 - Added get_backdoor_port()
- 1.2 - Added instance_uuid to authorize_console, and
- delete_tokens_for_instance
+ * 1.0 - Initial version.
+ * 1.1 - Added get_backdoor_port()
+ * 1.2 - Added instance_uuid to authorize_console, and
+ delete_tokens_for_instance
... Grizzly and Havana support message version 1.2. So, any changes
to existing methods in 2.x after that point should be done such that
they can handle the version_cap being set to 1.2.
- 2.0 - Major API rev for Icehouse
+ * 2.0 - Major API rev for Icehouse
... Icehouse supports message version 2.0. So, any changes to
existing methods in 2.x after that point should be done such that they
diff --git a/nova/context.py b/nova/context.py
index ae0d9b51fd..818791458a 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -23,7 +23,7 @@
import six
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -82,7 +82,7 @@ def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
- if s.get('type') in ('volume',)]
+ if s.get('type') in ('volume', 'volumev2')]
else:
# if list is empty or none
self.service_catalog = []
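
Widening the filter keeps Cinder's v2 catalog entry when a request context is
serialized. Illustrative data showing what now survives:

    catalog = [{'type': 'volume', 'name': 'cinder'},
               {'type': 'volumev2', 'name': 'cinderv2'},
               {'type': 'compute', 'name': 'nova'}]
    kept = [s for s in catalog if s.get('type') in ('volume', 'volumev2')]
    # kept == the two cinder entries; the compute entry is dropped
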
diff --git a/nova/crypto.py b/nova/crypto.py
index 8d6f8d4402..ebbc3068cd 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -35,9 +35,9 @@
from nova import context
from nova import db
from nova import exception
+from nova.i18n import _
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import timeutils
diff --git a/nova/db/api.py b/nova/db/api.py
index ee7ad22209..7380e41ac5 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -31,8 +31,8 @@
from oslo.config import cfg
from nova.cells import rpcapi as cells_rpcapi
+from nova.i18n import _
from nova.openstack.common.db import api as db_api
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
@@ -484,12 +484,12 @@ def migration_get_by_instance_and_status(context, instance_uuid, status):
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
- dest_compute, use_slave=False):
+                                              dest_compute, use_slave=False):
"""Finds all unconfirmed migrations within the confirmation window for
a specific destination compute host.
"""
return IMPL.migration_get_unconfirmed_by_dest_compute(context,
- confirm_window, dest_compute, use_slave=use_slave)
+            confirm_window, dest_compute, use_slave=use_slave)
def migration_get_in_progress_by_host_and_node(context, host, node):
@@ -626,10 +626,10 @@ def virtual_interface_get_by_uuid(context, vif_uuid):
return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
-def virtual_interface_get_by_instance(context, instance_id, use_slave=False):
+def virtual_interface_get_by_instance(context, instance_id, use_slave=False):
"""Gets all virtual_interfaces for instance."""
return IMPL.virtual_interface_get_by_instance(context, instance_id,
- use_slave=use_slave)
+                                                  use_slave=use_slave)
def virtual_interface_get_by_instance_and_network(context, instance_id,
@@ -670,10 +670,10 @@ def instance_destroy(context, instance_uuid, constraint=None,
return rv
-def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
+def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get_by_uuid(context, uuid,
- columns_to_join, use_slave=use_slave)
+                                     columns_to_join, use_slave=use_slave)
def instance_get(context, instance_id, columns_to_join=None):
@@ -689,32 +689,34 @@ def instance_get_all(context, columns_to_join=None):
def instance_get_all_by_filters(context, filters, sort_key='created_at',
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+                                columns_to_join=None, use_slave=False):
"""Get all instances that match all filters."""
return IMPL.instance_get_all_by_filters(context, filters, sort_key,
sort_dir, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
- use_slave=use_slave)
+                                            use_slave=use_slave)
def instance_get_active_by_window_joined(context, begin, end=None,
- project_id=None, host=None):
+ project_id=None, host=None,
+                                         use_slave=False):
"""Get instances and joins active during a certain time window.
Specifying a project_id will filter for a certain project.
Specifying a host will filter for instances on a given compute host.
"""
return IMPL.instance_get_active_by_window_joined(context, begin, end,
- project_id, host)
+ project_id, host,
+                                                     use_slave=use_slave)
def instance_get_all_by_host(context, host,
- columns_to_join=None, use_slave=False):
+                             columns_to_join=None, use_slave=False):
"""Get all instances belonging to a host."""
return IMPL.instance_get_all_by_host(context, host,
columns_to_join,
- use_slave=use_slave)
+                                         use_slave=use_slave)
def instance_get_all_by_host_and_node(context, host, node):
@@ -801,15 +803,13 @@ def instance_remove_security_group(context, instance_id, security_group_id):
####################
-def instance_group_create(context, values, policies=None, metadata=None,
- members=None):
- """Create a new group with metadata.
+def instance_group_create(context, values, policies=None, members=None):
+ """Create a new group.
Each group will receive a unique uuid. This will be used for access to the
group.
"""
- return IMPL.instance_group_create(context, values, policies, metadata,
- members)
+ return IMPL.instance_group_create(context, values, policies, members)
def instance_group_get(context, group_uuid):
@@ -1188,16 +1188,16 @@ def ec2_volume_get_by_uuid(context, volume_uuid):
return IMPL.ec2_volume_get_by_uuid(context, volume_uuid)
-def get_snapshot_uuid_by_ec2_id(context, ec2_id):
- return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id)
+def ec2_snapshot_create(context, snapshot_id, forced_id=None):
+ return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
-def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
- return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id)
+def ec2_snapshot_get_by_ec2_id(context, ec2_id):
+ return IMPL.ec2_snapshot_get_by_ec2_id(context, ec2_id)
-def ec2_snapshot_create(context, snapshot_id, forced_id=None):
- return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
+def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
+ return IMPL.ec2_snapshot_get_by_uuid(context, snapshot_uuid)
####################
@@ -1222,11 +1222,11 @@ def block_device_mapping_update_or_create(context, values, legacy=True):
def block_device_mapping_get_all_by_instance(context, instance_uuid,
- use_slave=False):
+ use_subordinate=False):
"""Get all block device mapping belonging to an instance."""
return IMPL.block_device_mapping_get_all_by_instance(context,
instance_uuid,
- use_slave)
+ use_subordinate)
def block_device_mapping_get_by_volume_id(context, volume_id,
@@ -1658,7 +1658,7 @@ def agent_build_update(context, agent_build_id, values):
####################
-def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
+def bw_usage_get(context, uuid, start_period, mac, use_subordinate=False):
"""Return bw usage for instance and mac in a given audit period."""
return IMPL.bw_usage_get(context, uuid, start_period, mac)
@@ -1769,6 +1769,10 @@ def aggregate_host_get_by_metadata_key(context, key):
return IMPL.aggregate_host_get_by_metadata_key(context, key)
+def aggregate_get_by_metadata_key(context, key):
+ return IMPL.aggregate_get_by_metadata_key(context, key)
+
+
def aggregate_update(context, aggregate_id, values):
"""Update the attributes of an aggregates.
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 0d5458b849..0a47d1465b 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -40,10 +40,13 @@
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.schema import Table
+from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
-from sqlalchemy.sql.expression import select
+from sqlalchemy.sql import false
from sqlalchemy.sql import func
+from sqlalchemy.sql import null
+from sqlalchemy.sql import true
from sqlalchemy import String
from nova import block_device
@@ -52,11 +55,11 @@
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
+from nova.i18n import _
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
@@ -71,10 +74,10 @@
]
connection_opts = [
- cfg.StrOpt('slave_connection',
+ cfg.StrOpt('subordinate_connection',
secret=True,
help='The SQLAlchemy connection string used to connect to the '
- 'slave database'),
+ 'subordinate database'),
]
CONF = cfg.CONF
@@ -92,12 +95,12 @@
_SLAVE_FACADE = None
-def _create_facade_lazily(use_slave=False):
+def _create_facade_lazily(use_subordinate=False):
global _MASTER_FACADE
global _SLAVE_FACADE
- return_slave = use_slave and CONF.database.slave_connection
- if not return_slave:
+ return_subordinate = use_subordinate and CONF.database.subordinate_connection
+ if not return_subordinate:
if _MASTER_FACADE is None:
_MASTER_FACADE = db_session.EngineFacade(
CONF.database.connection,
@@ -107,19 +110,19 @@ def _create_facade_lazily(use_slave=False):
else:
if _SLAVE_FACADE is None:
_SLAVE_FACADE = db_session.EngineFacade(
- CONF.database.slave_connection,
+ CONF.database.subordinate_connection,
**dict(CONF.database.iteritems())
)
return _SLAVE_FACADE
-def get_engine(use_slave=False):
- facade = _create_facade_lazily(use_slave)
+def get_engine(use_subordinate=False):
+ facade = _create_facade_lazily(use_subordinate)
return facade.get_engine()
-def get_session(use_slave=False, **kwargs):
- facade = _create_facade_lazily(use_slave)
+def get_session(use_subordinate=False, **kwargs):
+ facade = _create_facade_lazily(use_subordinate)
return facade.get_session(**kwargs)
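For illustration, use_subordinate only selects which engine facade backs the
session; when CONF.database.subordinate_connection is unset, the master facade
is used regardless. A minimal sketch of a read-only caller (hypothetical
query, not part of this patch)::

    # Reads that tolerate replication lag can opt into the subordinate
    # (read replica) engine; get_session() transparently falls back to
    # the master when no subordinate connection is configured.
    session = get_session(use_subordinate=True)
    hosts = session.query(models.Service.host).all()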
@@ -215,7 +218,7 @@ def model_query(context, model, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
- :param use_slave: If true, use slave_connection
+ :param use_subordinate: If true, use subordinate_connection
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
@@ -227,11 +230,11 @@ def model_query(context, model, *args, **kwargs):
model parameter.
"""
- use_slave = kwargs.get('use_slave') or False
- if CONF.database.slave_connection == '':
- use_slave = False
+ use_subordinate = kwargs.get('use_subordinate') or False
+ if CONF.database.subordinate_connection == '':
+ use_subordinate = False
- session = kwargs.get('session') or get_session(use_slave=use_slave)
+ session = kwargs.get('session') or get_session(use_subordinate=use_subordinate)
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only', False)
@@ -262,7 +265,7 @@ def issubclassof_nova_base(obj):
if project_only == 'allow_none':
query = query.\
filter(or_(base_model.project_id == context.project_id,
- base_model.project_id == None))
+ base_model.project_id == null()))
else:
query = query.filter_by(project_id=context.project_id)
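A sketch of how these kwargs combine in a caller (hypothetical filter
values)::

    # read_deleted="no" hides soft-deleted rows, project_only=True scopes
    # the query to context.project_id for non-admin contexts, and
    # use_subordinate=True routes the read to the replica when configured.
    instances = model_query(context, models.Instance,
                            read_deleted="no",
                            project_only=True,
                            use_subordinate=True).\
                    filter_by(host='compute1').\
                    all()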
@@ -586,12 +589,12 @@ def compute_node_get_all(context, no_date_fields):
def filter_columns(table):
return [c for c in table.c if c.name not in redundant_columns]
- compute_node_query = select(filter_columns(compute_node)).\
+ compute_node_query = sql.select(filter_columns(compute_node)).\
where(compute_node.c.deleted == 0).\
order_by(compute_node.c.service_id)
compute_node_rows = conn.execute(compute_node_query).fetchall()
- service_query = select(filter_columns(service)).\
+ service_query = sql.select(filter_columns(service)).\
where((service.c.deleted == 0) &
(service.c.binary == 'nova-compute')).\
order_by(service.c.id)
@@ -685,7 +688,7 @@ def compute_node_statistics(context):
func.sum(models.ComputeNode.disk_available_least),
base_model=models.ComputeNode,
read_deleted="no").\
- filter(models.Service.disabled == False).\
+ filter(models.Service.disabled == false()).\
filter(
models.Service.id ==
models.ComputeNode.service_id).\
@@ -902,21 +905,14 @@ def floating_ip_fixed_ip_associate(context, floating_address,
@_retry_on_deadlock
def floating_ip_deallocate(context, address):
session = get_session()
-
with session.begin():
- floating_ip_ref = model_query(context, models.FloatingIp,
- session=session).\
- filter_by(address=address).\
- filter(models.FloatingIp.project_id != None).\
- with_lockmode('update').\
- first()
-
- if floating_ip_ref:
- floating_ip_ref.update({'project_id': None,
- 'host': None,
- 'auto_assigned': False})
-
- return floating_ip_ref
+ return model_query(context, models.FloatingIp, session=session).\
+ filter_by(address=address).\
+ filter(models.FloatingIp.project_id != null()).\
+ update({'project_id': None,
+ 'host': None,
+ 'auto_assigned': False},
+ synchronize_session=False)
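Note that the rewrite also changes the return value: the old body returned
the FloatingIp row (or None), whereas Query.update() returns the number of
rows matched, so callers can now only test the count (sketch)::

    count = floating_ip_deallocate(context, address)
    if not count:
        # Nothing matched: unknown address, or project_id was already NULL.
        LOG.debug('Failed to deallocate floating ip %s', address)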
@require_context
@@ -1047,6 +1043,7 @@ def floating_ip_update(context, address, values):
float_ip_ref.save(session=session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
+ return float_ip_ref
def _dnsdomain_get(context, session, fqdomain):
@@ -1129,7 +1126,7 @@ def fixed_ip_associate(context, address, instance_uuid, network_id=None,
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
- models.FixedIp.network_id == None)
+ models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
@@ -1162,7 +1159,7 @@ def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
session = get_session()
with session.begin():
network_or_none = or_(models.FixedIp.network_id == network_id,
- models.FixedIp.network_id == None)
+ models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, session=session,
read_deleted="no").\
filter(network_or_none).\
@@ -1234,12 +1231,12 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
# join with update doesn't work.
with session.begin():
host_filter = or_(and_(models.Instance.host == host,
- models.Network.multi_host == True),
+ models.Network.multi_host == true()),
models.Network.host == host)
result = model_query(context, models.FixedIp.id,
base_model=models.FixedIp, read_deleted="no",
session=session).\
- filter(models.FixedIp.allocated == False).\
+ filter(models.FixedIp.allocated == false()).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
@@ -1456,9 +1453,9 @@ def virtual_interface_create(context, values):
return vif_ref
-def _virtual_interface_query(context, session=None, use_slave=False):
+def _virtual_interface_query(context, session=None, use_subordinate=False):
return model_query(context, models.VirtualInterface, session=session,
- read_deleted="no", use_slave=use_slave)
+ read_deleted="no", use_subordinate=use_subordinate)
@require_context
@@ -1504,12 +1501,12 @@ def virtual_interface_get_by_uuid(context, vif_uuid):
@require_context
@require_instance_exists_using_uuid
-def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
+def virtual_interface_get_by_instance(context, instance_uuid, use_subordinate=False):
"""Gets all virtual interfaces for instance.
:param instance_uuid: uuid of the instance to retrieve vifs for
"""
- vif_refs = _virtual_interface_query(context, use_slave=use_slave).\
+ vif_refs = _virtual_interface_query(context, use_subordinate=use_subordinate).\
filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\
all()
@@ -1706,16 +1703,16 @@ def instance_destroy(context, instance_uuid, constraint=None):
@require_context
-def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
+def instance_get_by_uuid(context, uuid, columns_to_join=None, use_subordinate=False):
return _instance_get_by_uuid(context, uuid,
- columns_to_join=columns_to_join, use_slave=use_slave)
+ columns_to_join=columns_to_join, use_subordinate=use_subordinate)
def _instance_get_by_uuid(context, uuid, session=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
result = _build_instance_get(context, session=session,
columns_to_join=columns_to_join,
- use_slave=use_slave).\
+ use_subordinate=use_subordinate).\
filter_by(uuid=uuid).\
first()
@@ -1744,9 +1741,9 @@ def instance_get(context, instance_id, columns_to_join=None):
def _build_instance_get(context, session=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
query = model_query(context, models.Instance, session=session,
- project_only=True, use_slave=use_slave).\
+ project_only=True, use_subordinate=use_subordinate).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
@@ -1756,7 +1753,7 @@ def _build_instance_get(context, session=None,
# Already always joined above
continue
query = query.options(joinedload(column))
- #NOTE(alaski) Stop lazy loading of columns not needed.
+ # NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
@@ -1764,7 +1761,7 @@ def _build_instance_get(context, session=None,
def _instances_fill_metadata(context, instances,
- manual_joins=None, use_slave=False):
+ manual_joins=None, use_subordinate=False):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
@@ -1782,13 +1779,13 @@ def _instances_fill_metadata(context, instances,
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids,
- use_slave=use_slave):
+ use_subordinate=use_subordinate):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids,
- use_slave=use_slave):
+ use_subordinate=use_subordinate):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
@@ -1840,39 +1837,40 @@ def instance_get_all(context, columns_to_join=None):
@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
"""Return instances that match all filters. Deleted instances
will be returned by default, unless there's a filter that says
otherwise.
Depending on the name of a filter, matching for that filter is
performed using either exact matching or regular expression
- matching. Exact matching is applied for the following filters:
+ matching. Exact matching is applied for the following filters::
- ['project_id', 'user_id', 'image_ref',
- 'vm_state', 'instance_type_id', 'uuid',
- 'metadata', 'host', 'system_metadata']
+ | ['project_id', 'user_id', 'image_ref',
+ | 'vm_state', 'instance_type_id', 'uuid',
+ | 'metadata', 'host', 'system_metadata']
A third type of filter (also using exact matching), filters
based on instance metadata tags when supplied under a special
- key named 'filter'.
-
- filters = {
- 'filter': [
- {'name': 'tag-key', 'value': ''},
- {'name': 'tag-value', 'value': ''},
- {'name': 'tag:', 'value': ''}
- ]
- }
-
- Special keys are used to tweek the query further:
-
- 'changes-since' - only return instances updated after
- 'deleted' - only return (or exclude) deleted instances
- 'soft_deleted' - modify behavior of 'deleted' to either
- include or exclude instances whose
- vm_state is SOFT_DELETED.
+ key named 'filter'::
+
+ | filters = {
+ | 'filter': [
+ | {'name': 'tag-key', 'value': ''},
+ | {'name': 'tag-value', 'value': ''},
+ | {'name': 'tag:', 'value': ''}
+ | ]
+ | }
+
+ Special keys are used to tweak the query further::
+
+ | 'changes-since' - only return instances updated after
+ | 'deleted' - only return (or exclude) deleted instances
+ | 'soft_deleted' - modify behavior of 'deleted' to either
+ | include or exclude instances whose
+ | vm_state is SOFT_DELETED.
+
"""
# NOTE(mriedem): If the limit is 0 there is no point in even going
# to the database since nothing is going to be returned anyway.
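For example, a caller combining exact-match filters with a metadata tag
might pass (illustrative values only)::

    filters = {
        'project_id': context.project_id,
        'vm_state': 'active',
        'filter': [{'name': 'tag:env', 'value': 'prod'}],
    }
    instances = instance_get_all_by_filters(context, filters,
                                            'created_at', 'desc')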
@@ -1881,10 +1879,10 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
sort_fn = {'desc': desc, 'asc': asc}
- if CONF.database.slave_connection == '':
- use_slave = False
+ if CONF.database.subordinate_connection == '':
+ use_subordinate = False
- session = get_session(use_slave=use_slave)
+ session = get_session(use_subordinate=use_subordinate)
if columns_to_join is None:
columns_to_join = ['info_cache', 'security_groups']
@@ -1930,7 +1928,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
- models.Instance.vm_state == None
+ models.Instance.vm_state == null()
)
query_prefix = query_prefix.filter(not_soft_deleted)
@@ -2072,14 +2070,15 @@ def regex_filter(query, model, filters):
@require_context
def instance_get_active_by_window_joined(context, begin, end=None,
- project_id=None, host=None):
+ project_id=None, host=None,
+ use_subordinate=False):
"""Return instances and joins that were active during window."""
- session = get_session()
+ session = get_session(use_subordinate=use_subordinate)
query = session.query(models.Instance)
query = query.options(joinedload('info_cache')).\
options(joinedload('security_groups')).\
- filter(or_(models.Instance.terminated_at == None,
+ filter(or_(models.Instance.terminated_at == null(),
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
@@ -2092,14 +2091,14 @@ def instance_get_active_by_window_joined(context, begin, end=None,
def _instance_get_all_query(context, project_only=False,
- joins=None, use_slave=False):
+ joins=None, use_subordinate=False):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
project_only=project_only,
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
for join in joins:
query = query.options(joinedload(join))
return query
@@ -2108,12 +2107,12 @@ def _instance_get_all_query(context, project_only=False,
@require_admin_context
def instance_get_all_by_host(context, host,
columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
return _instances_fill_metadata(context,
_instance_get_all_query(context,
- use_slave=use_slave).filter_by(host=host).all(),
+ use_subordinate=use_subordinate).filter_by(host=host).all(),
manual_joins=columns_to_join,
- use_slave=use_slave)
+ use_subordinate=use_subordinate)
def _instance_get_all_uuids_by_host(context, host, session=None):
@@ -2173,19 +2172,12 @@ def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
- fixed_ip_ids = model_query(context, models.FixedIp.id,
- base_model=models.FixedIp).\
- filter_by(instance_uuid=instance_uuid).\
- all()
- if not fixed_ip_ids:
- raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
-
- fixed_ip_ids = [fixed_ip_id.id for fixed_ip_id in fixed_ip_ids]
-
- floating_ips = model_query(context, models.FloatingIp.address,
+ floating_ips = model_query(context,
+ models.FloatingIp.address,
base_model=models.FloatingIp).\
- filter(models.FloatingIp.fixed_ip_id.in_(fixed_ip_ids)).\
- all()
+ join(models.FloatingIp.fixed_ip).\
+ filter_by(instance_uuid=instance_uuid)
+
return [floating_ip.address for floating_ip in floating_ips]
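Behavior changes subtly here: the old implementation raised
FixedIpNotFoundForInstance when the instance had no fixed IPs, while the
joined query simply yields nothing, so the function now returns an empty
list in that case (sketch)::

    addresses = instance_floating_address_get_all(context, instance_uuid)
    # [] when the instance has no fixed or floating IPs; no exception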
@@ -2609,8 +2601,8 @@ def network_get_all_by_uuids(context, network_uuids, project_only):
if not result:
raise exception.NoNetworksFound()
- #check if the result contains all the networks
- #we are looking for
+ # check if the result contains all the networks
+ # we are looking for
for network_uuid in network_uuids:
for network in result:
if network['uuid'] == network_uuid:
@@ -2655,8 +2647,8 @@ def network_get_associated_fixed_ips(context, network_id, host=None):
filter(models.FixedIp.network_id == network_id).\
join((models.VirtualInterface, vif_and)).\
join((models.Instance, inst_and)).\
- filter(models.FixedIp.instance_uuid != None).\
- filter(models.FixedIp.virtual_interface_id != None)
+ filter(models.FixedIp.instance_uuid != null()).\
+ filter(models.FixedIp.virtual_interface_id != null())
if host:
query = query.filter(models.Instance.host == host)
result = query.all()
@@ -2804,8 +2796,8 @@ def quota_get_all_by_project_and_user(context, project_id, user_id):
all()
result = {'project_id': project_id, 'user_id': user_id}
- for quota in user_quotas:
- result[quota.resource] = quota.hard_limit
+ for user_quota in user_quotas:
+ result[user_quota.resource] = user_quota.hard_limit
return result
@@ -2964,7 +2956,7 @@ def _quota_usage_get_all(context, project_id, user_id=None):
result = {'project_id': project_id}
if user_id:
query = query.filter(or_(models.QuotaUsage.user_id == user_id,
- models.QuotaUsage.user_id == None))
+ models.QuotaUsage.user_id == null()))
result['user_id'] = user_id
rows = query.all()
@@ -3018,7 +3010,7 @@ def quota_usage_update(context, project_id, user_id, resource, **kwargs):
filter_by(project_id=project_id).\
filter_by(resource=resource).\
filter(or_(models.QuotaUsage.user_id == user_id,
- models.QuotaUsage.user_id == None)).\
+ models.QuotaUsage.user_id == null())).\
update(updates)
if not result:
@@ -3374,6 +3366,7 @@ def quota_destroy_all_by_project(context, project_id):
@require_admin_context
+@_retry_on_deadlock
def reservation_expire(context):
session = get_session()
with session.begin():
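The decorator is added presumably because reservation cleanup can deadlock
with concurrent quota traffic; schematically, _retry_on_deadlock (defined
earlier in this module) just re-invokes the call on deadlock (sketch, not
the module's exact implementation)::

    def _retry_on_deadlock(f):
        # Retry the wrapped DB API call whenever the backend reports a
        # deadlock, instead of bubbling the error up to the caller.
        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            while True:
                try:
                    return f(*args, **kwargs)
                except db_exc.DBDeadlock:
                    LOG.warn(_("Deadlock detected when running '%s': "
                               "Retrying..."), f.__name__)
        return wrapped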
@@ -3454,39 +3447,39 @@ def ec2_snapshot_create(context, snapshot_uuid, id=None):
@require_context
-def get_ec2_snapshot_id_by_uuid(context, snapshot_id):
+def ec2_snapshot_get_by_ec2_id(context, ec2_id):
result = _ec2_snapshot_get_query(context).\
- filter_by(uuid=snapshot_id).\
+ filter_by(id=ec2_id).\
first()
if not result:
- raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
+ raise exception.SnapshotNotFound(snapshot_id=ec2_id)
- return result['id']
+ return result
@require_context
-def get_snapshot_uuid_by_ec2_id(context, ec2_id):
+def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
result = _ec2_snapshot_get_query(context).\
- filter_by(id=ec2_id).\
+ filter_by(uuid=snapshot_uuid).\
first()
if not result:
- raise exception.SnapshotNotFound(snapshot_id=ec2_id)
+ raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid)
- return result['uuid']
+ return result
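The renamed accessors now return the whole row instead of a single column,
leaving field extraction to the caller (sketch)::

    snapshot = ec2_snapshot_get_by_uuid(context, snapshot_uuid)
    ec2_id = snapshot['id']      # formerly get_ec2_snapshot_id_by_uuid()
    uuid = ec2_snapshot_get_by_ec2_id(context, ec2_id)['uuid']
                                 # formerly get_snapshot_uuid_by_ec2_id()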
###################
def _block_device_mapping_get_query(context, session=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
if columns_to_join is None:
columns_to_join = []
query = model_query(context, models.BlockDeviceMapping,
- session=session, use_slave=use_slave)
+ session=session, use_subordinate=use_subordinate)
for column in columns_to_join:
query = query.options(joinedload(column))
@@ -3568,8 +3561,8 @@ def block_device_mapping_update_or_create(context, values, legacy=True):
@require_context
def block_device_mapping_get_all_by_instance(context, instance_uuid,
- use_slave=False):
- return _block_device_mapping_get_query(context, use_slave=use_slave).\
+ use_subordinate=False):
+ return _block_device_mapping_get_query(context, use_subordinate=use_subordinate).\
filter_by(instance_uuid=instance_uuid).\
all()
@@ -4074,12 +4067,12 @@ def migration_get_by_instance_and_status(context, instance_uuid, status):
@require_admin_context
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
- dest_compute, use_slave=False):
+ dest_compute, use_subordinate=False):
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes",
- use_slave=use_slave).\
+ use_subordinate=use_subordinate).\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
@@ -4284,7 +4277,7 @@ def _flavor_get_query(context, session=None, read_deleted=None):
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
- the_filter = [models.InstanceTypes.is_public == True]
+ the_filter = [models.InstanceTypes.is_public == true()]
the_filter.extend([
models.InstanceTypes.projects.any(project_id=context.project_id)
])
@@ -4598,11 +4591,11 @@ def cell_get_all(context):
# User-provided metadata
def _instance_metadata_get_multi(context, instance_uuids,
- session=None, use_slave=False):
+ session=None, use_subordinate=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceMetadata,
- session=session, use_slave=use_slave).\
+ session=session, use_subordinate=use_subordinate).\
filter(
models.InstanceMetadata.instance_uuid.in_(instance_uuids))
@@ -4664,11 +4657,11 @@ def instance_metadata_update(context, instance_uuid, metadata, delete):
def _instance_system_metadata_get_multi(context, instance_uuids,
- session=None, use_slave=False):
+ session=None, use_subordinate=False):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
- session=session, use_slave=use_slave).\
+ session=session, use_subordinate=use_subordinate).\
filter(
models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
@@ -4771,9 +4764,9 @@ def agent_build_update(context, agent_build_id, values):
####################
@require_context
-def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
+def bw_usage_get(context, uuid, start_period, mac, use_subordinate=False):
return model_query(context, models.BandwidthUsage, read_deleted="yes",
- use_slave=use_slave).\
+ use_subordinate=use_subordinate).\
filter_by(start_period=start_period).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
@@ -4840,9 +4833,9 @@ def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
- filter(or_(models.VolumeUsage.tot_last_refreshed == None,
+ filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
models.VolumeUsage.tot_last_refreshed > begin,
- models.VolumeUsage.curr_last_refreshed == None,
+ models.VolumeUsage.curr_last_refreshed == null(),
models.VolumeUsage.curr_last_refreshed > begin,
)).\
all()
@@ -5101,13 +5094,7 @@ def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key):
def aggregate_host_get_by_metadata_key(context, key):
- query = model_query(context, models.Aggregate)
- query = query.join("_metadata")
- query = query.filter(models.AggregateMetadata.key == key)
- query = query.options(contains_eager("_metadata"))
- query = query.options(joinedload("_hosts"))
- rows = query.all()
-
+ rows = aggregate_get_by_metadata_key(context, key)
metadata = collections.defaultdict(set)
for agg in rows:
for agghost in agg._hosts:
@@ -5115,6 +5102,19 @@ def aggregate_host_get_by_metadata_key(context, key):
return dict(metadata)
+def aggregate_get_by_metadata_key(context, key):
+ """Return rows that match metadata key.
+
+ :param key: Matches metadata key.
+ """
+ query = model_query(context, models.Aggregate)
+ query = query.join("_metadata")
+ query = query.filter(models.AggregateMetadata.key == key)
+ query = query.options(contains_eager("_metadata"))
+ query = query.options(joinedload("_hosts"))
+ return query.all()
+
+
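A sketch of the new helper in use (hypothetical key)::

    # Aggregates come back with _metadata and _hosts eagerly loaded,
    # so iterating them triggers no further queries.
    for agg in aggregate_get_by_metadata_key(context, 'availability_zone'):
        hosts = [agghost.host for agghost in agg._hosts]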
def aggregate_update(context, aggregate_id, values):
session = get_session()
@@ -5171,7 +5171,7 @@ def aggregate_delete(context, aggregate_id):
if count == 0:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
- #Delete Metadata
+ # Delete Metadata
model_query(context,
models.AggregateMetadata, session=session).\
filter_by(aggregate_id=aggregate_id).\
@@ -5568,7 +5568,7 @@ def task_log_end_task(context, task_name, period_beginning, period_ending,
period_ending, host, session=session).\
update(values)
if rows == 0:
- #It's not running!
+ # It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
@@ -5624,10 +5624,10 @@ def archive_deleted_rows_for_table(context, tablename, max_rows):
column = table.c.id
# NOTE(guochbo): Use InsertFromSelect and DeleteFromSelect to avoid
# database's limit of maximum parameter in one SQL statement.
- query_insert = select([table],
+ query_insert = sql.select([table],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
- query_delete = select([column],
+ query_delete = sql.select([column],
table.c.deleted != default_deleted_value).\
order_by(column).limit(max_rows)
@@ -5678,8 +5678,7 @@ def archive_deleted_rows(context, max_rows=None):
def _instance_group_get_query(context, model_class, id_field=None, id=None,
session=None, read_deleted=None):
- columns_to_join = {models.InstanceGroup: ['_policies', '_metadata',
- '_members']}
+ columns_to_join = {models.InstanceGroup: ['_policies', '_members']}
query = model_query(context, model_class, session=session,
read_deleted=read_deleted)
@@ -5692,9 +5691,9 @@ def _instance_group_get_query(context, model_class, id_field=None, id=None,
return query
-def instance_group_create(context, values, policies=None, metadata=None,
+def instance_group_create(context, values, policies=None,
members=None):
- """Create a new group with metadata."""
+ """Create a new group."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
@@ -5711,14 +5710,10 @@ def instance_group_create(context, values, policies=None, metadata=None,
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this instance group.
group._policies = []
- group._metadata = []
group._members = []
if policies:
_instance_group_policies_add(context, group.id, policies,
session=session)
- if metadata:
- _instance_group_metadata_add(context, group.id, metadata,
- session=session)
if members:
_instance_group_members_add(context, group.id, members,
session=session)
@@ -5760,13 +5755,6 @@ def instance_group_update(context, group_uuid, values):
values.pop('policies'),
set_delete=True,
session=session)
- metadata = values.get('metadata')
- if metadata is not None:
- _instance_group_metadata_add(context,
- group.id,
- values.pop('metadata'),
- set_delete=True,
- session=session)
members = values.get('members')
if members is not None:
_instance_group_members_add(context,
@@ -5779,8 +5767,6 @@ def instance_group_update(context, group_uuid, values):
if policies:
values['policies'] = policies
- if metadata:
- values['metadata'] = metadata
if members:
values['members'] = members
@@ -5801,7 +5787,6 @@ def instance_group_delete(context, group_uuid):
# Delete policies and members
instance_models = [models.InstanceGroupPolicy,
- models.InstanceGroupMetadata,
models.InstanceGroupMember]
for model in instance_models:
model_query(context, model, session=session).\
@@ -5844,70 +5829,6 @@ def _instance_group_id(context, group_uuid, session=None):
return result.id
-def _instance_group_metadata_add(context, id, metadata, set_delete=False,
- session=None):
- if not session:
- session = get_session()
-
- with session.begin(subtransactions=True):
- all_keys = metadata.keys()
- query = _instance_group_model_get_query(context,
- models.InstanceGroupMetadata,
- id,
- session=session)
- if set_delete:
- query.filter(~models.InstanceGroupMetadata.key.in_(all_keys)).\
- soft_delete(synchronize_session=False)
-
- query = query.filter(models.InstanceGroupMetadata.key.in_(all_keys))
- already_existing_keys = set()
- for meta_ref in query.all():
- key = meta_ref.key
- meta_ref.update({'value': metadata[key]})
- already_existing_keys.add(key)
-
- for key, value in metadata.iteritems():
- if key in already_existing_keys:
- continue
- meta_ref = models.InstanceGroupMetadata()
- meta_ref.update({'key': key,
- 'value': value,
- 'group_id': id})
- session.add(meta_ref)
-
- return metadata
-
-
-def instance_group_metadata_add(context, group_uuid, metadata,
- set_delete=False):
- id = _instance_group_id(context, group_uuid)
- return _instance_group_metadata_add(context, id, metadata,
- set_delete=set_delete)
-
-
-def instance_group_metadata_delete(context, group_uuid, key):
- id = _instance_group_id(context, group_uuid)
- count = _instance_group_get_query(context,
- models.InstanceGroupMetadata,
- models.InstanceGroupMetadata.group_id,
- id).\
- filter_by(key=key).\
- soft_delete()
- if count == 0:
- raise exception.InstanceGroupMetadataNotFound(group_uuid=group_uuid,
- metadata_key=key)
-
-
-def instance_group_metadata_get(context, group_uuid):
- id = _instance_group_id(context, group_uuid)
- rows = model_query(context,
- models.InstanceGroupMetadata.key,
- models.InstanceGroupMetadata.value,
- base_model=models.InstanceGroupMetadata).\
- filter_by(group_id=id).all()
- return dict((r[0], r[1]) for r in rows)
-
-
def _instance_group_members_add(context, id, members, set_delete=False,
session=None):
if not session:
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py b/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py
index 8db11e692b..0469e5a265 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py
@@ -20,7 +20,7 @@
from sqlalchemy import Text
from sqlalchemy.types import NullType
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -1536,13 +1536,14 @@ def upgrade(migrate_engine):
refcolumns=fkey_pair[1])
fkey.create()
- if migrate_engine.name == "mysql":
+ if migrate_engine.name == 'mysql':
# In Folsom we explicitly converted migrate_version to UTF8.
- sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;"
+ migrate_engine.execute(
+ 'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8')
# Set default DB charset to UTF8.
- sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" % \
- migrate_engine.url.database
- migrate_engine.execute(sql)
+ migrate_engine.execute(
+ 'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' %
+ migrate_engine.url.database)
_create_shadow_tables(migrate_engine)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py b/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py
new file mode 100644
index 0000000000..917ea1461e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py
@@ -0,0 +1,59 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Index, MetaData, Table
+
+from nova.i18n import _LI
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def _get_deleted_expire_index(table):
+ members = sorted(['deleted', 'expire'])
+ for idx in table.indexes:
+ if sorted(idx.columns.keys()) == members:
+ return idx
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ reservations = Table('reservations', meta, autoload=True)
+ if _get_deleted_expire_index(reservations):
+ LOG.info(_LI('Skipped adding reservations_deleted_expire_idx '
+ 'because an equivalent index already exists.'))
+ return
+
+ # Based on expire_reservations query
+ # from: nova/db/sqlalchemy/api.py
+ index = Index('reservations_deleted_expire_idx',
+ reservations.c.deleted, reservations.c.expire)
+
+ index.create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ reservations = Table('reservations', meta, autoload=True)
+
+ index = _get_deleted_expire_index(reservations)
+ if index:
+ index.drop(migrate_engine)
+ else:
+ LOG.info(_LI('Skipped removing reservations_deleted_expire_idx '
+ 'because index does not exist.'))
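The _get_deleted_expire_index() guard makes the migration idempotent, which
matters because the same index also ships in migration 248 below. A sketch
of exercising it (assumes a scratch engine whose schema already contains the
reservations table)::

    from sqlalchemy import create_engine

    engine = create_engine('sqlite:////tmp/nova-scratch.db')
    upgrade(engine)   # creates reservations_deleted_expire_idx
    upgrade(engine)   # second run finds the index, logs a skip, returns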
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/246_add_compute_node_id_fk.py b/nova/db/sqlalchemy/migrate_repo/versions/246_add_compute_node_id_fk.py
new file mode 100644
index 0000000000..a7e32e2d64
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/246_add_compute_node_id_fk.py
@@ -0,0 +1,41 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from migrate.changeset.constraint import ForeignKeyConstraint
+from sqlalchemy import MetaData, Table
+
+
+def upgrade(migrate_engine):
+ """Add missing foreign key constraint on pci_devices.compute_node_id."""
+ meta = MetaData(bind=migrate_engine)
+
+ pci_devices = Table('pci_devices', meta, autoload=True)
+ compute_nodes = Table('compute_nodes', meta, autoload=True)
+
+ fkey = ForeignKeyConstraint(columns=[pci_devices.c.compute_node_id],
+ refcolumns=[compute_nodes.c.id])
+ fkey.create()
+
+
+def downgrade(migrate_engine):
+ meta = MetaData(bind=migrate_engine)
+
+ pci_devices = Table('pci_devices', meta, autoload=True)
+ compute_nodes = Table('compute_nodes', meta, autoload=True)
+
+ fkey = ForeignKeyConstraint(columns=[pci_devices.c.compute_node_id],
+ refcolumns=[compute_nodes.c.id])
+ fkey.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_downgrade.sql
new file mode 100644
index 0000000000..57c0db235f
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_downgrade.sql
@@ -0,0 +1,49 @@
+BEGIN TRANSACTION;
+ CREATE TABLE pci_devices_new (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted INTEGER,
+ id INTEGER NOT NULL,
+ compute_node_id INTEGER NOT NULL,
+ address VARCHAR(12) NOT NULL,
+ vendor_id VARCHAR(4) NOT NULL,
+ product_id VARCHAR(4) NOT NULL,
+ dev_type VARCHAR(8) NOT NULL,
+ dev_id VARCHAR(255),
+ label VARCHAR(255) NOT NULL,
+ status VARCHAR(36) NOT NULL,
+ extra_info TEXT,
+ instance_uuid VARCHAR(36),
+ PRIMARY KEY (id),
+ CONSTRAINT uniq_pci_devices0compute_node_id0address0deleted UNIQUE (compute_node_id, address, deleted)
+ );
+
+ INSERT INTO pci_devices_new
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ compute_node_id,
+ address,
+ vendor_id,
+ product_id,
+ dev_type,
+ dev_id,
+ label,
+ status,
+ extra_info,
+ instance_uuid
+ FROM pci_devices;
+
+ DROP TABLE pci_devices;
+
+ ALTER TABLE pci_devices_new RENAME TO pci_devices;
+
+ CREATE INDEX ix_pci_devices_compute_node_id_deleted
+ ON pci_devices (compute_node_id, deleted);
+
+ CREATE INDEX ix_pci_devices_instance_uuid_deleted
+ ON pci_devices (instance_uuid, deleted);
+COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql
new file mode 100644
index 0000000000..8aa9ecc78e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/246_sqlite_upgrade.sql
@@ -0,0 +1,50 @@
+BEGIN TRANSACTION;
+ CREATE TABLE pci_devices_new (
+ created_at DATETIME,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted INTEGER,
+ id INTEGER NOT NULL,
+ compute_node_id INTEGER NOT NULL,
+ address VARCHAR(12) NOT NULL,
+ vendor_id VARCHAR(4) NOT NULL,
+ product_id VARCHAR(4) NOT NULL,
+ dev_type VARCHAR(8) NOT NULL,
+ dev_id VARCHAR(255),
+ label VARCHAR(255) NOT NULL,
+ status VARCHAR(36) NOT NULL,
+ extra_info TEXT,
+ instance_uuid VARCHAR(36),
+ PRIMARY KEY (id),
+ FOREIGN KEY (compute_node_id) REFERENCES compute_nodes(id),
+ CONSTRAINT uniq_pci_devices0compute_node_id0address0deleted UNIQUE (compute_node_id, address, deleted)
+ );
+
+ INSERT INTO pci_devices_new
+ SELECT created_at,
+ updated_at,
+ deleted_at,
+ deleted,
+ id,
+ compute_node_id,
+ address,
+ vendor_id,
+ product_id,
+ dev_type,
+ dev_id,
+ label,
+ status,
+ extra_info,
+ instance_uuid
+ FROM pci_devices;
+
+ DROP TABLE pci_devices;
+
+ ALTER TABLE pci_devices_new RENAME TO pci_devices;
+
+ CREATE INDEX ix_pci_devices_compute_node_id_deleted
+ ON pci_devices (compute_node_id, deleted);
+
+ CREATE INDEX ix_pci_devices_instance_uuid_deleted
+ ON pci_devices (instance_uuid, deleted);
+COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py b/nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py
new file mode 100644
index 0000000000..78f608732c
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py
@@ -0,0 +1,43 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy import MetaData, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData(bind=migrate_engine)
+
+ quota_usages = Table('quota_usages', meta, autoload=True)
+ quota_usages.c.resource.alter(nullable=False)
+
+ pci_devices = Table('pci_devices', meta, autoload=True)
+ pci_devices.c.deleted.alter(nullable=True)
+ pci_devices.c.product_id.alter(nullable=False)
+ pci_devices.c.vendor_id.alter(nullable=False)
+ pci_devices.c.dev_type.alter(nullable=False)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData(bind=migrate_engine)
+
+ quota_usages = Table('quota_usages', meta, autoload=True)
+ quota_usages.c.resource.alter(nullable=True)
+
+ pci_devices = Table('pci_devices', meta, autoload=True)
+ pci_devices.c.deleted.alter(nullable=False)
+ pci_devices.c.product_id.alter(nullable=True)
+ pci_devices.c.vendor_id.alter(nullable=True)
+ pci_devices.c.dev_type.alter(nullable=True)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py b/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py
new file mode 100644
index 0000000000..917ea1461e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py
@@ -0,0 +1,59 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Index, MetaData, Table
+
+from nova.i18n import _LI
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def _get_deleted_expire_index(table):
+ members = sorted(['deleted', 'expire'])
+ for idx in table.indexes:
+ if sorted(idx.columns.keys()) == members:
+ return idx
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ reservations = Table('reservations', meta, autoload=True)
+ if _get_deleted_expire_index(reservations):
+ LOG.info(_LI('Skipped adding reservations_deleted_expire_idx '
+ 'because an equivalent index already exists.'))
+ return
+
+ # Based on expire_reservations query
+ # from: nova/db/sqlalchemy/api.py
+ index = Index('reservations_deleted_expire_idx',
+ reservations.c.deleted, reservations.c.expire)
+
+ index.create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ reservations = Table('reservations', meta, autoload=True)
+
+ index = _get_deleted_expire_index(reservations)
+ if index:
+ index.drop(migrate_engine)
+ else:
+ LOG.info(_LI('Skipped removing reservations_deleted_expire_idx '
+ 'because index does not exist.'))
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/249_remove_duplicate_index.py b/nova/db/sqlalchemy/migrate_repo/versions/249_remove_duplicate_index.py
new file mode 100644
index 0000000000..9d2f797c9c
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/249_remove_duplicate_index.py
@@ -0,0 +1,36 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy import MetaData, Table
+
+
+INDEX_NAME = 'block_device_mapping_instance_uuid_virtual_name_device_name_idx'
+
+
+def upgrade(migrate_engine):
+ """Remove duplicate index from block_device_mapping table."""
+
+ meta = MetaData(bind=migrate_engine)
+
+ bdm = Table('block_device_mapping', meta, autoload=True)
+ for index in bdm.indexes:
+ if index.name == INDEX_NAME:
+ index.drop()
+
+
+def downgrade(migrate_engine):
+ # Unnecessary to re-add duplicate index when downgrading
+ pass
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py
new file mode 100644
index 0000000000..667baae823
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/250_remove_instance_groups_metadata.py
@@ -0,0 +1,71 @@
+# Copyright 2014 Red Hat, Inc.
+# All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy import MetaData, Table, Column, DateTime, Integer, String, \
+ ForeignKey
+
+
+def upgrade(migrate_engine):
+ """Remove the instance_group_metadata table."""
+ meta = MetaData(bind=migrate_engine)
+
+ if migrate_engine.has_table('instance_group_metadata'):
+ group_metadata = Table('instance_group_metadata', meta, autoload=True)
+ group_metadata.drop()
+
+ if migrate_engine.has_table('shadow_instance_group_metadata'):
+ shadow_group_metadata = Table('shadow_instance_group_metadata', meta,
+ autoload=True)
+ shadow_group_metadata.drop()
+
+
+def downgrade(migrate_engine):
+ """Revert removal of the instance_group_metadata table."""
+ meta = MetaData(bind=migrate_engine)
+ Table('instance_groups', meta, autoload=True)
+ Table('shadow_instance_groups', meta, autoload=True)
+
+ if not migrate_engine.has_table('instance_group_metadata'):
+ group_metadata = Table('instance_group_metadata', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Integer),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('key', String(length=255)),
+ Column('value', String(length=255)),
+ Column('group_id', Integer, ForeignKey('instance_groups.id'),
+ nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+ group_metadata.create()
+ if not migrate_engine.has_table('shadow_instance_group_metadata'):
+ shadow_group_metadata = Table('shadow_instance_group_metadata', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Integer),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('key', String(length=255)),
+ Column('value', String(length=255)),
+ Column('group_id', Integer,
+ ForeignKey('shadow_instance_groups.id'),
+ nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+ shadow_group_metadata.create()
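Note that downgrade() restores only empty tables: the metadata rows dropped
by upgrade() are not recoverable. The bare Table('instance_groups', ...)
autoloads at the top of downgrade() exist solely so the ForeignKey targets
resolve when the metadata tables are recreated.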
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/251_add_numa_topology_to_comput_nodes.py b/nova/db/sqlalchemy/migrate_repo/versions/251_add_numa_topology_to_comput_nodes.py
new file mode 100644
index 0000000000..c2510d9b07
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/251_add_numa_topology_to_comput_nodes.py
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy import Column
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+from sqlalchemy import Text
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ compute_nodes = Table('compute_nodes', meta, autoload=True)
+ shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
+
+ numa_topology = Column('numa_topology', Text, nullable=True)
+ shadow_numa_topology = Column('numa_topology', Text, nullable=True)
+ compute_nodes.create_column(numa_topology)
+ shadow_compute_nodes.create_column(shadow_numa_topology)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ compute_nodes = Table('compute_nodes', meta, autoload=True)
+ shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
+
+ compute_nodes.drop_column('numa_topology')
+ shadow_compute_nodes.drop_column('numa_topology')
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index 2d140e67d9..388e6d2fee 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -23,7 +23,7 @@
from nova.db.sqlalchemy import api as db_session
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
INIT_VERSION = 215
_REPOSITORY = None
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 739c8aaf28..bf52b5622b 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -22,8 +22,8 @@
from sqlalchemy import Column, Index, Integer, BigInteger, Enum, String, schema
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import orm
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
-from sqlalchemy.orm import relationship, backref, object_mapper
from oslo.config import cfg
from nova.db.sqlalchemy import types
@@ -79,8 +79,8 @@ class ComputeNode(BASE, NovaBase):
__table_args__ = ()
id = Column(Integer, primary_key=True)
service_id = Column(Integer, ForeignKey('services.id'), nullable=False)
- service = relationship(Service,
- backref=backref('compute_node'),
+ service = orm.relationship(Service,
+ backref=orm.backref('compute_node'),
foreign_keys=service_id,
primaryjoin='and_('
'ComputeNode.service_id == Service.id,'
@@ -131,6 +131,10 @@ class ComputeNode(BASE, NovaBase):
# json-encode string containing compute node statistics
stats = Column(Text, default='{}')
+ # json-encoded dict that contains NUMA topology as generated by
+ # nova.virt.hardware.VirtNUMAHostTopology.to_json()
+ numa_topology = Column(Text)
+
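The new column stores opaque JSON produced by the virt layer (per the
comment above); a consumer would decode it with the project's jsonutils
wrapper rather than parse it by hand (sketch)::

    from nova.openstack.common import jsonutils

    # Empty-dict fallback for nodes that have not reported a topology yet.
    topology = jsonutils.loads(compute_node.numa_topology or '{}')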
class Certificate(BASE, NovaBase):
"""Represents a x509 certificate."""
@@ -180,7 +184,7 @@ def name(self):
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
- for column in iter(object_mapper(self).columns):
+ for column in iter(orm.object_mapper(self).columns):
key = column.name
# prevent recursion if someone specifies %(name)s
# %(name)s will not be valid.
@@ -304,8 +308,8 @@ class InstanceInfoCache(BASE, NovaBase):
instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
nullable=False)
- instance = relationship(Instance,
- backref=backref('info_cache', uselist=False),
+ instance = orm.relationship(Instance,
+ backref=orm.backref('info_cache', uselist=False),
foreign_keys=instance_uuid,
primaryjoin=instance_uuid == Instance.uuid)
@@ -478,6 +482,7 @@ class Reservation(BASE, NovaBase):
__table_args__ = (
Index('ix_reservations_project_id', 'project_id'),
Index('reservations_uuid_idx', 'uuid'),
+ Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
@@ -491,7 +496,7 @@ class Reservation(BASE, NovaBase):
delta = Column(Integer, nullable=False)
expire = Column(DateTime)
- usage = relationship(
+ usage = orm.relationship(
"QuotaUsage",
foreign_keys=usage_id,
primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,'
@@ -537,17 +542,18 @@ class BlockDeviceMapping(BASE, NovaBase):
Index('block_device_mapping_instance_uuid_volume_id_idx',
'instance_uuid', 'volume_id'),
Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
- #TODO(sshturm) Should be dropped. `virtual_name` was dropped
- #in 186 migration,
- #Duplicates `block_device_mapping_instance_uuid_device_name_idx` index.
+ # TODO(sshturm) Should be dropped. `virtual_name` was dropped
+ # in 186 migration,
+ # Duplicates `block_device_mapping_instance_uuid_device_name_idx`
+ # index.
Index("block_device_mapping_instance_uuid_virtual_name"
"_device_name_idx", 'instance_uuid', 'device_name'),
)
id = Column(Integer, primary_key=True, autoincrement=True)
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
- instance = relationship(Instance,
- backref=backref('block_device_mapping'),
+ instance = orm.relationship(Instance,
+ backref=orm.backref('block_device_mapping'),
foreign_keys=instance_uuid,
primaryjoin='and_(BlockDeviceMapping.'
'instance_uuid=='
@@ -569,7 +575,7 @@ class BlockDeviceMapping(BASE, NovaBase):
# With EC2 API,
# default True for ami specified device.
# default False for created with other timing.
- #TODO(sshturm) add default in db
+ # TODO(sshturm) add default in db
delete_on_termination = Column(Boolean, default=False)
snapshot_id = Column(String(36))
@@ -598,8 +604,8 @@ class IscsiTarget(BASE, NovaBase):
target_num = Column(Integer)
host = Column(String(255))
volume_id = Column(String(36), ForeignKey('volumes.id'))
- volume = relationship(Volume,
- backref=backref('iscsi_target', uselist=False),
+ volume = orm.relationship(Volume,
+ backref=orm.backref('iscsi_target', uselist=False),
foreign_keys=volume_id,
primaryjoin='and_(IscsiTarget.volume_id==Volume.id,'
'IscsiTarget.deleted==0)')
@@ -630,7 +636,7 @@ class SecurityGroup(BASE, NovaBase):
user_id = Column(String(255))
project_id = Column(String(255))
- instances = relationship(Instance,
+ instances = orm.relationship(Instance,
secondary="security_group_instance_association",
primaryjoin='and_('
'SecurityGroup.id == '
@@ -653,7 +659,7 @@ class SecurityGroupIngressRule(BASE, NovaBase):
id = Column(Integer, primary_key=True)
parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
- parent_group = relationship("SecurityGroup", backref="rules",
+ parent_group = orm.relationship("SecurityGroup", backref="rules",
foreign_keys=parent_group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
@@ -667,7 +673,7 @@ class SecurityGroupIngressRule(BASE, NovaBase):
# Note: This is not the parent SecurityGroup. It's SecurityGroup we're
# granting access for.
group_id = Column(Integer, ForeignKey('security_groups.id'))
- grantee_group = relationship("SecurityGroup",
+ grantee_group = orm.relationship("SecurityGroup",
foreign_keys=group_id,
primaryjoin='and_('
'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
@@ -735,10 +741,10 @@ class Migration(BASE, NovaBase):
old_instance_type_id = Column(Integer())
new_instance_type_id = Column(Integer())
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
- #TODO(_cerberus_): enum
+ # TODO(_cerberus_): enum
status = Column(String(255))
- instance = relationship("Instance", foreign_keys=instance_uuid,
+ instance = orm.relationship("Instance", foreign_keys=instance_uuid,
primaryjoin='and_(Migration.instance_uuid == '
'Instance.uuid, Instance.deleted == '
'0)')
@@ -838,22 +844,22 @@ class FixedIp(BASE, NovaBase):
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
# associated means that a fixed_ip has its instance_id column set
# allocated means that a fixed_ip has its virtual_interface_id column set
- #TODO(sshturm) add default in db
+ # TODO(sshturm) add default in db
allocated = Column(Boolean, default=False)
# leased means dhcp bridge has leased the ip
- #TODO(sshturm) add default in db
+ # TODO(sshturm) add default in db
leased = Column(Boolean, default=False)
- #TODO(sshturm) add default in db
+ # TODO(sshturm) add default in db
reserved = Column(Boolean, default=False)
host = Column(String(255))
- network = relationship(Network,
- backref=backref('fixed_ips'),
+ network = orm.relationship(Network,
+ backref=orm.backref('fixed_ips'),
foreign_keys=network_id,
primaryjoin='and_('
'FixedIp.network_id == Network.id,'
'FixedIp.deleted == 0,'
'Network.deleted == 0)')
- instance = relationship(Instance,
+ instance = orm.relationship(Instance,
foreign_keys=instance_uuid,
primaryjoin='and_('
'FixedIp.instance_uuid == Instance.uuid,'
@@ -879,11 +885,11 @@ class FloatingIp(BASE, NovaBase):
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
auto_assigned = Column(Boolean, default=False)
- #TODO(sshturm) add default in db
+ # TODO(sshturm) add default in db
pool = Column(String(255))
interface = Column(String(255))
- fixed_ip = relationship(FixedIp,
- backref=backref('floating_ips'),
+ fixed_ip = orm.relationship(FixedIp,
+ backref=orm.backref('floating_ips'),
foreign_keys=fixed_ip_id,
primaryjoin='and_('
'FloatingIp.fixed_ip_id == FixedIp.id,'
@@ -935,7 +941,7 @@ class Console(BASE, NovaBase):
password = Column(String(255))
port = Column(Integer)
pool_id = Column(Integer, ForeignKey('console_pools.id'))
- pool = relationship(ConsolePool, backref=backref('consoles'))
+ pool = orm.relationship(ConsolePool, backref=orm.backref('consoles'))
class InstanceMetadata(BASE, NovaBase):
@@ -948,7 +954,7 @@ class InstanceMetadata(BASE, NovaBase):
key = Column(String(255))
value = Column(String(255))
instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
- instance = relationship(Instance, backref="metadata",
+ instance = orm.relationship(Instance, backref="metadata",
foreign_keys=instance_uuid,
primaryjoin='and_('
'InstanceMetadata.instance_uuid == '
@@ -969,7 +975,7 @@ class InstanceSystemMetadata(BASE, NovaBase):
primary_join = ('and_(InstanceSystemMetadata.instance_uuid == '
'Instance.uuid, InstanceSystemMetadata.deleted == 0)')
- instance = relationship(Instance, backref="system_metadata",
+ instance = orm.relationship(Instance, backref="system_metadata",
foreign_keys=instance_uuid,
primaryjoin=primary_join)
@@ -987,7 +993,7 @@ class InstanceTypeProjects(BASE, NovaBase):
nullable=False)
project_id = Column(String(255))
- instance_type = relationship(InstanceTypes, backref="projects",
+ instance_type = orm.relationship(InstanceTypes, backref="projects",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
@@ -1011,7 +1017,7 @@ class InstanceTypeExtraSpecs(BASE, NovaBase):
value = Column(String(255))
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
nullable=False)
- instance_type = relationship(InstanceTypes, backref="extra_specs",
+ instance_type = orm.relationship(InstanceTypes, backref="extra_specs",
foreign_keys=instance_type_id,
primaryjoin='and_('
'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
@@ -1074,13 +1080,13 @@ class Aggregate(BASE, NovaBase):
__table_args__ = ()
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(255))
- _hosts = relationship(AggregateHost,
+ _hosts = orm.relationship(AggregateHost,
primaryjoin='and_('
'Aggregate.id == AggregateHost.aggregate_id,'
'AggregateHost.deleted == 0,'
'Aggregate.deleted == 0)')
- _metadata = relationship(AggregateMetadata,
+ _metadata = orm.relationship(AggregateMetadata,
primaryjoin='and_('
'Aggregate.id == AggregateMetadata.aggregate_id,'
'AggregateMetadata.deleted == 0,'
@@ -1303,19 +1309,6 @@ class InstanceGroupPolicy(BASE, NovaBase):
nullable=False)
-class InstanceGroupMetadata(BASE, NovaBase):
- """Represents a key/value pair for an instance group."""
- __tablename__ = 'instance_group_metadata'
- __table_args__ = (
- Index('instance_group_metadata_key_idx', 'key'),
- )
- id = Column(Integer, primary_key=True, nullable=False)
- key = Column(String(255))
- value = Column(String(255))
- group_id = Column(Integer, ForeignKey('instance_groups.id'),
- nullable=False)
-
-
class InstanceGroup(BASE, NovaBase):
"""Represents an instance group.
@@ -1334,15 +1327,11 @@ class InstanceGroup(BASE, NovaBase):
project_id = Column(String(255))
uuid = Column(String(36), nullable=False)
name = Column(String(255))
- _policies = relationship(InstanceGroupPolicy, primaryjoin='and_('
+ _policies = orm.relationship(InstanceGroupPolicy, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupPolicy.group_id,'
'InstanceGroupPolicy.deleted == 0,'
'InstanceGroup.deleted == 0)')
- _metadata = relationship(InstanceGroupMetadata, primaryjoin='and_('
- 'InstanceGroup.id == InstanceGroupMetadata.group_id,'
- 'InstanceGroupMetadata.deleted == 0,'
- 'InstanceGroup.deleted == 0)')
- _members = relationship(InstanceGroupMember, primaryjoin='and_('
+ _members = orm.relationship(InstanceGroupMember, primaryjoin='and_('
'InstanceGroup.id == InstanceGroupMember.group_id,'
'InstanceGroupMember.deleted == 0,'
'InstanceGroup.deleted == 0)')
@@ -1351,10 +1340,6 @@ class InstanceGroup(BASE, NovaBase):
def policies(self):
return [p.policy for p in self._policies]
- @property
- def metadetails(self):
- return dict((m.key, m.value) for m in self._metadata)
-
@property
def members(self):
return [m.instance_id for m in self._members]
@@ -1395,7 +1380,7 @@ class PciDevice(BASE, NovaBase):
extra_info = Column(Text)
instance_uuid = Column(String(36))
- instance = relationship(Instance, backref="pci_devices",
+ instance = orm.relationship(Instance, backref="pci_devices",
foreign_keys=instance_uuid,
primaryjoin='and_('
'PciDevice.instance_uuid == Instance.uuid,'
diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py
index 79cecc171b..eafe3c2481 100644
--- a/nova/db/sqlalchemy/utils.py
+++ b/nova/db/sqlalchemy/utils.py
@@ -23,8 +23,8 @@
from nova.db.sqlalchemy import api as db
from nova import exception
+from nova.i18n import _
from nova.openstack.common.db.sqlalchemy import utils as oslodbutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
@@ -93,10 +93,8 @@ def create_shadow_table(migrate_engine, table_name=None, table=None,
:param table_name: Autoload table with this name and create shadow table
:param table: Autoloaded table, so just create corresponding shadow table.
:param col_name_col_instance: contains pair column_name=column_instance.
- column_instance is instance of Column. These params
- are required only for columns that have unsupported
- types by sqlite. For example BigInteger.
-
+ column_instance is an instance of Column. These params are required
+ only for columns with types unsupported by sqlite, for example BigInteger.
:returns: The created shadow_table object.
"""
meta = MetaData(bind=migrate_engine)
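
For context, a minimal sketch of how a migration script might drive create_shadow_table. This assumes the column overrides are passed as keyword arguments (column_name=column_instance, as the docstring phrasing suggests); the table and column names here are illustrative, not part of this change::

    from sqlalchemy import BigInteger, Column

    from nova.db.sqlalchemy import utils

    def upgrade(migrate_engine):
        # 'instances' is autoloaded and mirrored into its shadow table;
        # the BigInteger column is overridden explicitly because sqlite
        # cannot autoload that type.
        utils.create_shadow_table(migrate_engine, table_name='instances',
                                  memory_mb=Column(BigInteger))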
diff --git a/nova/debugger.py b/nova/debugger.py
index 29d25d482f..8aa889f1a2 100644
--- a/nova/debugger.py
+++ b/nova/debugger.py
@@ -60,7 +60,7 @@ def init():
if not (CONF.remote_debug.host and CONF.remote_debug.port):
return
- from nova.openstack.common.gettextutils import _
+ from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/exception.py b/nova/exception.py
index 8eff489bb2..05c6ecef9e 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -28,8 +28,8 @@
from oslo.config import cfg
import webob.exc
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import safe_utils
@@ -316,10 +316,6 @@ class InvalidContentType(Invalid):
msg_fmt = _("Invalid content type %(content_type)s.")
-class InvalidCidr(Invalid):
- msg_fmt = _("Invalid cidr %(cidr)s.")
-
-
class InvalidUnicodeParameter(Invalid):
msg_fmt = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
@@ -594,10 +590,46 @@ class NetworkInUse(NovaException):
msg_fmt = _("Network %(network_id)s is still in use.")
-class NetworkNotCreated(NovaException):
+class NetworkNotCreated(Invalid):
msg_fmt = _("%(req)s is required to create a network.")
+class LabelTooLong(Invalid):
+ msg_fmt = _("Maximum allowed length for 'label' is 255.")
+
+
+class InvalidIntValue(Invalid):
+ msg_fmt = _("%(key)s must be an integer.")
+
+
+class InvalidCidr(Invalid):
+ msg_fmt = _("%(cidr)s is not a valid ip network.")
+
+
+class InvalidAddress(Invalid):
+ msg_fmt = _("%(address)s is not a valid ip address.")
+
+
+class AddressOutOfRange(Invalid):
+ msg_fmt = _("%(address)s is not within %(cidr)s.")
+
+
+class DuplicateVlan(NovaException):
+ msg_fmt = _("Detected existing vlan with id %(vlan)d")
+ code = 409
+
+
+class CidrConflict(NovaException):
+ msg_fmt = _('Requested cidr (%(cidr)s) conflicts '
+ 'with existing cidr (%(other)s)')
+ code = 409
+
+
+class NetworkHasProject(NetworkInUse):
+ msg_fmt = _('Network must be disassociated from project '
+ '%(project_id)s before it can be deleted.')
+
+
class NetworkNotFound(NotFound):
msg_fmt = _("Network %(network_id)s could not be found.")
@@ -650,6 +682,10 @@ class ExternalNetworkAttachForbidden(Forbidden):
"external network %(network_uuid)s")
+class NetworkMissingPhysicalNetwork(NovaException):
+ msg_fmt = _("Physical network is missing for network %(network_uuid)s")
+
+
class DatastoreNotFound(NotFound):
msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
@@ -819,6 +855,10 @@ class InvalidQuotaValue(Invalid):
"resources: %(unders)s")
+class InvalidQuotaMethodUsage(Invalid):
+ msg_fmt = _("Wrong quota method %(method)s used on resource %(res)s")
+
+
class QuotaNotFound(NotFound):
msg_fmt = _("Quota could not be found")
@@ -1144,6 +1184,9 @@ class NoValidHost(NovaException):
class QuotaError(NovaException):
ec2_code = 'ResourceLimitExceeded'
msg_fmt = _("Quota exceeded: code=%(code)s")
+ # NOTE(cyeoh): 413 should only be used for the ec2 API
+ # The error status code for out of quota for the nova api should be
+ # 403 Forbidden.
code = 413
headers = {'Retry-After': 0}
safe = True
@@ -1170,11 +1213,11 @@ class OnsetFileLimitExceeded(QuotaError):
msg_fmt = _("Personality file limit exceeded")
-class OnsetFilePathLimitExceeded(QuotaError):
+class OnsetFilePathLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file path too long")
-class OnsetFileContentLimitExceeded(QuotaError):
+class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file content too long")
@@ -1227,15 +1270,6 @@ class InstancePasswordSetFailed(NovaException):
safe = True
-class DuplicateVlan(NovaException):
- msg_fmt = _("Detected existing vlan with id %(vlan)d")
-
-
-class CidrConflict(NovaException):
- msg_fmt = _("There was a conflict when trying to complete your request.")
- code = 409
-
-
class InstanceNotFound(NotFound):
ec2_code = 'InvalidInstanceID.NotFound'
msg_fmt = _("Instance %(instance_id)s could not be found.")
@@ -1259,8 +1293,13 @@ class MarkerNotFound(NotFound):
class InvalidInstanceIDMalformed(Invalid):
+ msg_fmt = _("Invalid id: %(instance_id)s (expecting \"i-...\")")
ec2_code = 'InvalidInstanceID.Malformed'
- msg_fmt = _("Invalid id: %(val)s (expecting \"i-...\").")
+
+
+class InvalidVolumeIDMalformed(Invalid):
+ msg_fmt = _("Invalid id: %(volume_id)s (expecting \"i-...\")")
+ ec2_code = 'InvalidVolumeID.Malformed'
class CouldNotFetchImage(NovaException):
@@ -1298,11 +1337,13 @@ class ConfigDriveUnknownFormat(NovaException):
class InterfaceAttachFailed(Invalid):
- msg_fmt = _("Failed to attach network adapter device to %(instance)s")
+ msg_fmt = _("Failed to attach network adapter device to "
+ "%(instance_uuid)s")
class InterfaceDetachFailed(Invalid):
- msg_fmt = _("Failed to detach network adapter device from %(instance)s")
+ msg_fmt = _("Failed to detach network adapter device from "
+ "%(instance_uuid)s")
class InstanceUserDataTooLarge(NovaException):
@@ -1487,6 +1528,17 @@ class PciDeviceWrongAddressFormat(NovaException):
msg_fmt = _("The PCI address %(address)s has an incorrect format.")
+class PciDeviceInvalidAddressField(NovaException):
+ msg_fmt = _("Invalid PCI Whitelist: "
+ "The PCI address %(address)s has an invalid %(field)s.")
+
+
+class PciDeviceInvalidDeviceName(NovaException):
+ msg_fmt = _("Invalid PCI Whitelist: "
+ "The PCI whitelist can specify devname or address,"
+ " but not both")
+
+
class PciDeviceNotFoundById(NotFound):
msg_fmt = _("PCI device %(id)s not found")
@@ -1585,12 +1637,6 @@ class InvalidWatchdogAction(Invalid):
msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
-class NoLiveMigrationForConfigDriveInLibVirt(NovaException):
- msg_fmt = _("Live migration of instances with config drives is not "
- "supported in libvirt unless libvirt instance path and "
- "drive data is shared across compute nodes.")
-
-
class LiveMigrationWithOldNovaNotSafe(NovaException):
msg_fmt = _("Host %(server)s is running an old version of Nova, "
"live migrations involving that version may cause data loss. "
@@ -1614,3 +1660,39 @@ class ImageVCPUTopologyRangeExceeded(Invalid):
class ImageVCPULimitsRangeImpossible(Invalid):
msg_fmt = _("Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d "
"are impossible to satisfy for vcpus count %(vcpus)d")
+
+
+class InvalidArchitectureName(Invalid):
+ msg_fmt = _("Architecture name '%(arch)s' is not recognised")
+
+
+class ImageNUMATopologyIncomplete(Invalid):
+ msg_fmt = _("CPU and memory allocation must be provided for all "
+ "NUMA nodes")
+
+
+class ImageNUMATopologyForbidden(Invalid):
+ msg_fmt = _("Image property '%(name)s' is not permitted to override "
+ "NUMA configuration set against the flavor")
+
+
+class ImageNUMATopologyAsymmetric(Invalid):
+ msg_fmt = _("Asymmetric NUMA topologies require explicit assignment "
+ "of CPUs and memory to nodes in image or flavor")
+
+
+class ImageNUMATopologyCPUOutOfRange(Invalid):
+ msg_fmt = _("CPU number %(cpunum)d is larger than max %(cpumax)d")
+
+
+class ImageNUMATopologyCPUDuplicates(Invalid):
+ msg_fmt = _("CPU number %(cpunum)d is assigned to two nodes")
+
+
+class ImageNUMATopologyCPUsUnassigned(Invalid):
+ msg_fmt = _("CPU number %(cpuset)s is not assigned to any node")
+
+
+class ImageNUMATopologyMemoryOutOfRange(Invalid):
+ msg_fmt = _("%(memsize)d MB of memory assigned, but expected "
+ "%(memtotal)d MB")
diff --git a/nova/filters.py b/nova/filters.py
index 0fcb8560c4..1ecd988249 100644
--- a/nova/filters.py
+++ b/nova/filters.py
@@ -17,8 +17,8 @@
Filter support
"""
+from nova.i18n import _
from nova import loadables
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py
index b8480cd51f..10983c2c1c 100644
--- a/nova/hacking/checks.py
+++ b/nova/hacking/checks.py
@@ -31,6 +31,8 @@
"""
+UNDERSCORE_IMPORT_FILES = []
+
session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]")
cfg_re = re.compile(r".*\scfg\.")
vi_header_re = re.compile(r"^#\s+vim?:.+")
@@ -54,7 +56,14 @@
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
log_translation = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")
+translated_log = re.compile(
+ r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
+ "\(\s*_\(\s*('|\")")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
+string_translation = re.compile(r"[^_]*_\(\s*('|\")")
+underscore_import_check = re.compile(r"(.)*import _(.)*")
+# We need this for cases where they have created their own _ function.
+custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
def import_no_db_in_virt(logical_line, filename):
@@ -256,6 +265,44 @@ def no_mutable_default_args(logical_line):
yield (0, msg)
+def check_explicit_underscore_import(logical_line, filename):
+ """Check for explicit import of the _ function
+
+ We need to ensure that any files that are using the _() function
+ to translate logs are explicitly importing the _ function. We
+ can't trust unit tests to catch whether the import has been
+ added, so we need to check for it here.
+ """
+
+ # Build a list of the files that have _ imported. No further
+ # checking needed once it is found.
+ if filename in UNDERSCORE_IMPORT_FILES:
+ pass
+ elif (underscore_import_check.match(logical_line) or
+ custom_underscore_check.match(logical_line)):
+ UNDERSCORE_IMPORT_FILES.append(filename)
+ elif (translated_log.match(logical_line) or
+ string_translation.match(logical_line)):
+ yield(0, "N323: Found use of _() without explicit import of _ !")
+
+
+def use_jsonutils(logical_line, filename):
+ # the code below that path is not meant to be executed from the nova
+ # tree, where the jsonutils module is present, so don't enforce its
+ # usage for this subdirectory
+ if "plugins/xenserver" in filename:
+ return
+
+ msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s"
+
+ if "json." in logical_line:
+ json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
+ for f in json_funcs:
+ pos = logical_line.find('json.%s' % f)
+ if pos != -1:
+ yield (pos, msg % {'fun': f[:-1]})
+
+
def factory(register):
register(import_no_db_in_virt)
register(no_db_session_in_public_api)
@@ -272,3 +319,5 @@ def factory(register):
register(no_setting_conf_directly_in_tests)
register(validate_log_translations)
register(no_mutable_default_args)
+ register(check_explicit_underscore_import)
+ register(use_jsonutils)
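
Both new checks are plain generators over logical lines, so they can be exercised directly. A quick sketch of the behaviour they enforce, assuming the module-level UNDERSCORE_IMPORT_FILES list starts empty and using illustrative file names::

    from nova.hacking import checks

    line = 'msg = _("No host specified")'
    # N323 fires: _() is used before any import of _ was seen in the file.
    print(list(checks.check_explicit_underscore_import(line, 'nova/foo.py')))

    # Feeding an explicit import first records the file, so the check passes.
    list(checks.check_explicit_underscore_import(
        'from nova.i18n import _', 'nova/foo.py'))
    print(list(checks.check_explicit_underscore_import(line, 'nova/foo.py')))

    # N324 fires on json.dumps; jsonutils.dumps is required instead.
    print(list(checks.use_jsonutils('json.dumps({})', 'nova/bar.py')))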
diff --git a/nova/hooks.py b/nova/hooks.py
index d56455e2a1..735b89338b 100644
--- a/nova/hooks.py
+++ b/nova/hooks.py
@@ -23,22 +23,22 @@
Hook objects are loaded by HookLoaders. Each named hook may invoke multiple
Hooks.
-Example Hook object:
+Example Hook object::
-class MyHook(object):
- def pre(self, *args, **kwargs):
- # do stuff before wrapped callable runs
+ | class MyHook(object):
+ | def pre(self, *args, **kwargs):
+ | # do stuff before wrapped callable runs
+ |
+ | def post(self, rv, *args, **kwargs):
+ | # do stuff after wrapped callable runs
- def post(self, rv, *args, **kwargs):
- # do stuff after wrapped callable runs
+Example Hook object with function parameters::
-Example Hook object with function parameters:
-
-class MyHookWithFunction(object):
- def pre(self, f, *args, **kwargs):
- # do stuff with wrapped function info
- def post(self, f, *args, **kwards):
- # do stuff with wrapped function info
+ | class MyHookWithFunction(object):
+ | def pre(self, f, *args, **kwargs):
+ | # do stuff with wrapped function info
+ | def post(self, f, *args, **kwargs):
+ | # do stuff with wrapped function info
"""
@@ -46,7 +46,7 @@ def post(self, f, *args, **kwards):
import stevedore
-from nova.openstack.common.gettextutils import _LE
+from nova.i18n import _LE
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
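
The reflowed docstring above defines the Hook protocol; the hook objects themselves are discovered through stevedore, imported just below in this hunk. A minimal concrete hook matching the documented interface, purely as a sketch::

    from nova.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    class LoggingHook(object):
        """Log around the wrapped callable, per the Hook protocol above."""

        def pre(self, *args, **kwargs):
            LOG.debug("hooked callable about to run")

        def post(self, rv, *args, **kwargs):
            LOG.debug("hooked callable returned %r", rv)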
diff --git a/nova/i18n.py b/nova/i18n.py
new file mode 100644
index 0000000000..e3e5673398
--- /dev/null
+++ b/nova/i18n.py
@@ -0,0 +1,66 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""oslo.i18n integration module.
+
+See http://docs.openstack.org/developer/oslo.i18n/usage.html .
+
+"""
+
+from oslo import i18n
+
+from nova.openstack.common import gettextutils
+
+DOMAIN = 'nova'
+
+_translators = i18n.TranslatorFactory(domain=DOMAIN)
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
+
+
+def translate(value, user_locale):
+ return i18n.translate(value, user_locale)
+
+
+def get_available_languages():
+ return i18n.get_available_languages(DOMAIN)
+
+
+# Parts in oslo-incubator are still using gettextutils._(), _LI(), etc., from
+# oslo-incubator. Until these parts are changed to use oslo.i18n, Nova
+# needs to do something to allow them to work. One option is to continue to
+# initialize gettextutils, but with the way that Nova has initialization
+# spread out over multiple entry points, we'll monkey-patch
+# gettextutils._(), _LI(), etc., to use our oslo.i18n versions.
+
+# FIXME(dims): Remove the monkey-patching and update openstack-common.conf and
+# do a sync with oslo-incubator to remove gettextutils once the
+# oslo-incubator code no longer uses its own gettextutils.
+
+gettextutils._ = _
+gettextutils._LI = _LI
+gettextutils._LW = _LW
+gettextutils._LE = _LE
+gettextutils._LC = _LC
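
Typical consumer usage of the new module, mirroring the import rewrites made throughout this patch (_ for user-facing messages, _LE for error-level log messages); the function and message are illustrative::

    from nova.i18n import _, _LE
    from nova.openstack.common import log as logging

    LOG = logging.getLogger(__name__)

    def set_host(host):
        if host is None:
            # Log messages use the level-specific marker; raised messages
            # use the plain _() translator.
            LOG.error(_LE("No host supplied"))
            raise ValueError(_("A host must be supplied"))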
diff --git a/nova/image/api.py b/nova/image/api.py
index ad7096162d..a43fdb156a 100644
--- a/nova/image/api.py
+++ b/nova/image/api.py
@@ -46,11 +46,11 @@ def _get_session(self, _context):
:param context: The `nova.context.Context` object for the request
"""
- #TODO(jaypipes): Refactor glance.get_remote_image_service and
- # glance.get_default_image_service into a single
- # method that takes a context and actually respects
- # it, returning a real session object that keeps
- # the context alive...
+ # TODO(jaypipes): Refactor glance.get_remote_image_service and
+ # glance.get_default_image_service into a single
+ # method that takes a context and actually respects
+ # it, returning a real session object that keeps
+ # the context alive...
return glance.get_default_image_service()
def get_all(self, context, **kwargs):
@@ -61,13 +61,13 @@ def get_all(self, context, **kwargs):
are owned by the requesting user in the ACTIVE status are returned.
:param context: The `nova.context.Context` object for the request
- :param **kwargs: A dictionary of filter and pagination values that
- may be passed to the underlying image info driver.
+ :param kwargs: A dictionary of filter and pagination values that
+ may be passed to the underlying image info driver.
"""
session = self._get_session(context)
return session.detail(context, **kwargs)
- def get(self, context, id_or_uri):
+ def get(self, context, id_or_uri, include_locations=False):
"""Retrieves the information record for a single disk image. If the
supplied identifier parameter is a UUID, the default driver will
be used to return information about the image. If the supplied
@@ -77,9 +77,16 @@ def get(self, context, id_or_uri):
:param context: The `nova.context.Context` object for the request
:param id_or_uri: A UUID identifier or an image URI to look up image
information for.
+ :param include_locations: (Optional) include locations in the returned
+ dict of information if the image service API
+ supports it. If the image service API does
+ not support the locations attribute, it will
+ still be included in the returned dict, as an
+ empty list.
"""
session, image_id = self._get_session_and_image_id(context, id_or_uri)
- return session.show(context, image_id)
+ return session.show(context, image_id,
+ include_locations=include_locations)
def create(self, context, image_info, data=None):
"""Creates a new image record, optionally passing the image bits to
@@ -108,7 +115,7 @@ def update(self, context, id_or_uri, image_info,
passed to the image registry.
:param data: Optional file handle or bytestream iterator that is
passed to backend storage.
- :param purge_props: Optional, defaults to True. If set, the backend
+ :param purge_props: Optional, defaults to False. If set, the backend
image registry will clear all image properties
 and replace them with the image properties supplied
in the image_info dictionary's 'properties'
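
A sketch of how a caller consumes the new include_locations flag on get(); the context and image identifier here are placeholders, not values from this patch::

    from nova.image import api as image_api

    def image_urls(context, image_id):
        # With include_locations=True the returned dict always carries a
        # 'locations' list, possibly empty if the backend hides it.
        image = image_api.API().get(context, image_id,
                                    include_locations=True)
        return [loc['url'] for loc in image['locations']]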
diff --git a/nova/image/download/__init__.py b/nova/image/download/__init__.py
index ad0affb213..55d125b85c 100644
--- a/nova/image/download/__init__.py
+++ b/nova/image/download/__init__.py
@@ -19,7 +19,7 @@
import stevedore.driver
import stevedore.extension
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/image/download/file.py b/nova/image/download/file.py
index 7dc6316703..a416835c87 100644
--- a/nova/image/download/file.py
+++ b/nova/image/download/file.py
@@ -18,8 +18,8 @@
from oslo.config import cfg
from nova import exception
+from nova.i18n import _
import nova.image.download.base as xfer_base
-from nova.openstack.common.gettextutils import _
import nova.virt.libvirt.utils as lv_utils
@@ -70,7 +70,7 @@ class FileTransfer(xfer_base.TransferBase):
desc_required_keys = ['id', 'mountpoint']
- #NOTE(jbresnah) because the group under which these options are added is
+ # NOTE(jbresnah) because the group under which these options are added is
 # dynamically determined, these options need to stay out of global space
# or they will confuse generate_sample.sh
filesystem_opts = [
@@ -143,7 +143,7 @@ def _normalize_destination(self, nova_mount, glance_mount, path):
def download(self, context, url_parts, dst_file, metadata, **kwargs):
self.filesystems = self._get_options()
if not self.filesystems:
- #NOTE(jbresnah) when nothing is configured assume legacy behavior
+ # NOTE(jbresnah) when nothing is configured assume legacy behavior
nova_mountpoint = '/'
glance_mountpoint = '/'
else:
diff --git a/nova/image/glance.py b/nova/image/glance.py
index de81d3d51d..fccbafa55b 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -19,7 +19,6 @@
import copy
import itertools
-import json
import random
import sys
import time
@@ -31,8 +30,8 @@
import six.moves.urllib.parse as urlparse
from nova import exception
+from nova.i18n import _
import nova.image.download as image_xfers
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -126,7 +125,7 @@ def generate_identity_headers(context, status='Confirmed'):
'X-Tenant-Id': getattr(context, 'tenant', None),
'X-Roles': ','.join(context.roles),
'X-Identity-Status': status,
- 'X-Service-Catalog': json.dumps(context.service_catalog),
+ 'X-Service-Catalog': jsonutils.dumps(context.service_catalog),
}
@@ -148,7 +147,7 @@ def _create_glance_client(context, host, port, use_ssl, version=1):
params['token'] = context.auth_token
params['identity_headers'] = generate_identity_headers(context)
if utils.is_valid_ipv6(host):
- #if so, it is ipv6 address, need to wrap it with '[]'
+ # if so, it is an ipv6 address and needs to be wrapped with '[]'
host = '[%s]' % host
endpoint = '%s://%s:%s' % (scheme, host, port)
return glanceclient.Client(str(version), endpoint, **params)
@@ -250,7 +249,7 @@ class GlanceImageService(object):
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
- #NOTE(jbresnah) build the table of download handlers at the beginning
+ # NOTE(jbresnah) build the table of download handlers at the beginning
# so that operators can catch errors at load time rather than whenever
# a user attempts to use a module. Note this cannot be done in glance
# space when this python module is loaded because the download module
@@ -284,18 +283,39 @@ def detail(self, context, **kwargs):
return _images
- def show(self, context, image_id):
- """Returns a dict with image data for the given opaque image id."""
+ def show(self, context, image_id, include_locations=False):
+ """Returns a dict with image data for the given opaque image id.
+
+ :param context: The context object to pass to image client
+ :param image_id: The UUID of the image
+ :param include_locations: (Optional) include locations in the returned
+ dict of information if the image service API
+ supports it. If the image service API does
+ not support the locations attribute, it will
+ still be included in the returned dict, as an
+ empty list.
+ """
+ version = 1
+ if include_locations:
+ version = 2
try:
- image = self._client.call(context, 1, 'get', image_id)
+ image = self._client.call(context, version, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not _is_image_available(context, image):
raise exception.ImageNotFound(image_id=image_id)
- base_image_meta = _translate_from_glance(image)
- return base_image_meta
+ image = _translate_from_glance(image,
+ include_locations=include_locations)
+ if include_locations:
+ locations = image.get('locations', None) or []
+ du = image.get('direct_url', None)
+ if du:
+ locations.append({'url': du, 'metadata': {}})
+ image['locations'] = locations
+
+ return image
def _get_transfer_module(self, scheme):
try:
@@ -310,8 +330,8 @@ def _get_transfer_module(self, scheme):
def download(self, context, image_id, data=None, dst_path=None):
"""Calls out to Glance for data and writes data."""
if CONF.glance.allowed_direct_url_schemes and dst_path is not None:
- locations = _get_locations(self._client, context, image_id)
- for entry in locations:
+ image = self.show(context, image_id, include_locations=True)
+ for entry in image.get('locations', []):
loc_url = entry['url']
loc_meta = entry['metadata']
o = urlparse.urlparse(loc_url)
@@ -366,7 +386,7 @@ def update(self, context, image_id, image_meta, data=None,
"""Modify the given image with the new data."""
image_meta = _translate_to_glance(image_meta)
image_meta['purge_props'] = purge_props
- #NOTE(bcwaldon): id is not an editable field, but it is likely to be
+ # NOTE(bcwaldon): id is not an editable field, but it is likely to be
# passed in by calling code. Let's be nice and ignore it.
image_meta.pop('id', None)
if data:
@@ -396,25 +416,6 @@ def delete(self, context, image_id):
return True
-def _get_locations(client, context, image_id):
- """Returns the direct url representing the backend storage location,
- or None if this attribute is not shown by Glance.
- """
- try:
- image_meta = client.call(context, 2, 'get', image_id)
- except Exception:
- _reraise_translated_image_exception(image_id)
-
- if not _is_image_available(context, image_meta):
- raise exception.ImageNotFound(image_id=image_id)
-
- locations = getattr(image_meta, 'locations', [])
- du = getattr(image_meta, 'direct_url', None)
- if du:
- locations.append({'url': du, 'metadata': {}})
- return locations
-
-
def _extract_query_params(params):
_params = {}
accepted_params = ('filters', 'marker', 'limit',
@@ -478,8 +479,9 @@ def _translate_to_glance(image_meta):
return image_meta
-def _translate_from_glance(image):
- image_meta = _extract_attributes(image)
+def _translate_from_glance(image, include_locations=False):
+ image_meta = _extract_attributes(image,
+ include_locations=include_locations)
image_meta = _convert_timestamps_to_datetimes(image_meta)
image_meta = _convert_from_string(image_meta)
return image_meta
@@ -528,8 +530,8 @@ def _convert_to_string(metadata):
return _convert(_json_dumps, metadata)
-def _extract_attributes(image):
- #NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
+def _extract_attributes(image, include_locations=False):
+ # NOTE(hdd): If a key is not found, base.Resource.__getattr__() may perform
# a get(), resulting in a useless request back to glance. This list is
 # therefore sorted, with dependent attributes at the end
# 'deleted_at' depends on 'deleted'
@@ -538,10 +540,12 @@ def _extract_attributes(image):
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
- 'min_disk', 'min_ram', 'is_public']
+ 'min_disk', 'min_ram', 'is_public',
+ 'direct_url', 'locations']
queued = getattr(image, 'status') == 'queued'
queued_exclude_attrs = ['disk_format', 'container_format']
+ include_locations_attrs = ['direct_url', 'locations']
output = {}
for attr in IMAGE_ATTRIBUTES:
@@ -552,9 +556,13 @@ def _extract_attributes(image):
# image may not have 'name' attr
elif attr == 'name':
output[attr] = getattr(image, attr, None)
- #NOTE(liusheng): queued image may not have these attributes and 'name'
+ # NOTE(liusheng): queued image may not have these attributes and 'name'
elif queued and attr in queued_exclude_attrs:
output[attr] = getattr(image, attr, None)
+ # NOTE(mriedem): Only get location attrs if including locations.
+ elif attr in include_locations_attrs:
+ if include_locations:
+ output[attr] = getattr(image, attr, None)
else:
# NOTE(xarses): Anything that is caught with the default value
 # will result in an additional lookup to glance for said attr.
@@ -624,7 +632,7 @@ def get_remote_image_service(context, image_href):
:returns: a tuple of the form (image_service, image_id)
"""
- #NOTE(bcwaldon): If image_href doesn't look like a URI, assume its a
+ # NOTE(bcwaldon): If image_href doesn't look like a URI, assume it's a
# standalone image ID
if '/' not in str(image_href):
image_service = get_default_image_service()
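
The show() change above folds Glance's direct_url attribute into the locations list, so download() only has one place to look. Restated standalone for clarity (same logic as the diff; the function name is illustrative)::

    def _merge_locations(image):
        # direct_url, when exposed by Glance v2, becomes one more entry
        # in the locations list with empty metadata.
        locations = image.get('locations', None) or []
        direct_url = image.get('direct_url', None)
        if direct_url:
            locations.append({'url': direct_url, 'metadata': {}})
        image['locations'] = locations
        return image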
diff --git a/nova/image/s3.py b/nova/image/s3.py
index db54630bef..5d5c4edff0 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -31,8 +31,8 @@
from nova.api.ec2 import ec2utils
import nova.cert.rpcapi
from nova import exception
+from nova.i18n import _, _LE
from nova.image import glance
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
@@ -160,7 +160,7 @@ def update(self, context, image_id, metadata, data=None):
return self._translate_uuid_to_id(context, image)
def detail(self, context, **kwargs):
- #NOTE(bcwaldon): sort asc to make sure we assign lower ids
+ # NOTE(bcwaldon): sort asc to make sure we assign lower ids
# to older images
kwargs.setdefault('sort_dir', 'asc')
images = self.service.detail(context, **kwargs)
@@ -264,7 +264,7 @@ def _translate_dependent_image_id(image_key, image_id):
'properties': properties})
metadata['properties']['image_state'] = 'pending'
- #TODO(bcwaldon): right now, this removes user-defined ids.
+ # TODO(bcwaldon): right now, this removes user-defined ids.
# We need to re-enable this.
metadata.pop('id', None)
@@ -328,8 +328,8 @@ def _update_image_data(context, image_uuid, image_data):
shutil.copyfileobj(part, combined)
except Exception:
- LOG.exception(_("Failed to download %(image_location)s "
- "to %(image_path)s"), log_vars)
+ LOG.exception(_LE("Failed to download %(image_location)s "
+ "to %(image_path)s"), log_vars)
_update_image_state(context, image_uuid, 'failed_download')
return
@@ -345,8 +345,8 @@ def _update_image_data(context, image_uuid, image_data):
self._decrypt_image(context, enc_filename, encrypted_key,
encrypted_iv, dec_filename)
except Exception:
- LOG.exception(_("Failed to decrypt %(image_location)s "
- "to %(image_path)s"), log_vars)
+ LOG.exception(_LE("Failed to decrypt %(image_location)s "
+ "to %(image_path)s"), log_vars)
_update_image_state(context, image_uuid, 'failed_decrypt')
return
@@ -356,8 +356,8 @@ def _update_image_data(context, image_uuid, image_data):
unz_filename = self._untarzip_image(image_path,
dec_filename)
except Exception:
- LOG.exception(_("Failed to untar %(image_location)s "
- "to %(image_path)s"), log_vars)
+ LOG.exception(_LE("Failed to untar %(image_location)s "
+ "to %(image_path)s"), log_vars)
_update_image_state(context, image_uuid, 'failed_untar')
return
@@ -366,8 +366,8 @@ def _update_image_data(context, image_uuid, image_data):
with open(unz_filename) as image_file:
_update_image_data(context, image_uuid, image_file)
except Exception:
- LOG.exception(_("Failed to upload %(image_location)s "
- "to %(image_path)s"), log_vars)
+ LOG.exception(_LE("Failed to upload %(image_location)s "
+ "to %(image_path)s"), log_vars)
_update_image_state(context, image_uuid, 'failed_upload')
return
diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py
index db9c658968..23d77c7f95 100644
--- a/nova/ipv6/account_identifier.py
+++ b/nova/ipv6/account_identifier.py
@@ -21,7 +21,7 @@
import netaddr
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
def to_global(prefix, mac, project_id):
diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py
index cda35b0a45..92746e5a07 100644
--- a/nova/ipv6/rfc2462.py
+++ b/nova/ipv6/rfc2462.py
@@ -19,7 +19,7 @@
import netaddr
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
def to_global(prefix, mac, project_id):
diff --git a/nova/keymgr/conf_key_mgr.py b/nova/keymgr/conf_key_mgr.py
index 4b9cb67208..3cb44a5a80 100644
--- a/nova/keymgr/conf_key_mgr.py
+++ b/nova/keymgr/conf_key_mgr.py
@@ -33,8 +33,8 @@
from oslo.config import cfg
+from nova.i18n import _
from nova.keymgr import single_key_mgr
-from nova.openstack.common.gettextutils import _
key_mgr_opts = [
cfg.StrOpt('fixed_key',
diff --git a/nova/keymgr/key_mgr.py b/nova/keymgr/key_mgr.py
index 4fb4f07bc0..c020ca2474 100644
--- a/nova/keymgr/key_mgr.py
+++ b/nova/keymgr/key_mgr.py
@@ -60,8 +60,10 @@ def copy_key(self, ctxt, key_id, **kwargs):
the specified context does not permit copying keys, then a
NotAuthorized error should be raised.
- Implementation note: This method should behave identically to
+ Implementation note: This method should behave identically to::
+
 store_key(context, get_key(context, <key id>))
+
although it is preferable to perform this operation within the key
manager to avoid unnecessary handling of the key material.
"""
diff --git a/nova/keymgr/mock_key_mgr.py b/nova/keymgr/mock_key_mgr.py
index 51684fec46..af09b6877b 100644
--- a/nova/keymgr/mock_key_mgr.py
+++ b/nova/keymgr/mock_key_mgr.py
@@ -29,9 +29,9 @@
import array
from nova import exception
+from nova.i18n import _
from nova.keymgr import key
from nova.keymgr import key_mgr
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
diff --git a/nova/keymgr/single_key_mgr.py b/nova/keymgr/single_key_mgr.py
index b6d4f35d01..33c24c0e63 100644
--- a/nova/keymgr/single_key_mgr.py
+++ b/nova/keymgr/single_key_mgr.py
@@ -20,8 +20,8 @@
from nova import exception
+from nova.i18n import _
from nova.keymgr import mock_key_mgr
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
diff --git a/nova/locale/de/LC_MESSAGES/nova-log-info.po b/nova/locale/de/LC_MESSAGES/nova-log-info.po
index 41f4e94e77..03883e9935 100644
--- a/nova/locale/de/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/de/LC_MESSAGES/nova-log-info.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: 2014-06-14 19:30+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: German (http://www.transifex.com/projects/p/nova/language/"
@@ -19,28 +19,78 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr "Eventlet backdoor hört auf %(port)s für Prozess %(pid)d"
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr ""
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr ""
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr ""
"Überspringe periodische Aufgabe %(task)s weil der Intervall negativ ist"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "Überspringe periodische Aufgabe %(task)s weil sie deaktiviert ist"
@@ -103,195 +153,209 @@ msgstr "Lösche doppelte Zeile mit der ID %(id)s aus der Tabelle %(table)s"
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr ""
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr ""
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr ""
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr ""
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
"device"
msgstr ""
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr ""
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr ""
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
"%(remote)d on other nodes sharing this instance storage"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po b/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po
index 196c1ec0d5..475632a54e 100644
--- a/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/en_AU/LC_MESSAGES/nova-log-info.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: 2014-06-14 19:30+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: English (Australia) (http://www.transifex.com/projects/p/nova/"
@@ -19,27 +19,77 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr "Eventlet backdoor listening on %(port)s for process %(pid)d"
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr ""
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr ""
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr "Skipping periodic task %(task)s because its interval is negative"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "Skipping periodic task %(task)s because it is disabled"
@@ -101,195 +151,209 @@ msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s"
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr ""
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr ""
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr ""
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr ""
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
"device"
msgstr ""
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr ""
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr ""
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
"%(remote)d on other nodes sharing this instance storage"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po b/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po
index 8bd8939bcb..87eaad1f05 100644
--- a/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/en_GB/LC_MESSAGES/nova-log-error.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
+"POT-Creation-Date: 2014-08-18 06:04+0000\n"
"PO-Revision-Date: 2014-06-14 19:30+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/"
@@ -39,11 +39,305 @@ msgstr ""
msgid "Exception running %(name)s post-hook: %(obj)s"
msgstr ""
-#: nova/api/ec2/__init__.py:243
+#: nova/api/ec2/__init__.py:244
#, python-format
msgid "Keystone failure: %s"
msgstr ""
+#: nova/api/ec2/__init__.py:493
+#, python-format
+msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:520
+#, python-format
+msgid "Environment: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:155
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:212
+#, python-format
+msgid "Failed to get metadata for instance id: %s"
+msgstr ""
+
+#: nova/api/openstack/common.py:134
+#, python-format
+msgid ""
+"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad "
+"upgrade or db corrupted?"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:684
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:68
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:90
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:112
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:134
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:160
+#, python-format
+msgid "Error in migrate %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:179
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:198
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:215
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:234
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:392
+#, python-format
+msgid "Compute.api::resetState %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/multinic.py:85
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85
+msgid "Failed to get default networks"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
+msgid "Failed to update usages deallocating network."
+msgstr ""
+
+#: nova/compute/api.py:561
+msgid "Failed to set instance name using multi_instance_display_name_template."
+msgstr ""
+
+#: nova/compute/api.py:1429
+msgid ""
+"Something wrong happened when trying to delete snapshot from shelved "
+"instance."
+msgstr ""
+
+#: nova/compute/api.py:3732
+msgid "Failed to update usages deallocating security group"
+msgstr ""
+
+#: nova/compute/flavors.py:167
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: nova/compute/flavors.py:178
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: nova/compute/manager.py:366
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr ""
+
+#: nova/compute/manager.py:755
+msgid "Failed to check if instance shared"
+msgstr ""
+
+#: nova/compute/manager.py:821 nova/compute/manager.py:872
+msgid "Failed to complete a deletion"
+msgstr ""
+
+#: nova/compute/manager.py:913
+msgid "Failed to stop instance"
+msgstr ""
+
+#: nova/compute/manager.py:925
+msgid "Failed to start instance"
+msgstr ""
+
+#: nova/compute/manager.py:950
+msgid "Failed to revert crashed migration"
+msgstr ""
+
+#: nova/compute/manager.py:1364
+msgid "Failed to dealloc network for deleted instance"
+msgstr ""
+
+#: nova/compute/manager.py:1385
+msgid "Failed to dealloc network for failed instance"
+msgstr ""
+
+#: nova/compute/manager.py:1458 nova/compute/manager.py:3527
+msgid "Error trying to reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:1567
+#, python-format
+msgid "Instance failed network setup after %(attempts)d attempt(s)"
+msgstr ""
+
+#: nova/compute/manager.py:1761
+msgid "Instance failed block device setup"
+msgstr ""
+
+#: nova/compute/manager.py:1781 nova/compute/manager.py:2123
+#: nova/compute/manager.py:4071
+msgid "Instance failed to spawn"
+msgstr ""
+
+#: nova/compute/manager.py:1964
+msgid "Unexpected build failure, not rescheduling build."
+msgstr ""
+
+#: nova/compute/manager.py:2033 nova/compute/manager.py:2085
+msgid "Failed to allocate network(s)"
+msgstr ""
+
+#: nova/compute/manager.py:2111
+msgid "Failure prepping block device"
+msgstr ""
+
+#: nova/compute/manager.py:2144
+msgid "Failed to deallocate networks"
+msgstr ""
+
+#: nova/compute/manager.py:2374 nova/compute/manager.py:3718
+#: nova/compute/manager.py:5822
+msgid "Setting instance vm_state to ERROR"
+msgstr ""
+
+#: nova/compute/manager.py:2586 nova/compute/manager.py:4933
+#, python-format
+msgid "Failed to get compute_info for %s"
+msgstr ""
+
+#: nova/compute/manager.py:3013
+#, python-format
+msgid "set_admin_password failed: %s"
+msgstr ""
+
+#: nova/compute/manager.py:3098
+msgid "Error trying to Rescue Instance"
+msgstr ""
+
+#: nova/compute/manager.py:3724
+#, python-format
+msgid "Failed to rollback quota for failed finish_resize: %s"
+msgstr ""
+
+#: nova/compute/manager.py:4323
+#, python-format
+msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4362
+#, python-format
+msgid "Failed to detach volume %(volume_id)s from %(mp)s"
+msgstr ""
+
+#: nova/compute/manager.py:4441
+#, python-format
+msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
+msgstr ""
+
+#: nova/compute/manager.py:4448
+#, python-format
+msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4735
+#, python-format
+msgid "Pre live migration failed at %s"
+msgstr ""
+
+#: nova/compute/manager.py:5235
+msgid "Periodic task failed to offload instance."
+msgstr ""
+
+#: nova/compute/manager.py:5275
+#, python-format
+msgid "Failed to generate usage audit for instance on host %s"
+msgstr ""
+
+#: nova/compute/manager.py:5465
+msgid ""
+"Periodic sync_power_state task had an error while processing an instance."
+msgstr ""
+
+#: nova/compute/manager.py:5568 nova/compute/manager.py:5577
+#: nova/compute/manager.py:5608 nova/compute/manager.py:5619
+msgid "error during stop() in sync_power_state."
+msgstr ""
+
+#: nova/network/neutronv2/api.py:234
+#, python-format
+msgid "Neutron error creating port on network %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:418
+#, python-format
+msgid "Failed to update port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:425
+#, python-format
+msgid "Failed to delete port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524
+#, python-format
+msgid "Failed to delete neutron port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:697
+#, python-format
+msgid "Failed to access port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:931
+#, python-format
+msgid "Unable to access floating IP %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1065
+#, python-format
+msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1124
+#, python-format
+msgid "Unable to update host of port %s"
+msgstr ""
+
#: nova/objects/instance_fault.py:87
msgid "Failed to notify cells of instance fault"
msgstr "Failed to notify cells of instance fault"
@@ -58,35 +352,35 @@ msgstr "Original exception being dropped: %s"
msgid "Unexpected exception occurred %d time(s)... retrying."
msgstr "Unexpected exception occurred %d time(s)... retrying."
-#: nova/openstack/common/lockutils.py:120
+#: nova/openstack/common/lockutils.py:119
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/loopingcall.py:89
+#: nova/openstack/common/loopingcall.py:95
msgid "in fixed duration looping call"
msgstr "in fixed duration looping call"
-#: nova/openstack/common/loopingcall.py:136
+#: nova/openstack/common/loopingcall.py:138
msgid "in dynamic looping call"
msgstr "in dynamic looping call"
-#: nova/openstack/common/periodic_task.py:179
+#: nova/openstack/common/periodic_task.py:202
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr "Error during %(full_task_name)s: %(e)s"
-#: nova/openstack/common/policy.py:511
+#: nova/openstack/common/policy.py:507
#, python-format
msgid "Failed to understand rule %s"
msgstr "Failed to understand rule %s"
-#: nova/openstack/common/policy.py:521
+#: nova/openstack/common/policy.py:517
#, python-format
msgid "No handler for matches of kind %s"
msgstr "No handler for matches of kind %s"
-#: nova/openstack/common/policy.py:791
+#: nova/openstack/common/policy.py:787
#, python-format
msgid "Failed to understand rule %r"
msgstr "Failed to understand rule %r"
@@ -116,170 +410,184 @@ msgstr "DB exception wrapped."
msgid "Failed to migrate to version %s on engine %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:625
+#: nova/pci/pci_stats.py:119
+msgid ""
+"Failed to allocate PCI devices for instance. Unassigning devices back to "
+"pools. This should not happen, since the scheduler should have accurate "
+"information, and allocation during claims is controlled via a hold on the "
+"compute node semaphore"
+msgstr ""
+
+#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109
+#, python-format
+msgid "PCI device %s not found"
+msgstr ""
+
+#: nova/virt/disk/api.py:388
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:639
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:749
+#: nova/virt/libvirt/driver.py:764
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:873
+#: nova/virt/libvirt/driver.py:927
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:889
-msgid "During wait destroy, instance disappeared."
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:951
+#: nova/virt/libvirt/driver.py:1005
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:977
+#: nova/virt/libvirt/driver.py:1033
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1389
+#: nova/virt/libvirt/driver.py:1444
msgid "attaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1414
+#: nova/virt/libvirt/driver.py:1471
msgid "detaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1663
+#: nova/virt/libvirt/driver.py:1726
msgid "Failed to send updated snapshot status to volume service."
msgstr ""
-#: nova/virt/libvirt/driver.py:1749
+#: nova/virt/libvirt/driver.py:1834
msgid ""
"Unable to create quiesced VM snapshot, attempting again with quiescing "
"disabled."
msgstr ""
-#: nova/virt/libvirt/driver.py:1755
+#: nova/virt/libvirt/driver.py:1840
msgid "Unable to create VM snapshot, failing volume_snapshot operation."
msgstr ""
-#: nova/virt/libvirt/driver.py:1804
+#: nova/virt/libvirt/driver.py:1889
msgid ""
"Error occurred during volume_snapshot_create, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:1951
+#: nova/virt/libvirt/driver.py:2111
msgid ""
"Error occurred during volume_snapshot_delete, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421
+#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:2542
+#: nova/virt/libvirt/driver.py:2705
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2693
+#: nova/virt/libvirt/driver.py:2873
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2786
+#: nova/virt/libvirt/driver.py:2966
#, python-format
msgid "Attaching PCI devices %(dev)s to %(dom)s failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:3553
+#: nova/virt/libvirt/driver.py:3783
#, python-format
-msgid "An error occurred while trying to define a domain with xml: %s"
+msgid "Error defining a domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3562
+#: nova/virt/libvirt/driver.py:3787
#, python-format
-msgid "An error occurred while trying to launch a defined domain with xml: %s"
+msgid "Error launching a defined domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3571
+#: nova/virt/libvirt/driver.py:3792
#, python-format
-msgid "An error occurred while enabling hairpin mode on domain with xml: %s"
+msgid "Error enabling hairpin mode with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3589
+#: nova/virt/libvirt/driver.py:3806
#, python-format
msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3904
+#: nova/virt/libvirt/driver.py:4115
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to take "
"effect."
msgstr ""
-#: nova/virt/libvirt/driver.py:4481
+#: nova/virt/libvirt/driver.py:4794
#, python-format
msgid "Live Migration failure: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5231
+#: nova/virt/libvirt/driver.py:5596
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:202
+#: nova/virt/libvirt/imagebackend.py:200
#, python-format
msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:230
+#: nova/virt/libvirt/imagebackend.py:227
#, python-format
msgid ""
"%(base)s virtual size %(base_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:501
-#, python-format
-msgid "error opening rbd image %s"
-msgstr ""
-
-#: nova/virt/libvirt/imagecache.py:130
+#: nova/virt/libvirt/imagecache.py:129
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:391
+#: nova/virt/libvirt/imagecache.py:390
#, python-format
msgid "image %(id)s at (%(base_file)s): image verification failed"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:448
+#: nova/virt/libvirt/imagecache.py:447
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/lvm.py:201
+#: nova/virt/libvirt/lvm.py:200
#, python-format
msgid "ignoring unrecognized volume_clear='%s' value"
msgstr ""
-#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572
-#: nova/virt/libvirt/vif.py:596
+#: nova/virt/libvirt/rbd_utils.py:62
+#, python-format
+msgid "error opening rbd image %s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474
+#: nova/virt/libvirt/vif.py:496
msgid "Failed while plugging vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676
-#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717
-#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762
-#: nova/virt/libvirt/vif.py:784
+#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560
+#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598
+#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639
msgid "Failed while unplugging vif"
msgstr ""
@@ -288,12 +596,28 @@ msgstr ""
msgid "Unknown content in connection_info/access_mode: %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:666
+#: nova/virt/libvirt/volume.py:669
#, python-format
msgid "Couldn't unmount the NFS share %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:815
+#: nova/virt/libvirt/volume.py:818
#, python-format
msgid "Couldn't unmount the GlusterFS share %s"
msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:508
+#, python-format
+msgid ""
+"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1551
+#, python-format
+msgid "Attaching network adapter failed. Exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1591
+#, python-format
+msgid "Detaching network adapter failed. Exception: %s"
+msgstr ""
diff --git a/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po b/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po
index ef67439723..b726cf27ed 100644
--- a/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/en_GB/LC_MESSAGES/nova-log-info.po
@@ -3,12 +3,13 @@
# This file is distributed under the same license as the nova project.
#
# Translators:
+# Andi Chandler , 2014
msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
-"PO-Revision-Date: 2014-06-30 05:01+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
+"PO-Revision-Date: 2014-08-15 05:00+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: English (United Kingdom) (http://www.transifex.com/projects/p/"
"nova/language/en_GB/)\n"
@@ -19,27 +20,78 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "%(url)s returned with HTTP %(status)d"
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr "V3 API has been disabled by configuration"
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr "Fault thrown: %s"
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr "HTTP exception thrown: %s"
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr "Deleting network with id %s"
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr "bringing vm to original state: '%s'"
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr "Eventlet backdoor listening on %(port)s for process %(pid)d"
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr "Created lock path: %s"
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
-msgstr ""
+msgstr "Failed to remove file %(file)s"
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr "Skipping periodic task %(task)s because its interval is negative"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "Skipping periodic task %(task)s because it is disabled"
@@ -84,7 +136,7 @@ msgstr "Caught %s, stopping children"
#: nova/openstack/common/service.py:403
msgid "Wait called after thread killed. Cleaning up."
-msgstr ""
+msgstr "Wait called after thread killed. Cleaning up."
#: nova/openstack/common/service.py:414
#, python-format
@@ -99,97 +151,114 @@ msgstr "Deleting duplicated row with id: %(id)s from table: %(table)s"
#: nova/scheduler/filters/utils.py:50
#, python-format
msgid "%(num_values)d values found, of which the minimum value will be used."
+msgstr "%(num_values)d values found, of which the minimum value will be used."
+
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr "instance chain %s disappeared during refresh, skipping"
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr "Unable to force TCG mode, libguestfs too old?"
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr "Instance destroyed successfully."
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr "Instance may be started again."
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr "Going to destroy instance again."
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr "Beginning live snapshot process"
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr "Beginning cold snapshot process"
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr "Snapshot extracted, beginning image upload"
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr "Snapshot image upload complete"
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr "Instance soft rebooted successfully."
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr "Instance shutdown successfully."
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr "Instance may have been rebooted during soft reboot, so return now."
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr "Instance rebooted successfully."
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr "Instance spawned successfully."
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr "data: %(data)r, fpath: %(fpath)r"
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr "Truncated console log returned, %d bytes ignored"
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr "Creating image"
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr "Using config drive"
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr "Creating config drive at %(path)s"
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
+msgstr "Configuring timezone for windows instance to localtime"
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
+"Getting block stats failed, device might have been detached. Instance="
+"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
@@ -198,75 +267,75 @@ msgstr ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
"device"
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
-msgstr ""
+msgstr "Instance launched has CPU info: %s"
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr "Instance running successfully."
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
-msgstr ""
+msgstr "Deleting instance files %s"
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
-msgstr ""
+msgstr "Deletion of %s failed"
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
-msgstr ""
+msgstr "Deletion of %s complete"
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr "Called setup_basic_filtering in nwfilter"
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr "Ensuring static filters"
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr "Attempted to unfilter instance which is not filtered"
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr "Writing stored info to %s"
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
msgstr ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr "%(id)s (%(base_file)s): generating checksum"
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr "Base file too young to remove: %s"
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr "Removing base file: %s"
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr "image %(id)s at (%(base_file)s): checking"
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
@@ -275,28 +344,30 @@ msgstr ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
"%(remote)d on other nodes sharing this instance storage"
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr "Active base files: %s"
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr "Corrupt base files: %s"
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr "Removable base files: %s"
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
-msgstr ""
+msgstr "findmnt tool is not installed"
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
"duration: %(duration).2f secs for image %(image_id)s"
msgstr ""
+"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
+"duration: %(duration).2f secs for image %(image_id)s"
diff --git a/nova/locale/en_US/LC_MESSAGES/nova.po b/nova/locale/en_US/LC_MESSAGES/nova.po
index e94fd6ea3f..43a7079bff 100644
--- a/nova/locale/en_US/LC_MESSAGES/nova.po
+++ b/nova/locale/en_US/LC_MESSAGES/nova.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: Nova\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/nova\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: 2013-01-21 18:28+0000\n"
"Last-Translator: Jeremy Stanley \n"
"Language-Team: en_US \n"
@@ -17,39 +17,43 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
-#: nova/block_device.py:99
+#: nova/block_device.py:102
msgid "Some fields are invalid."
msgstr ""
-#: nova/block_device.py:109
+#: nova/block_device.py:112
msgid "Some required fields are missing"
msgstr ""
-#: nova/block_device.py:125
+#: nova/block_device.py:128
msgid "Boot index is invalid."
msgstr ""
-#: nova/block_device.py:168
+#: nova/block_device.py:171
msgid "Unrecognized legacy format."
msgstr ""
-#: nova/block_device.py:185
+#: nova/block_device.py:188
msgid "Invalid source_type field."
msgstr ""
-#: nova/block_device.py:189
+#: nova/block_device.py:191
+msgid "Invalid device UUID."
+msgstr ""
+
+#: nova/block_device.py:195
msgid "Missing device UUID."
msgstr ""
-#: nova/block_device.py:368
+#: nova/block_device.py:374
msgid "Device name empty or too long."
msgstr ""
-#: nova/block_device.py:372
+#: nova/block_device.py:378
msgid "Device name contains spaces."
msgstr ""
-#: nova/block_device.py:382
+#: nova/block_device.py:388
msgid "Invalid volume_size."
msgstr ""
@@ -333,7 +337,7 @@ msgstr ""
msgid "Group not valid. Reason: %(reason)s"
msgstr "Group not valid. Reason: %(reason)s"
-#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:58
+#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:57
msgid "Sort key supplied was not valid."
msgstr "Sort key supplied was not valid."
@@ -406,49 +410,49 @@ msgstr ""
msgid "Failed to deploy instance: %(reason)s"
msgstr ""
-#: nova/exception.py:402
+#: nova/exception.py:402 nova/exception.py:406
#, python-format
msgid "Failed to launch instances: %(reason)s"
msgstr ""
-#: nova/exception.py:406
+#: nova/exception.py:410
msgid "Service is unavailable at this time."
msgstr "Service is unavailable at this time."
-#: nova/exception.py:410
+#: nova/exception.py:414
#, python-format
msgid "Insufficient compute resources: %(reason)s."
msgstr ""
-#: nova/exception.py:414
+#: nova/exception.py:418
#, python-format
msgid "Connection to the hypervisor is broken on host: %(host)s"
msgstr ""
-#: nova/exception.py:418
+#: nova/exception.py:422
#, fuzzy, python-format
msgid "Compute service of %(host)s is unavailable at this time."
msgstr "Compute service is unavailable at this time."
-#: nova/exception.py:422
+#: nova/exception.py:426
#, python-format
msgid "Compute service of %(host)s is still in use."
msgstr ""
-#: nova/exception.py:426
+#: nova/exception.py:430
#, python-format
msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
-#: nova/exception.py:431
+#: nova/exception.py:435
msgid "The supplied hypervisor type of is invalid."
msgstr "The supplied hypervisor type of is invalid."
-#: nova/exception.py:435
+#: nova/exception.py:439
msgid "The instance requires a newer hypervisor version than has been provided."
msgstr "The instance requires a newer hypervisor version than has been provided."
-#: nova/exception.py:440
+#: nova/exception.py:444
#, python-format
msgid ""
"The supplied disk path (%(path)s) already exists, it is expected not to "
@@ -457,32 +461,32 @@ msgstr ""
"The supplied disk path (%(path)s) already exists, it is expected not to "
"exist."
-#: nova/exception.py:445
+#: nova/exception.py:449
#, python-format
msgid "The supplied device path (%(path)s) is invalid."
msgstr "The supplied device path (%(path)s) is invalid."
-#: nova/exception.py:449
+#: nova/exception.py:453
#, python-format
msgid "The supplied device path (%(path)s) is in use."
msgstr "The supplied device path (%(path)s) is in use."
-#: nova/exception.py:454
+#: nova/exception.py:458
#, python-format
msgid "The supplied device (%(device)s) is busy."
msgstr "The supplied device (%(device)s) is busy."
-#: nova/exception.py:458
+#: nova/exception.py:462
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr ""
-#: nova/exception.py:462
+#: nova/exception.py:466
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr "%(address)s is not a valid IP v4/6 address."
-#: nova/exception.py:466
+#: nova/exception.py:470
#, python-format
msgid ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
@@ -491,7 +495,7 @@ msgstr ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
-#: nova/exception.py:472
+#: nova/exception.py:476
#, python-format
msgid ""
"vSwitch which contains the port group %(bridge)s is not associated with "
@@ -502,111 +506,111 @@ msgstr ""
"the desired physical adapter. Expected vSwitch is %(expected)s, but the "
"one associated is %(actual)s."
-#: nova/exception.py:479
+#: nova/exception.py:483
#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Disk format %(disk_format)s is not acceptable"
-#: nova/exception.py:483
+#: nova/exception.py:487
#, python-format
msgid "Disk info file is invalid: %(reason)s"
msgstr ""
-#: nova/exception.py:487
+#: nova/exception.py:491
#, python-format
msgid "Failed to read or write disk info file: %(reason)s"
msgstr ""
-#: nova/exception.py:491
+#: nova/exception.py:495
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr "Image %(image_id)s is unacceptable: %(reason)s"
-#: nova/exception.py:495
+#: nova/exception.py:499
#, python-format
msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
msgstr "Instance %(instance_id)s is unacceptable: %(reason)s"
-#: nova/exception.py:499
+#: nova/exception.py:503
#, python-format
msgid "Ec2 id %(ec2_id)s is unacceptable."
msgstr "Ec2 id %(ec2_id)s is unacceptable."
-#: nova/exception.py:503
+#: nova/exception.py:507
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr "Expected a uuid but received %(uuid)s."
-#: nova/exception.py:507
+#: nova/exception.py:511
#, fuzzy, python-format
msgid "Invalid ID received %(id)s."
msgstr "Invalid cidr %(cidr)s."
-#: nova/exception.py:511
+#: nova/exception.py:515
msgid "Constraint not met."
msgstr "Constraint not met."
-#: nova/exception.py:516
+#: nova/exception.py:520
msgid "Resource could not be found."
msgstr "Resource could not be found."
-#: nova/exception.py:521
+#: nova/exception.py:525
#, fuzzy, python-format
msgid "No agent-build associated with id %(id)s."
msgstr "No fixed IP associated with id %(id)s."
-#: nova/exception.py:525
+#: nova/exception.py:529
#, python-format
msgid ""
"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
"%(architecture)s exists."
msgstr ""
-#: nova/exception.py:531
+#: nova/exception.py:535
#, python-format
msgid "Volume %(volume_id)s could not be found."
msgstr "Volume %(volume_id)s could not be found."
-#: nova/exception.py:535
+#: nova/exception.py:539
#, python-format
msgid "No volume Block Device Mapping with id %(volume_id)s."
msgstr ""
-#: nova/exception.py:540
+#: nova/exception.py:544
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Snapshot %(snapshot_id)s could not be found."
-#: nova/exception.py:544
+#: nova/exception.py:548
#, python-format
msgid "No disk at %(location)s"
msgstr "No disk at %(location)s"
-#: nova/exception.py:548
+#: nova/exception.py:552
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr "Could not find a handler for %(driver_type)s volume."
-#: nova/exception.py:552
+#: nova/exception.py:556
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr "Invalid image href %(image_href)s."
-#: nova/exception.py:556
+#: nova/exception.py:560
#, python-format
msgid "Requested image %(image)s has automatic disk resize disabled."
msgstr ""
-#: nova/exception.py:561
+#: nova/exception.py:565
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr "Image %(image_id)s could not be found."
-#: nova/exception.py:565
+#: nova/exception.py:569
msgid "The current driver does not support preserving ephemeral partitions."
msgstr ""
-#: nova/exception.py:571
+#: nova/exception.py:575
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -617,148 +621,153 @@ msgstr ""
" dynamically when they are listed for the first time. Have you listed "
"image ids since adding this image?"
-#: nova/exception.py:578
+#: nova/exception.py:582
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr "Project %(project_id)s could not be found."
-#: nova/exception.py:582
+#: nova/exception.py:586
msgid "Cannot find SR to read/write VDI."
msgstr "Cannot find SR to read/write VDI."
-#: nova/exception.py:586
+#: nova/exception.py:590
#, fuzzy, python-format
msgid "Network %(network_id)s is duplicated."
msgstr "Network %(network_id)s is still in use."
-#: nova/exception.py:590
+#: nova/exception.py:594
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr "Network %(network_id)s is still in use."
-#: nova/exception.py:594
+#: nova/exception.py:598
#, python-format
msgid "%(req)s is required to create a network."
msgstr "%(req)s is required to create a network."
-#: nova/exception.py:598
+#: nova/exception.py:602
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr "Network %(network_id)s could not be found."
-#: nova/exception.py:602
+#: nova/exception.py:606
#, fuzzy, python-format
msgid "Port id %(port_id)s could not be found."
msgstr "Port %(port_id)s could not be found."
-#: nova/exception.py:606
+#: nova/exception.py:610
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr "Network could not be found for bridge %(bridge)s"
-#: nova/exception.py:610
+#: nova/exception.py:614
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr "Network could not be found for uuid %(uuid)s"
-#: nova/exception.py:614
+#: nova/exception.py:618
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr "Network could not be found with cidr %(cidr)s."
-#: nova/exception.py:618
+#: nova/exception.py:622
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr "Network could not be found for instance %(instance_id)s."
-#: nova/exception.py:622
+#: nova/exception.py:626
msgid "No networks defined."
msgstr "No networks defined."
-#: nova/exception.py:626
+#: nova/exception.py:630
msgid "No more available networks."
msgstr ""
-#: nova/exception.py:630
+#: nova/exception.py:634
#, python-format
msgid ""
"Either network uuid %(network_uuid)s is not present or is not assigned to"
" the project %(project_id)s."
msgstr ""
-#: nova/exception.py:635
+#: nova/exception.py:639
msgid ""
"More than one possible network found. Specify network ID(s) to select "
"which one(s) to connect to,"
msgstr ""
-#: nova/exception.py:640
+#: nova/exception.py:644
#, python-format
msgid "Network %(network_uuid)s requires a subnet in order to boot instances on."
msgstr ""
-#: nova/exception.py:645
+#: nova/exception.py:649
#, python-format
msgid ""
"It is not allowed to create an interface on external network "
"%(network_uuid)s"
msgstr ""
-#: nova/exception.py:650
+#: nova/exception.py:654
+#, python-format
+msgid "Physical network is missing for network %(network_uuid)s"
+msgstr ""
+
+#: nova/exception.py:658
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr "Could not find the datastore reference(s) which the VM uses."
-#: nova/exception.py:654
+#: nova/exception.py:662
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr "Port %(port_id)s is still in use."
-#: nova/exception.py:658
+#: nova/exception.py:666
#, python-format
msgid "Port %(port_id)s requires a FixedIP in order to be used."
msgstr ""
-#: nova/exception.py:662
+#: nova/exception.py:670
#, fuzzy, python-format
msgid "Port %(port_id)s not usable for instance %(instance)s."
msgstr "Network could not be found for instance %(instance_id)s."
-#: nova/exception.py:666
+#: nova/exception.py:674
#, fuzzy, python-format
msgid "No free port available for instance %(instance)s."
msgstr "Network could not be found for instance %(instance_id)s."
-#: nova/exception.py:670
+#: nova/exception.py:678
#, python-format
msgid "Fixed ip %(address)s already exists."
msgstr ""
-#: nova/exception.py:674
+#: nova/exception.py:682
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr "No fixed IP associated with id %(id)s."
-#: nova/exception.py:678
+#: nova/exception.py:686
#, python-format
msgid "Fixed ip not found for address %(address)s."
msgstr "Fixed ip not found for address %(address)s."
-#: nova/exception.py:682
+#: nova/exception.py:690
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed ips."
msgstr "Instance %(instance_uuid)s has zero fixed ips."
-#: nova/exception.py:686
+#: nova/exception.py:694
#, python-format
msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
msgstr "Network host %(host)s has zero fixed ips in network %(network_id)s."
-#: nova/exception.py:691
+#: nova/exception.py:699
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
msgstr "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
-#: nova/exception.py:695
+#: nova/exception.py:703
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
@@ -767,7 +776,7 @@ msgstr ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
-#: nova/exception.py:700
+#: nova/exception.py:708
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
@@ -776,126 +785,126 @@ msgstr ""
"Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s."
-#: nova/exception.py:705
+#: nova/exception.py:713
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
msgstr "More than one instance is associated with fixed ip address '%(address)s'."
-#: nova/exception.py:710
+#: nova/exception.py:718
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr "Fixed IP address %(address)s is invalid."
-#: nova/exception.py:715
+#: nova/exception.py:723
msgid "Zero fixed ips available."
msgstr "Zero fixed ips available."
-#: nova/exception.py:719
+#: nova/exception.py:727
msgid "Zero fixed ips could be found."
msgstr "Zero fixed ips could be found."
-#: nova/exception.py:723
+#: nova/exception.py:731
#, python-format
msgid "Floating ip %(address)s already exists."
msgstr "Floating ip %(address)s already exists."
-#: nova/exception.py:728
+#: nova/exception.py:736
#, python-format
msgid "Floating ip not found for id %(id)s."
msgstr "Floating ip not found for id %(id)s."
-#: nova/exception.py:732
+#: nova/exception.py:740
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr "The DNS entry %(name)s already exists in domain %(domain)s."
-#: nova/exception.py:736
+#: nova/exception.py:744
#, python-format
msgid "Floating ip not found for address %(address)s."
msgstr "Floating ip not found for address %(address)s."
-#: nova/exception.py:740
+#: nova/exception.py:748
#, python-format
msgid "Floating ip not found for host %(host)s."
msgstr "Floating ip not found for host %(host)s."
-#: nova/exception.py:744
+#: nova/exception.py:752
#, python-format
msgid "Multiple floating ips are found for address %(address)s."
msgstr "Multiple floating ips are found for address %(address)s."
-#: nova/exception.py:748
+#: nova/exception.py:756
msgid "Floating ip pool not found."
msgstr "Floating ip pool not found."
-#: nova/exception.py:753
+#: nova/exception.py:761
msgid "Zero floating ips available."
msgstr "Zero floating ips available."
-#: nova/exception.py:759
+#: nova/exception.py:767
#, python-format
msgid "Floating ip %(address)s is associated."
msgstr "Floating ip %(address)s is associated."
-#: nova/exception.py:763
+#: nova/exception.py:771
#, python-format
msgid "Floating ip %(address)s is not associated."
msgstr "Floating ip %(address)s is not associated."
-#: nova/exception.py:767
+#: nova/exception.py:775
msgid "Zero floating ips exist."
msgstr "Zero floating ips exist."
-#: nova/exception.py:772
+#: nova/exception.py:780
#, python-format
msgid "Interface %(interface)s not found."
msgstr "Interface %(interface)s not found."
-#: nova/exception.py:777 nova/api/openstack/compute/contrib/floating_ips.py:97
+#: nova/exception.py:785 nova/api/openstack/compute/contrib/floating_ips.py:98
msgid "Cannot disassociate auto assigned floating ip"
msgstr "Cannot disassociate auto assigned floating ip"
-#: nova/exception.py:782
+#: nova/exception.py:790
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr "Keypair %(name)s not found for user %(user_id)s"
-#: nova/exception.py:786
+#: nova/exception.py:794
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr "Service %(service_id)s could not be found."
-#: nova/exception.py:790
+#: nova/exception.py:798
#, python-format
msgid "Service with host %(host)s binary %(binary)s exists."
msgstr ""
-#: nova/exception.py:794
+#: nova/exception.py:802
#, python-format
msgid "Service with host %(host)s topic %(topic)s exists."
msgstr ""
-#: nova/exception.py:798
+#: nova/exception.py:806
#, python-format
msgid "Host %(host)s could not be found."
msgstr "Host %(host)s could not be found."
-#: nova/exception.py:802
+#: nova/exception.py:810
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr "Compute host %(host)s could not be found."
-#: nova/exception.py:806
+#: nova/exception.py:814
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr "Could not find binary %(binary)s on host %(host)s."
-#: nova/exception.py:810
+#: nova/exception.py:818
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr "Invalid reservation expiration %(expire)s."
-#: nova/exception.py:814
+#: nova/exception.py:822
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
@@ -904,73 +913,78 @@ msgstr ""
"Change would make usage less than 0 for the following resources: "
"%(unders)s"
-#: nova/exception.py:819
+#: nova/exception.py:827
+#, python-format
+msgid "Wrong quota method %(method)s used on resource %(res)s"
+msgstr ""
+
+#: nova/exception.py:831
msgid "Quota could not be found"
msgstr "Quota could not be found"
-#: nova/exception.py:823
+#: nova/exception.py:835
#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr ""
-#: nova/exception.py:828
+#: nova/exception.py:840
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr "Unknown quota resources %(unknown)s."
-#: nova/exception.py:832
+#: nova/exception.py:844
#, python-format
msgid "Quota for user %(user_id)s in project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:837
+#: nova/exception.py:849
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr "Quota for project %(project_id)s could not be found."
-#: nova/exception.py:841
+#: nova/exception.py:853
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr "Quota class %(class_name)s could not be found."
-#: nova/exception.py:845
+#: nova/exception.py:857
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr "Quota usage for project %(project_id)s could not be found."
-#: nova/exception.py:849
+#: nova/exception.py:861
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr "Quota reservation %(uuid)s could not be found."
-#: nova/exception.py:853
+#: nova/exception.py:865
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr "Quota exceeded for resources: %(overs)s"
-#: nova/exception.py:857
+#: nova/exception.py:869
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr "Security group %(security_group_id)s not found."
-#: nova/exception.py:861
+#: nova/exception.py:873
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr "Security group %(security_group_id)s not found for project %(project_id)s."
-#: nova/exception.py:866
+#: nova/exception.py:878
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr "Security group with rule %(rule_id)s not found."
-#: nova/exception.py:871
+#: nova/exception.py:883
#, python-format
msgid ""
"Security group %(security_group_name)s already exists for project "
"%(project_id)s."
msgstr ""
-#: nova/exception.py:876
+#: nova/exception.py:888
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
@@ -979,7 +993,7 @@ msgstr ""
"Security group %(security_group_id)s is already associated with the "
"instance %(instance_id)s"
-#: nova/exception.py:881
+#: nova/exception.py:893
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
@@ -988,49 +1002,49 @@ msgstr ""
"Security group %(security_group_id)s is not associated with the instance "
"%(instance_id)s"
-#: nova/exception.py:886
+#: nova/exception.py:898
#, fuzzy, python-format
msgid "Security group default rule (%rule_id)s not found."
msgstr "Security group with rule %(rule_id)s not found."
-#: nova/exception.py:890
+#: nova/exception.py:902
msgid ""
"Network requires port_security_enabled and subnet associated in order to "
"apply security groups."
msgstr ""
-#: nova/exception.py:896
+#: nova/exception.py:908
#, python-format
msgid "Rule already exists in group: %(rule)s"
msgstr ""
-#: nova/exception.py:900
+#: nova/exception.py:912
msgid "No Unique Match Found."
msgstr ""
-#: nova/exception.py:905
+#: nova/exception.py:917
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr "Migration %(migration_id)s could not be found."
-#: nova/exception.py:909
+#: nova/exception.py:921
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr "Migration not found for instance %(instance_id)s with status %(status)s."
-#: nova/exception.py:914
+#: nova/exception.py:926
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr "Console pool %(pool_id)s could not be found."
-#: nova/exception.py:918
+#: nova/exception.py:930
#, python-format
msgid ""
"Console pool with host %(host)s, console_type %(console_type)s and "
"compute_host %(compute_host)s already exists."
msgstr ""
-#: nova/exception.py:924
+#: nova/exception.py:936
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
@@ -1039,17 +1053,17 @@ msgstr ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
"on proxy host %(host)s not found."
-#: nova/exception.py:930
+#: nova/exception.py:942
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr "Console %(console_id)s could not be found."
-#: nova/exception.py:934
+#: nova/exception.py:946
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr "Console for instance %(instance_uuid)s could not be found."
-#: nova/exception.py:938
+#: nova/exception.py:950
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
@@ -1058,237 +1072,244 @@ msgstr ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
"found."
-#: nova/exception.py:943
+#: nova/exception.py:955
#, fuzzy, python-format
msgid "Invalid console type %(console_type)s"
msgstr "Invalid console type %(console_type)s "
-#: nova/exception.py:947
+#: nova/exception.py:959
#, python-format
msgid "Unavailable console type %(console_type)s."
msgstr ""
-#: nova/exception.py:951
+#: nova/exception.py:963
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr ""
-#: nova/exception.py:956
+#: nova/exception.py:968
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr "Flavor %(flavor_id)s could not be found."
-#: nova/exception.py:960
+#: nova/exception.py:972
#, python-format
msgid "Flavor with name %(flavor_name)s could not be found."
msgstr ""
-#: nova/exception.py:964
+#: nova/exception.py:976
#, fuzzy, python-format
msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination."
msgstr "Flavor access not found for %(flavor_id) / %(project_id) combination."
-#: nova/exception.py:969
+#: nova/exception.py:981
+#, python-format
+msgid ""
+"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
+"retries."
+msgstr ""
+
+#: nova/exception.py:986
#, fuzzy, python-format
msgid "Cell %(cell_name)s doesn't exist."
msgstr "pool %s doesn't exist"
-#: nova/exception.py:973
+#: nova/exception.py:990
#, python-format
msgid "Cell with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:977
+#: nova/exception.py:994
#, python-format
msgid "Inconsistency in cell routing: %(reason)s"
msgstr ""
-#: nova/exception.py:981
+#: nova/exception.py:998
#, python-format
msgid "Service API method not found: %(detail)s"
msgstr ""
-#: nova/exception.py:985
+#: nova/exception.py:1002
#, fuzzy
msgid "Timeout waiting for response from cell"
msgstr "Timed out waiting for RPC response: %s"
-#: nova/exception.py:989
+#: nova/exception.py:1006
#, python-format
msgid "Cell message has reached maximum hop count: %(hop_count)s"
msgstr ""
-#: nova/exception.py:993
+#: nova/exception.py:1010
msgid "No cells available matching scheduling criteria."
msgstr ""
-#: nova/exception.py:997
+#: nova/exception.py:1014
msgid "Cannot update cells configuration file."
msgstr ""
-#: nova/exception.py:1001
+#: nova/exception.py:1018
#, fuzzy, python-format
msgid "Cell is not known for instance %(instance_uuid)s"
msgstr "Destroying VDIs for Instance %(instance_uuid)s"
-#: nova/exception.py:1005
+#: nova/exception.py:1022
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr "Scheduler Host Filter %(filter_name)s could not be found."
-#: nova/exception.py:1009
+#: nova/exception.py:1026
#, python-format
msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:1014
+#: nova/exception.py:1031
#, python-format
msgid ""
"Metric %(name)s could not be found on the compute host node "
"%(host)s.%(node)s."
msgstr ""
-#: nova/exception.py:1019
+#: nova/exception.py:1036
#, python-format
msgid "File %(file_path)s could not be found."
msgstr "File %(file_path)s could not be found."
-#: nova/exception.py:1023
+#: nova/exception.py:1040
msgid "Zero files could be found."
msgstr "Zero files could be found."
-#: nova/exception.py:1027
+#: nova/exception.py:1044
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr "Virtual switch associated with the network adapter %(adapter)s not found."
-#: nova/exception.py:1032
+#: nova/exception.py:1049
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr "Network adapter %(adapter)s could not be found."
-#: nova/exception.py:1036
+#: nova/exception.py:1053
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr "Class %(class_name)s could not be found: %(exception)s"
-#: nova/exception.py:1040
+#: nova/exception.py:1057
msgid "Action not allowed."
msgstr "Action not allowed."
-#: nova/exception.py:1044
+#: nova/exception.py:1061
msgid "Rotation is not allowed for snapshots"
msgstr "Rotation is not allowed for snapshots"
-#: nova/exception.py:1048
+#: nova/exception.py:1065
msgid "Rotation param is required for backup image_type"
msgstr "Rotation param is required for backup image_type"
-#: nova/exception.py:1053 nova/tests/compute/test_keypairs.py:144
+#: nova/exception.py:1070 nova/tests/compute/test_keypairs.py:146
#, fuzzy, python-format
msgid "Key pair '%(key_name)s' already exists."
msgstr "Key pair %(key_name)s already exists."
-#: nova/exception.py:1057
+#: nova/exception.py:1074
#, python-format
msgid "Instance %(name)s already exists."
msgstr "Instance %(name)s already exists."
-#: nova/exception.py:1061
+#: nova/exception.py:1078
#, python-format
msgid "Flavor with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:1065
+#: nova/exception.py:1082
#, python-format
msgid "Flavor with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:1069
+#: nova/exception.py:1086
#, python-format
msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:1074
+#: nova/exception.py:1091
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr "%(path)s is not on shared storage: %(reason)s"
-#: nova/exception.py:1078
+#: nova/exception.py:1095
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr "%(path)s is not on local storage: %(reason)s"
-#: nova/exception.py:1082
+#: nova/exception.py:1099
#, python-format
msgid "Storage error: %(reason)s"
msgstr ""
-#: nova/exception.py:1086
+#: nova/exception.py:1103
#, python-format
msgid "Migration error: %(reason)s"
msgstr ""
-#: nova/exception.py:1090
+#: nova/exception.py:1107
#, python-format
msgid "Migration pre-check error: %(reason)s"
msgstr ""
-#: nova/exception.py:1094
+#: nova/exception.py:1111
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr "Malformed message body: %(reason)s"
-#: nova/exception.py:1100
+#: nova/exception.py:1117
#, python-format
msgid "Could not find config at %(path)s"
msgstr "Could not find config at %(path)s"
-#: nova/exception.py:1104
+#: nova/exception.py:1121
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr "Could not load paste app '%(name)s' from %(path)s"
-#: nova/exception.py:1108
+#: nova/exception.py:1125
msgid "When resizing, instances must change flavor!"
msgstr "When resizing, instances must change flavor!"
-#: nova/exception.py:1112
+#: nova/exception.py:1129
#, python-format
msgid "Resize error: %(reason)s"
msgstr ""
-#: nova/exception.py:1116
+#: nova/exception.py:1133
#, python-format
msgid "Server disk was unable to be resized because: %(reason)s"
msgstr ""
-#: nova/exception.py:1120
+#: nova/exception.py:1137
msgid "Flavor's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:1124
+#: nova/exception.py:1141
msgid "Flavor's disk is too small for requested image."
msgstr ""
-#: nova/exception.py:1128
+#: nova/exception.py:1145
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr "Insufficient free memory on compute node to start %(uuid)s."
-#: nova/exception.py:1132
+#: nova/exception.py:1149
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr "No valid host was found. %(reason)s"
-#: nova/exception.py:1137
+#: nova/exception.py:1154
#, python-format
msgid "Quota exceeded: code=%(code)s"
msgstr ""
-#: nova/exception.py:1144
+#: nova/exception.py:1161
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
@@ -1297,45 +1318,45 @@ msgstr ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)d of %(allowed)d %(resource)s"
-#: nova/exception.py:1149
+#: nova/exception.py:1166
msgid "Maximum number of floating ips exceeded"
msgstr "Maximum number of floating ips exceeded"
-#: nova/exception.py:1153
+#: nova/exception.py:1170
#, fuzzy
msgid "Maximum number of fixed ips exceeded"
msgstr "Maximum number of floating ips exceeded"
-#: nova/exception.py:1157
+#: nova/exception.py:1174
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "Maximum number of metadata items exceeds %(allowed)d"
-#: nova/exception.py:1161
+#: nova/exception.py:1178
msgid "Personality file limit exceeded"
msgstr "Personality file limit exceeded"
-#: nova/exception.py:1165
+#: nova/exception.py:1182
msgid "Personality file path too long"
msgstr "Personality file path too long"
-#: nova/exception.py:1169
+#: nova/exception.py:1186
msgid "Personality file content too long"
msgstr "Personality file content too long"
-#: nova/exception.py:1173 nova/tests/compute/test_keypairs.py:155
+#: nova/exception.py:1190 nova/tests/compute/test_keypairs.py:157
msgid "Maximum number of key pairs exceeded"
msgstr "Maximum number of key pairs exceeded"
-#: nova/exception.py:1178
+#: nova/exception.py:1195
msgid "Maximum number of security groups or rules exceeded"
msgstr "Maximum number of security groups or rules exceeded"
-#: nova/exception.py:1182
+#: nova/exception.py:1199
msgid "Maximum number of ports exceeded"
msgstr ""
-#: nova/exception.py:1186
+#: nova/exception.py:1203
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
@@ -1344,130 +1365,130 @@ msgstr ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
-#: nova/exception.py:1191
+#: nova/exception.py:1208
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr "Aggregate %(aggregate_id)s could not be found."
-#: nova/exception.py:1195
+#: nova/exception.py:1212
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "Aggregate %(aggregate_name)s already exists."
-#: nova/exception.py:1199
+#: nova/exception.py:1216
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr "Aggregate %(aggregate_id)s has no host %(host)s."
-#: nova/exception.py:1203
+#: nova/exception.py:1220
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
-#: nova/exception.py:1208
+#: nova/exception.py:1225
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "Aggregate %(aggregate_id)s already has host %(host)s."
-#: nova/exception.py:1212
+#: nova/exception.py:1229
msgid "Unable to create flavor"
msgstr ""
-#: nova/exception.py:1216
+#: nova/exception.py:1233
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr "Failed to set admin password on %(instance)s because %(reason)s"
-#: nova/exception.py:1222
+#: nova/exception.py:1239
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr "Detected existing vlan with id %(vlan)d"
-#: nova/exception.py:1226
+#: nova/exception.py:1243
msgid "There was a conflict when trying to complete your request."
msgstr ""
-#: nova/exception.py:1232
+#: nova/exception.py:1249
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr "Instance %(instance_id)s could not be found."
-#: nova/exception.py:1236
+#: nova/exception.py:1253
#, fuzzy, python-format
msgid "Info cache for instance %(instance_uuid)s could not be found."
msgstr "Console for instance %(instance_uuid)s could not be found."
-#: nova/exception.py:1241
+#: nova/exception.py:1258
#, fuzzy, python-format
msgid "Node %(node_id)s could not be found."
msgstr "Volume %(volume_id)s could not be found."
-#: nova/exception.py:1245
+#: nova/exception.py:1262
#, fuzzy, python-format
msgid "Node with UUID %(node_uuid)s could not be found."
msgstr "Port %(port_id)s could not be found."
-#: nova/exception.py:1249
+#: nova/exception.py:1266
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr "Marker %(marker)s could not be found."
-#: nova/exception.py:1254
+#: nova/exception.py:1271
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr "Invalid id: %(val)s (expecting \"i-...\")."
-#: nova/exception.py:1258
+#: nova/exception.py:1275
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr "Could not fetch image %(image_id)s"
-#: nova/exception.py:1262
+#: nova/exception.py:1279
#, fuzzy, python-format
msgid "Could not upload image %(image_id)s"
msgstr "Could not fetch image %(image_id)s"
-#: nova/exception.py:1266
+#: nova/exception.py:1283
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr "Task %(task_name)s is already running on host %(host)s"
-#: nova/exception.py:1270
+#: nova/exception.py:1287
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr "Task %(task_name)s is not running on host %(host)s"
-#: nova/exception.py:1274
+#: nova/exception.py:1291
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr "Instance %(instance_uuid)s is locked"
-#: nova/exception.py:1278
+#: nova/exception.py:1295
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr ""
-#: nova/exception.py:1282
+#: nova/exception.py:1299
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
-#: nova/exception.py:1287
+#: nova/exception.py:1304
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
-#: nova/exception.py:1292
-#, fuzzy, python-format
-msgid "Failed to attach network adapter device to %(instance)s"
-msgstr "Failed to dealloc network for deleted instance"
+#: nova/exception.py:1309
+#, python-format
+msgid "Failed to attach network adapter device to %(instance_uuid)s"
+msgstr ""
-#: nova/exception.py:1296
-#, fuzzy, python-format
-msgid "Failed to detach network adapter device from %(instance)s"
-msgstr "Failed to dealloc network for deleted instance"
+#: nova/exception.py:1314
+#, python-format
+msgid "Failed to detach network adapter device from %(instance_uuid)s"
+msgstr ""
-#: nova/exception.py:1300
+#: nova/exception.py:1319
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
@@ -1476,332 +1497,417 @@ msgstr ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
-#: nova/exception.py:1306
+#: nova/exception.py:1325
msgid "User data needs to be valid base 64."
msgstr "User data needs to be valid base 64."
-#: nova/exception.py:1310
+#: nova/exception.py:1329
#, python-format
msgid ""
"Unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1319
+#: nova/exception.py:1338
#, fuzzy, python-format
msgid ""
"Action for request_id %(request_id)s on instance %(instance_uuid)s not "
"found"
msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-#: nova/exception.py:1324
+#: nova/exception.py:1343
#, fuzzy, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "Keypair %(name)s not found for user %(user_id)s"
-#: nova/exception.py:1328
+#: nova/exception.py:1347
#, python-format
msgid ""
"Unexpected VM state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1333
+#: nova/exception.py:1352
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr "The CA file for %(project)s could not be found"
-#: nova/exception.py:1337
+#: nova/exception.py:1356
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr "The CRL file for %(project)s could not be found"
-#: nova/exception.py:1341
+#: nova/exception.py:1360
msgid "Instance recreate is not supported."
msgstr ""
-#: nova/exception.py:1345
+#: nova/exception.py:1364
#, python-format
msgid ""
"The service from servicegroup driver %(driver)s is temporarily "
"unavailable."
msgstr ""
-#: nova/exception.py:1350
+#: nova/exception.py:1369
#, python-format
msgid "%(binary)s attempted direct database access which is not allowed by policy"
msgstr ""
-#: nova/exception.py:1355
+#: nova/exception.py:1374
#, python-format
msgid "Virtualization type '%(virt)s' is not supported by this compute driver"
msgstr ""
-#: nova/exception.py:1360
+#: nova/exception.py:1379
#, python-format
msgid ""
"Requested hardware '%(model)s' is not supported by the '%(virt)s' virt "
"driver"
msgstr ""
-#: nova/exception.py:1365
+#: nova/exception.py:1384
#, python-format
msgid "Invalid Base 64 data for file %(path)s"
msgstr ""
-#: nova/exception.py:1369
+#: nova/exception.py:1388
#, fuzzy, python-format
msgid "Build of instance %(instance_uuid)s aborted: %(reason)s"
msgstr "Instance %(instance_id)s is not in rescue mode"
-#: nova/exception.py:1373
+#: nova/exception.py:1392
#, fuzzy, python-format
msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s"
msgstr "Instance %(instance_id)s is not in rescue mode"
-#: nova/exception.py:1378
+#: nova/exception.py:1397
#, fuzzy, python-format
msgid "Shadow table with name %(name)s already exists."
msgstr "Instance Type with name %(name)s already exists."
-#: nova/exception.py:1383
+#: nova/exception.py:1402
#, python-format
msgid "Instance rollback performed due to: %s"
msgstr ""
-#: nova/exception.py:1389
+#: nova/exception.py:1408
#, fuzzy, python-format
msgid "Unsupported object type %(objtype)s"
msgstr "Expected object of type: %s"
-#: nova/exception.py:1393
+#: nova/exception.py:1412
#, python-format
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr ""
-#: nova/exception.py:1397
+#: nova/exception.py:1416
#, python-format
msgid "Version %(objver)s of %(objname)s is not supported"
msgstr ""
-#: nova/exception.py:1401
+#: nova/exception.py:1420
#, python-format
msgid "Cannot modify readonly field %(field)s"
msgstr ""
-#: nova/exception.py:1405
+#: nova/exception.py:1424
#, python-format
msgid "Object action %(action)s failed because: %(reason)s"
msgstr ""
-#: nova/exception.py:1409
+#: nova/exception.py:1428
#, python-format
msgid "Field %(field)s of %(objname)s is not an instance of Field"
msgstr ""
-#: nova/exception.py:1413
+#: nova/exception.py:1432
#, python-format
msgid "Core API extensions are missing: %(missing_apis)s"
msgstr ""
-#: nova/exception.py:1417
+#: nova/exception.py:1436
#, python-format
msgid "Error during following call to agent: %(method)s"
msgstr ""
-#: nova/exception.py:1421
+#: nova/exception.py:1440
#, python-format
msgid "Unable to contact guest agent. The following call timed out: %(method)s"
msgstr ""
-#: nova/exception.py:1426
+#: nova/exception.py:1445
#, python-format
msgid "Agent does not support the call: %(method)s"
msgstr ""
-#: nova/exception.py:1430
+#: nova/exception.py:1449
#, python-format
msgid "Instance group %(group_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1434
+#: nova/exception.py:1453
#, python-format
msgid "Instance group %(group_uuid)s already exists."
msgstr ""
-#: nova/exception.py:1438
+#: nova/exception.py:1457
#, python-format
msgid "Instance group %(group_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1443
+#: nova/exception.py:1462
#, python-format
msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s."
msgstr ""
-#: nova/exception.py:1448
+#: nova/exception.py:1467
#, python-format
msgid "Instance group %(group_uuid)s has no policy %(policy)s."
msgstr ""
-#: nova/exception.py:1452
+#: nova/exception.py:1471
#, python-format
msgid "Number of retries to plugin (%(num_retries)d) exceeded."
msgstr ""
-#: nova/exception.py:1456
+#: nova/exception.py:1475
#, python-format
msgid "There was an error with the download module %(module)s. %(reason)s"
msgstr ""
-#: nova/exception.py:1461
+#: nova/exception.py:1480
#, python-format
msgid ""
"The metadata for this location will not work with this module %(module)s."
" %(reason)s."
msgstr ""
-#: nova/exception.py:1466
+#: nova/exception.py:1485
#, python-format
msgid "The method %(method_name)s is not implemented."
msgstr ""
-#: nova/exception.py:1470
+#: nova/exception.py:1489
#, python-format
msgid "The module %(module)s is misconfigured: %(reason)s."
msgstr ""
-#: nova/exception.py:1474
+#: nova/exception.py:1493
#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr ""
-#: nova/exception.py:1478
+#: nova/exception.py:1497
#, python-format
msgid "The PCI address %(address)s has an incorrect format."
msgstr ""
-#: nova/exception.py:1482
+#: nova/exception.py:1501
+#, python-format
+msgid ""
+"Invalid PCI Whitelist: The PCI address %(address)s has an invalid "
+"%(field)s."
+msgstr ""
+
+#: nova/exception.py:1506
+msgid ""
+"Invalid PCI Whitelist: The PCI whitelist can specify devname or address, "
+"but not both"
+msgstr ""
+
+#: nova/exception.py:1512
#, python-format
msgid "PCI device %(id)s not found"
msgstr ""
-#: nova/exception.py:1486
+#: nova/exception.py:1516
#, python-format
msgid "PCI Device %(node_id)s:%(address)s not found."
msgstr ""
-#: nova/exception.py:1490
+#: nova/exception.py:1520
#, python-format
msgid ""
"PCI device %(compute_node_id)s:%(address)s is %(status)s instead of "
"%(hopestatus)s"
msgstr ""
-#: nova/exception.py:1496
+#: nova/exception.py:1526
#, python-format
msgid ""
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead "
"of %(hopeowner)s"
msgstr ""
-#: nova/exception.py:1502
+#: nova/exception.py:1532
#, python-format
msgid "PCI device request (%requests)s failed"
msgstr ""
-#: nova/exception.py:1507
+#: nova/exception.py:1537
#, python-format
msgid ""
"Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty "
"pool"
msgstr ""
-#: nova/exception.py:1513
+#: nova/exception.py:1543
#, python-format
msgid "Invalid PCI alias definition: %(reason)s"
msgstr ""
-#: nova/exception.py:1517
+#: nova/exception.py:1547
#, python-format
msgid "PCI alias %(alias)s is not defined"
msgstr ""
-#: nova/exception.py:1522
+#: nova/exception.py:1552
#, python-format
msgid "Not enough parameters: %(reason)s"
msgstr ""
-#: nova/exception.py:1527
+#: nova/exception.py:1557
#, python-format
msgid "Invalid PCI devices Whitelist config %(reason)s"
msgstr ""
-#: nova/exception.py:1531
+#: nova/exception.py:1561
#, python-format
msgid "Cannot change %(node_id)s to %(new_node_id)s"
msgstr ""
-#: nova/exception.py:1541
+#: nova/exception.py:1571
#, python-format
msgid ""
"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
"%(reason)s"
msgstr ""
-#: nova/exception.py:1546
+#: nova/exception.py:1576
#, python-format
msgid "Failed to detach PCI device %(dev)s: %(reason)s"
msgstr ""
-#: nova/exception.py:1550
+#: nova/exception.py:1580
#, python-format
msgid "%(type)s hypervisor does not support PCI devices"
msgstr ""
-#: nova/exception.py:1554
+#: nova/exception.py:1584
#, python-format
msgid "Key manager error: %(reason)s"
msgstr ""
-#: nova/exception.py:1558
+#: nova/exception.py:1588
#, python-format
msgid "Failed to remove volume(s): (%(reason)s)"
msgstr ""
-#: nova/exception.py:1562
+#: nova/exception.py:1592
#, python-format
msgid "Provided video model (%(model)s) is not supported."
msgstr ""
-#: nova/exception.py:1566
+#: nova/exception.py:1596
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr ""
-#: nova/exception.py:1571
+#: nova/exception.py:1601
#, python-format
msgid ""
"The requested amount of video memory %(req_vram)d is higher than the "
"maximum allowed by flavor %(max_vram)d."
msgstr ""
-#: nova/exception.py:1576
+#: nova/exception.py:1606
#, python-format
msgid "Provided watchdog action (%(action)s) is not supported."
msgstr ""
-#: nova/exception.py:1580
+#: nova/exception.py:1610
+msgid ""
+"Live migration of instances with config drives is not supported in "
+"libvirt unless libvirt instance path and drive data is shared across "
+"compute nodes."
+msgstr ""
+
+#: nova/exception.py:1616
+#, python-format
msgid ""
-"Block migration of instances with config drives is not supported in "
-"libvirt."
+"Host %(server)s is running an old version of Nova, live migrations "
+"involving that version may cause data loss. Upgrade Nova on %(server)s "
+"and try again."
msgstr ""
-#: nova/exception.py:1585
+#: nova/exception.py:1622
#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr ""
+#: nova/exception.py:1626
+#, python-format
+msgid ""
+"Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted "
+"%(maxsockets)d:%(maxcores)d:%(maxthreads)d"
+msgstr ""
+
+#: nova/exception.py:1631
+#, python-format
+msgid ""
+"Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted "
+"%(maxsockets)d:%(maxcores)d:%(maxthreads)d"
+msgstr ""
+
+#: nova/exception.py:1636
+#, python-format
+msgid ""
+"Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to"
+" satisfy for vcpus count %(vcpus)d"
+msgstr ""
+
+#: nova/exception.py:1641
+#, python-format
+msgid "Architecture name '%(arch)s' is not recognised"
+msgstr ""
+
+#: nova/exception.py:1645
+msgid "CPU and memory allocation must be provided for all NUMA nodes"
+msgstr ""
+
+#: nova/exception.py:1650
+#, python-format
+msgid ""
+"Image property '%(name)s' is not permitted to override NUMA configuration"
+" set against the flavor"
+msgstr ""
+
+#: nova/exception.py:1655
+msgid ""
+"Asymmetric NUMA topologies require explicit assignment of CPUs and memory"
+" to nodes in image or flavor"
+msgstr ""
+
+#: nova/exception.py:1660
+#, python-format
+msgid "CPU number %(cpunum)d is larger than max %(cpumax)d"
+msgstr ""
+
+#: nova/exception.py:1664
+#, python-format
+msgid "CPU number %(cpunum)d is assigned to two nodes"
+msgstr ""
+
+#: nova/exception.py:1668
+#, python-format
+msgid "CPU number %(cpuset)s is not assigned to any node"
+msgstr ""
+
+#: nova/exception.py:1672
+#, python-format
+msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB"
+msgstr ""
+
#: nova/filters.py:84
#, python-format
msgid "Filter %s returned 0 hosts"
@@ -1816,126 +1922,130 @@ msgstr "Failed to send state update notification"
msgid "Failed to get nw_info"
msgstr "Failed to get info for disk %s"
-#: nova/quota.py:1326
+#: nova/quota.py:1332
#, python-format
msgid "Failed to commit reservations %s"
msgstr ""
-#: nova/quota.py:1349
+#: nova/quota.py:1355
#, python-format
msgid "Failed to roll back reservations %s"
msgstr ""
-#: nova/service.py:160
+#: nova/service.py:161
#, fuzzy, python-format
msgid "Starting %(topic)s node (version %(version)s)"
msgstr "Starting %(topic)s node (version %(vcs_string)s)"
-#: nova/service.py:285
+#: nova/service.py:286
msgid "Service killed that has no database entry"
msgstr "Service killed that has no database entry"
-#: nova/service.py:297
+#: nova/service.py:298
msgid "Service error occurred during cleanup_host"
msgstr ""
-#: nova/service.py:314
+#: nova/service.py:315
#, python-format
msgid "Temporary directory is invalid: %s"
msgstr ""
-#: nova/service.py:339
+#: nova/service.py:340
#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
-#: nova/service.py:424
+#: nova/service.py:433
msgid "serve() can only be called once"
msgstr "serve() can only be called once"
-#: nova/utils.py:148
+#: nova/utils.py:147
#, fuzzy, python-format
msgid "Expected to receive %(exp)s bytes, but actually %(act)s"
msgstr ""
"unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
-#: nova/utils.py:354
+#: nova/utils.py:353
#, python-format
msgid "Couldn't get IPv4 : %(ex)s"
msgstr ""
-#: nova/utils.py:370
+#: nova/utils.py:369
#, python-format
msgid "IPv4 address is not found.: %s"
msgstr ""
-#: nova/utils.py:373
+#: nova/utils.py:372
#, python-format
msgid "Couldn't get IPv4 of %(interface)s : %(ex)s"
msgstr ""
-#: nova/utils.py:388
+#: nova/utils.py:387
#, python-format
msgid "Link Local address is not found.:%s"
msgstr "Link Local address is not found.:%s"
-#: nova/utils.py:391
+#: nova/utils.py:390
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s"
-#: nova/utils.py:412
+#: nova/utils.py:411
#, python-format
msgid "Invalid backend: %s"
msgstr "Invalid backend: %s"
-#: nova/utils.py:457
+#: nova/utils.py:454
#, python-format
msgid "Expected object of type: %s"
msgstr "Expected object of type: %s"
-#: nova/utils.py:485
+#: nova/utils.py:482
#, python-format
msgid "Invalid server_string: %s"
msgstr "Invalid server_string: %s"
-#: nova/utils.py:776 nova/virt/configdrive.py:177
+#: nova/utils.py:773
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr "Could not remove tmpdir: %s"
+#: nova/utils.py:964
+msgid "The input is not a string or unicode"
+msgstr ""
+
#: nova/utils.py:966
#, fuzzy, python-format
msgid "%s is not a string or unicode"
msgstr "Server name is not a string or unicode"
-#: nova/utils.py:970
+#: nova/utils.py:973
#, python-format
msgid "%(name)s has a minimum character requirement of %(min_length)s."
msgstr ""
-#: nova/utils.py:975
+#: nova/utils.py:978
#, python-format
msgid "%(name)s has more than %(max_length)s characters."
msgstr ""
-#: nova/utils.py:985
+#: nova/utils.py:988
#, python-format
msgid "%(value_name)s must be an integer"
msgstr ""
-#: nova/utils.py:991
+#: nova/utils.py:994
#, python-format
msgid "%(value_name)s must be >= %(min_value)d"
msgstr ""
-#: nova/utils.py:997
+#: nova/utils.py:1000
#, python-format
msgid "%(value_name)s must be <= %(max_value)d"
msgstr ""
-#: nova/utils.py:1031
+#: nova/utils.py:1034
#, python-format
msgid "Hypervisor version %s is invalid."
msgstr ""
@@ -1945,277 +2055,239 @@ msgstr ""
msgid "Failed to load %(cfgfile)s: %(ex)s"
msgstr ""
-#: nova/wsgi.py:132
+#: nova/wsgi.py:133
#, python-format
msgid "Could not bind to %(host)s:%(port)s"
msgstr ""
-#: nova/wsgi.py:137
+#: nova/wsgi.py:138
#, python-format
msgid "%(name)s listening on %(host)s:%(port)s"
msgstr "%(name)s listening on %(host)s:%(port)s"
-#: nova/wsgi.py:152 nova/openstack/common/sslutils.py:50
+#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:47
#, fuzzy, python-format
msgid "Unable to find cert_file : %s"
msgstr "Unable to find address %r"
-#: nova/wsgi.py:156 nova/openstack/common/sslutils.py:53
+#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:50
#, fuzzy, python-format
msgid "Unable to find ca_file : %s"
msgstr "Unable to find address %r"
-#: nova/wsgi.py:160 nova/openstack/common/sslutils.py:56
+#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:53
#, fuzzy, python-format
msgid "Unable to find key_file : %s"
msgstr "Unable to find address %r"
-#: nova/wsgi.py:164 nova/openstack/common/sslutils.py:59
+#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:56
msgid ""
"When running server in SSL mode, you must specify both a cert_file and "
"key_file option value in your configuration file"
msgstr ""
-#: nova/wsgi.py:195
+#: nova/wsgi.py:202
#, python-format
msgid "Failed to start %(name)s on %(host)s:%(port)s with SSL support"
msgstr ""
-#: nova/wsgi.py:223
+#: nova/wsgi.py:238
msgid "Stopping WSGI server."
msgstr "Stopping WSGI server."
-#: nova/wsgi.py:242
+#: nova/wsgi.py:258
msgid "WSGI server has stopped."
msgstr "WSGI server has stopped."
-#: nova/wsgi.py:311
+#: nova/wsgi.py:327
msgid "You must implement __call__"
msgstr "You must implement __call__"
-#: nova/api/auth.py:72
-msgid "ratelimit_v3 is removed from v3 api."
-msgstr ""
-
-#: nova/api/auth.py:135
+#: nova/api/auth.py:136
msgid "Invalid service catalog json."
msgstr "Invalid service catalog json."
-#: nova/api/auth.py:159
-msgid "Sourcing roles from deprecated X-Role HTTP header"
-msgstr "Sourcing roles from deprecated X-Role HTTP header"
-
#: nova/api/sizelimit.py:53 nova/api/sizelimit.py:62 nova/api/sizelimit.py:76
#: nova/api/metadata/password.py:62
msgid "Request is too large."
msgstr "Request is too large."
-#: nova/api/ec2/__init__.py:88
+#: nova/api/ec2/__init__.py:89
#, python-format
msgid "FaultWrapper: %s"
msgstr "FaultWrapper: %s"
-#: nova/api/ec2/__init__.py:159
+#: nova/api/ec2/__init__.py:160
msgid "Too many failed authentications."
msgstr "Too many failed authentications."
-#: nova/api/ec2/__init__.py:168
-#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and"
-" will be locked out for %(lock_mins)d minutes."
-msgstr ""
-"Access key %(access_key)s has had %(failures)d failed authentications and"
-" will be locked out for %(lock_mins)d minutes."
-
-#: nova/api/ec2/__init__.py:187
+#: nova/api/ec2/__init__.py:188
msgid "Signature not provided"
msgstr "Signature not provided"
-#: nova/api/ec2/__init__.py:192
+#: nova/api/ec2/__init__.py:193
msgid "Access key not provided"
msgstr "Access key not provided"
-#: nova/api/ec2/__init__.py:228 nova/api/ec2/__init__.py:244
+#: nova/api/ec2/__init__.py:229 nova/api/ec2/__init__.py:245
msgid "Failure communicating with keystone"
msgstr "Failure communicating with keystone"
-#: nova/api/ec2/__init__.py:304
+#: nova/api/ec2/__init__.py:305
#, fuzzy
msgid "Timestamp failed validation."
msgstr "Too many failed authentications."
-#: nova/api/ec2/__init__.py:402
+#: nova/api/ec2/__init__.py:403
#, python-format
msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
msgstr "Unauthorized request for controller=%(controller)s and action=%(action)s"
-#: nova/api/ec2/__init__.py:492
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:495
-#, python-format
-msgid "%(ex_name)s raised: %(ex_str)s"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:519
-#, python-format
-msgid "Environment: %s"
-msgstr "Environment: %s"
-
-#: nova/api/ec2/__init__.py:521
+#: nova/api/ec2/__init__.py:522
msgid "Unknown error occurred."
msgstr ""
-#: nova/api/ec2/cloud.py:395
+#: nova/api/ec2/cloud.py:391
#, python-format
msgid "Create snapshot of volume %s"
msgstr "Create snapshot of volume %s"
-#: nova/api/ec2/cloud.py:420
+#: nova/api/ec2/cloud.py:418
#, python-format
msgid "Could not find key pair(s): %s"
msgstr "Could not find key pair(s): %s"
-#: nova/api/ec2/cloud.py:436
+#: nova/api/ec2/cloud.py:434
#, python-format
msgid "Create key pair %s"
msgstr "Create key pair %s"
-#: nova/api/ec2/cloud.py:448
+#: nova/api/ec2/cloud.py:446
#, python-format
msgid "Import key %s"
msgstr "Import key %s"
-#: nova/api/ec2/cloud.py:461
+#: nova/api/ec2/cloud.py:459
#, python-format
msgid "Delete key pair %s"
msgstr "Delete key pair %s"
-#: nova/api/ec2/cloud.py:603 nova/api/ec2/cloud.py:733
+#: nova/api/ec2/cloud.py:601 nova/api/ec2/cloud.py:731
msgid "need group_name or group_id"
msgstr ""
-#: nova/api/ec2/cloud.py:608
+#: nova/api/ec2/cloud.py:606
msgid "can't build a valid rule"
msgstr ""
-#: nova/api/ec2/cloud.py:616
+#: nova/api/ec2/cloud.py:614
#, python-format
msgid "Invalid IP protocol %(protocol)s"
msgstr ""
-#: nova/api/ec2/cloud.py:650 nova/api/ec2/cloud.py:686
+#: nova/api/ec2/cloud.py:648 nova/api/ec2/cloud.py:684
msgid "No rule for the specified parameters."
msgstr "No rule for the specified parameters."
-#: nova/api/ec2/cloud.py:764
+#: nova/api/ec2/cloud.py:762
#, python-format
msgid "Get console output for instance %s"
msgstr "Get console output for instance %s"
-#: nova/api/ec2/cloud.py:836
+#: nova/api/ec2/cloud.py:834
#, python-format
msgid "Create volume from snapshot %s"
msgstr "Create volume from snapshot %s"
-#: nova/api/ec2/cloud.py:840 nova/api/openstack/compute/contrib/volumes.py:243
+#: nova/api/ec2/cloud.py:838 nova/api/openstack/compute/contrib/volumes.py:243
#, python-format
msgid "Create volume of %s GB"
msgstr "Create volume of %s GB"
-#: nova/api/ec2/cloud.py:880
+#: nova/api/ec2/cloud.py:878
#, python-format
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-#: nova/api/ec2/cloud.py:910 nova/api/openstack/compute/contrib/volumes.py:506
+#: nova/api/ec2/cloud.py:908 nova/api/openstack/compute/contrib/volumes.py:506
#, python-format
msgid "Detach volume %s"
msgstr "Detach volume %s"
-#: nova/api/ec2/cloud.py:1242
+#: nova/api/ec2/cloud.py:1262
msgid "Allocate address"
msgstr "Allocate address"
-#: nova/api/ec2/cloud.py:1247
+#: nova/api/ec2/cloud.py:1267
#, python-format
msgid "Release address %s"
msgstr "Release address %s"
-#: nova/api/ec2/cloud.py:1252
+#: nova/api/ec2/cloud.py:1272
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr "Associate address %(public_ip)s to instance %(instance_id)s"
-#: nova/api/ec2/cloud.py:1262
+#: nova/api/ec2/cloud.py:1282
msgid "Unable to associate IP Address, no fixed_ips."
msgstr "Unable to associate IP Address, no fixed_ips."
-#: nova/api/ec2/cloud.py:1270
-#: nova/api/openstack/compute/contrib/floating_ips.py:249
-#, python-format
-msgid "multiple fixed_ips exist, using the first: %s"
-msgstr "multiple fixed_ips exist, using the first: %s"
-
-#: nova/api/ec2/cloud.py:1283
+#: nova/api/ec2/cloud.py:1303
#, python-format
msgid "Disassociate address %s"
msgstr "Disassociate address %s"
-#: nova/api/ec2/cloud.py:1300 nova/api/openstack/compute/servers.py:918
+#: nova/api/ec2/cloud.py:1320 nova/api/openstack/compute/servers.py:920
#: nova/api/openstack/compute/plugins/v3/multiple_create.py:64
msgid "min_count must be <= max_count"
msgstr "min_count must be <= max_count"
-#: nova/api/ec2/cloud.py:1332
+#: nova/api/ec2/cloud.py:1352
msgid "Image must be available"
msgstr "Image must be available"
-#: nova/api/ec2/cloud.py:1429
+#: nova/api/ec2/cloud.py:1452
#, python-format
msgid "Reboot instance %r"
msgstr "Reboot instance %r"
-#: nova/api/ec2/cloud.py:1542
+#: nova/api/ec2/cloud.py:1567
#, python-format
msgid "De-registering image %s"
msgstr "De-registering image %s"
-#: nova/api/ec2/cloud.py:1558
+#: nova/api/ec2/cloud.py:1583
msgid "imageLocation is required"
msgstr "imageLocation is required"
-#: nova/api/ec2/cloud.py:1578
+#: nova/api/ec2/cloud.py:1603
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr "Registered image %(image_location)s with id %(image_id)s"
-#: nova/api/ec2/cloud.py:1639
+#: nova/api/ec2/cloud.py:1664
msgid "user or group not specified"
msgstr "user or group not specified"
-#: nova/api/ec2/cloud.py:1642
+#: nova/api/ec2/cloud.py:1667
msgid "only group \"all\" is supported"
msgstr "only group \"all\" is supported"
-#: nova/api/ec2/cloud.py:1645
+#: nova/api/ec2/cloud.py:1670
msgid "operation_type must be add or remove"
msgstr "operation_type must be add or remove"
-#: nova/api/ec2/cloud.py:1647
+#: nova/api/ec2/cloud.py:1672
#, python-format
msgid "Updating image %s publicity"
msgstr "Updating image %s publicity"
-#: nova/api/ec2/cloud.py:1660
+#: nova/api/ec2/cloud.py:1685
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr "Not allowed to modify attributes for image %s"
-#: nova/api/ec2/cloud.py:1686
+#: nova/api/ec2/cloud.py:1715
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
@@ -2224,306 +2296,208 @@ msgstr ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
"have a volume attached at root (%(root)s)"
-#: nova/api/ec2/cloud.py:1717
+#: nova/api/ec2/cloud.py:1748
#, python-format
-msgid "Couldn't stop instance within %d sec"
+msgid ""
+"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
+"%(vm_state)s, current task_state: %(task_state)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1736
+#: nova/api/ec2/cloud.py:1772
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr "image of %(instance)s at %(now)s"
-#: nova/api/ec2/cloud.py:1761 nova/api/ec2/cloud.py:1811
+#: nova/api/ec2/cloud.py:1797 nova/api/ec2/cloud.py:1847
msgid "resource_id and tag are required"
msgstr ""
-#: nova/api/ec2/cloud.py:1765 nova/api/ec2/cloud.py:1815
+#: nova/api/ec2/cloud.py:1801 nova/api/ec2/cloud.py:1851
#, fuzzy
msgid "Expecting a list of resources"
msgstr "Getting list of instances"
-#: nova/api/ec2/cloud.py:1770 nova/api/ec2/cloud.py:1820
-#: nova/api/ec2/cloud.py:1878
+#: nova/api/ec2/cloud.py:1806 nova/api/ec2/cloud.py:1856
+#: nova/api/ec2/cloud.py:1914
#, fuzzy
msgid "Only instances implemented"
msgstr "instance not present"
-#: nova/api/ec2/cloud.py:1774 nova/api/ec2/cloud.py:1824
+#: nova/api/ec2/cloud.py:1810 nova/api/ec2/cloud.py:1860
#, fuzzy
msgid "Expecting a list of tagSets"
msgstr "Getting list of instances"
-#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1833
+#: nova/api/ec2/cloud.py:1816 nova/api/ec2/cloud.py:1869
msgid "Expecting tagSet to be key/value pairs"
msgstr ""
-#: nova/api/ec2/cloud.py:1787
+#: nova/api/ec2/cloud.py:1823
msgid "Expecting both key and value to be set"
msgstr ""
-#: nova/api/ec2/cloud.py:1838
+#: nova/api/ec2/cloud.py:1874
msgid "Expecting key to be set"
msgstr ""
-#: nova/api/ec2/cloud.py:1912
+#: nova/api/ec2/cloud.py:1948
msgid "Invalid CIDR"
msgstr "Invalid CIDR"
-#: nova/api/ec2/ec2utils.py:254
+#: nova/api/ec2/ec2utils.py:255
#, python-format
msgid "Unacceptable attach status:%s for ec2 API."
msgstr ""
-#: nova/api/ec2/ec2utils.py:277
+#: nova/api/ec2/ec2utils.py:278
msgid "Request must include either Timestamp or Expires, but cannot contain both"
msgstr ""
-#: nova/api/ec2/ec2utils.py:295
+#: nova/api/ec2/ec2utils.py:296
#, fuzzy
msgid "Timestamp is invalid."
msgstr "The request is invalid."
-#: nova/api/metadata/handler.py:111
-msgid ""
-"X-Instance-ID present in request headers. The "
-"'service_neutron_metadata_proxy' option must be enabled to process this "
-"header."
-msgstr ""
-
-#: nova/api/metadata/handler.py:140 nova/api/metadata/handler.py:147
+#: nova/api/metadata/handler.py:148
#, python-format
msgid "Failed to get metadata for ip: %s"
msgstr "Failed to get metadata for ip: %s"
-#: nova/api/metadata/handler.py:142 nova/api/metadata/handler.py:198
+#: nova/api/metadata/handler.py:150 nova/api/metadata/handler.py:207
msgid "An unknown error has occurred. Please try your request again."
msgstr "An unknown error has occurred. Please try your request again."
-#: nova/api/metadata/handler.py:160
+#: nova/api/metadata/handler.py:169
msgid "X-Instance-ID header is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:162
+#: nova/api/metadata/handler.py:171
msgid "X-Tenant-ID header is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:164
+#: nova/api/metadata/handler.py:173
msgid "Multiple X-Instance-ID headers found within request."
msgstr ""
-#: nova/api/metadata/handler.py:166
+#: nova/api/metadata/handler.py:175
msgid "Multiple X-Tenant-ID headers found within request."
msgstr ""
-#: nova/api/metadata/handler.py:180
-#, python-format
-msgid ""
-"X-Instance-ID-Signature: %(signature)s does not match the expected value:"
-" %(expected_signature)s for id: %(instance_id)s. Request From: "
-"%(remote_address)s"
-msgstr ""
-
-#: nova/api/metadata/handler.py:189
+#: nova/api/metadata/handler.py:198
#, fuzzy
msgid "Invalid proxy request signature."
msgstr "Invalid request: %s"
-#: nova/api/metadata/handler.py:196 nova/api/metadata/handler.py:203
+#: nova/api/metadata/handler.py:205
#, fuzzy, python-format
msgid "Failed to get metadata for instance id: %s"
msgstr "Failed to get metadata for ip: %s"
-#: nova/api/metadata/handler.py:207
-#, python-format
-msgid ""
-"Tenant_id %(tenant_id)s does not match tenant_id of instance "
-"%(instance_id)s."
-msgstr ""
-
-#: nova/api/metadata/vendordata_json.py:47
-msgid "file does not exist"
-msgstr ""
-
-#: nova/api/metadata/vendordata_json.py:49
-msgid "Unexpected IOError when reading"
-msgstr ""
-
-#: nova/api/metadata/vendordata_json.py:52
-msgid "failed to load json"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:89
+#: nova/api/openstack/__init__.py:92
#, python-format
msgid "Caught error: %s"
msgstr "Caught error: %s"
-#: nova/api/openstack/__init__.py:98
-#, python-format
-msgid "%(url)s returned with HTTP %(status)d"
-msgstr "%(url)s returned with HTTP %(status)d"
-
-#: nova/api/openstack/__init__.py:190
+#: nova/api/openstack/__init__.py:189
msgid "Must specify an ExtensionManager class"
msgstr "Must specify an ExtensionManager class"
-#: nova/api/openstack/__init__.py:236 nova/api/openstack/__init__.py:410
-#, python-format
-msgid ""
-"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
-"resource"
-msgstr ""
-"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
-"resource"
-
-#: nova/api/openstack/__init__.py:283
-#: nova/api/openstack/compute/plugins/v3/servers.py:99
-#, python-format
-msgid "Not loading %s because it is in the blacklist"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:288
-#: nova/api/openstack/compute/plugins/v3/servers.py:104
-#, python-format
-msgid "Not loading %s because it is not in the whitelist"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:295
-msgid "V3 API has been disabled by configuration"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:308
-#, python-format
-msgid "Extensions in both blacklist and whitelist: %s"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:332
-#, fuzzy, python-format
-msgid "Missing core API extensions: %s"
-msgstr "Loading extension %s"
-
-#: nova/api/openstack/common.py:132
-#, python-format
-msgid ""
-"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. "
-"Bad upgrade or db corrupted?"
-msgstr ""
-"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. "
-"Bad upgrade or db corrupted?"
-
-#: nova/api/openstack/common.py:182
+#: nova/api/openstack/common.py:185
#, python-format
msgid "%s param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:185
+#: nova/api/openstack/common.py:188
#, python-format
msgid "%s param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:210
+#: nova/api/openstack/common.py:213
msgid "offset param must be an integer"
msgstr "offset param must be an integer"
-#: nova/api/openstack/common.py:216
+#: nova/api/openstack/common.py:219
msgid "limit param must be an integer"
msgstr "limit param must be an integer"
-#: nova/api/openstack/common.py:220
+#: nova/api/openstack/common.py:223
msgid "limit param must be positive"
msgstr "limit param must be positive"
-#: nova/api/openstack/common.py:224
+#: nova/api/openstack/common.py:227
msgid "offset param must be positive"
msgstr "offset param must be positive"
-#: nova/api/openstack/common.py:259 nova/api/openstack/compute/flavors.py:146
-#: nova/api/openstack/compute/servers.py:603
-#: nova/api/openstack/compute/plugins/v3/flavors.py:110
-#: nova/api/openstack/compute/plugins/v3/servers.py:280
-#, python-format
-msgid "marker [%s] not found"
-msgstr "marker [%s] not found"
-
-#: nova/api/openstack/common.py:299
+#: nova/api/openstack/common.py:280
#, python-format
msgid "href %s does not contain version"
msgstr "href %s does not contain version"
-#: nova/api/openstack/common.py:314
+#: nova/api/openstack/common.py:293
msgid "Image metadata limit exceeded"
msgstr "Image metadata limit exceeded"
-#: nova/api/openstack/common.py:322
+#: nova/api/openstack/common.py:301
msgid "Image metadata key cannot be blank"
msgstr "Image metadata key cannot be blank"
-#: nova/api/openstack/common.py:325
+#: nova/api/openstack/common.py:304
msgid "Image metadata key too long"
msgstr "Image metadata key too long"
-#: nova/api/openstack/common.py:328
+#: nova/api/openstack/common.py:307
msgid "Invalid image metadata"
msgstr "Invalid image metadata"
-#: nova/api/openstack/common.py:391
+#: nova/api/openstack/common.py:370
#, python-format
msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
msgstr "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
-#: nova/api/openstack/common.py:394
+#: nova/api/openstack/common.py:373
#, python-format
msgid "Cannot '%s' an instance which has never been active"
msgstr ""
-#: nova/api/openstack/common.py:397
+#: nova/api/openstack/common.py:376
#, fuzzy, python-format
msgid "Instance is in an invalid state for '%s'"
msgstr "Instance is in an invalid state for '%(action)s'"
-#: nova/api/openstack/common.py:477
-msgid "Rejecting snapshot request, snapshots currently disabled"
-msgstr "Rejecting snapshot request, snapshots currently disabled"
-
-#: nova/api/openstack/common.py:479
+#: nova/api/openstack/common.py:458
msgid "Instance snapshots are not permitted at this time."
msgstr "Instance snapshots are not permitted at this time."
-#: nova/api/openstack/common.py:600
+#: nova/api/openstack/common.py:579
msgid "Cells is not enabled."
msgstr ""
-#: nova/api/openstack/extensions.py:197
+#: nova/api/openstack/extensions.py:198
#, python-format
msgid "Loaded extension: %s"
msgstr "Loaded extension: %s"
-#: nova/api/openstack/extensions.py:243
+#: nova/api/openstack/extensions.py:244
#: nova/api/openstack/compute/plugins/__init__.py:51
#, python-format
msgid "Exception loading extension: %s"
msgstr "Exception loading extension: %s"
-#: nova/api/openstack/extensions.py:278
-#, python-format
-msgid "Failed to load extension %(ext_factory)s: %(exc)s"
-msgstr "Failed to load extension %(ext_factory)s: %(exc)s"
-
-#: nova/api/openstack/extensions.py:349
+#: nova/api/openstack/extensions.py:350
#, python-format
msgid "Failed to load extension %(classpath)s: %(exc)s"
msgstr "Failed to load extension %(classpath)s: %(exc)s"
-#: nova/api/openstack/extensions.py:372
+#: nova/api/openstack/extensions.py:373
#, fuzzy, python-format
msgid "Failed to load extension %(ext_name)s:%(exc)s"
msgstr "Failed to load extension %(ext_name)s: %(exc)s"
-#: nova/api/openstack/extensions.py:494
+#: nova/api/openstack/extensions.py:495
msgid "Unexpected exception in API method"
msgstr ""
-#: nova/api/openstack/extensions.py:495
+#: nova/api/openstack/extensions.py:496
#, python-format
msgid ""
"Unexpected API Error. Please report this at "
@@ -2532,56 +2506,41 @@ msgid ""
"%s"
msgstr ""
-#: nova/api/openstack/wsgi.py:228 nova/api/openstack/wsgi.py:633
+#: nova/api/openstack/wsgi.py:230 nova/api/openstack/wsgi.py:635
msgid "cannot understand JSON"
msgstr "cannot understand JSON"
-#: nova/api/openstack/wsgi.py:638
+#: nova/api/openstack/wsgi.py:640
msgid "too many body keys"
msgstr "too many body keys"
-#: nova/api/openstack/wsgi.py:682
-#, python-format
-msgid "Exception handling resource: %s"
-msgstr "Exception handling resource: %s"
-
-#: nova/api/openstack/wsgi.py:686
-#, python-format
-msgid "Fault thrown: %s"
-msgstr "Fault thrown: %s"
-
-#: nova/api/openstack/wsgi.py:689
-#, python-format
-msgid "HTTP exception thrown: %s"
-msgstr "HTTP exception thrown: %s"
-
-#: nova/api/openstack/wsgi.py:919
+#: nova/api/openstack/wsgi.py:921
#, python-format
msgid "There is no such action: %s"
msgstr "There is no such action: %s"
-#: nova/api/openstack/wsgi.py:922 nova/api/openstack/wsgi.py:949
+#: nova/api/openstack/wsgi.py:924 nova/api/openstack/wsgi.py:951
#: nova/api/openstack/compute/server_metadata.py:57
#: nova/api/openstack/compute/server_metadata.py:75
#: nova/api/openstack/compute/server_metadata.py:100
#: nova/api/openstack/compute/server_metadata.py:126
-#: nova/api/openstack/compute/contrib/evacuate.py:45
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:58
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:73
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:95
+#: nova/api/openstack/compute/contrib/evacuate.py:47
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:60
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:75
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:97
msgid "Malformed request body"
msgstr "Malformed request body"
-#: nova/api/openstack/wsgi.py:926
+#: nova/api/openstack/wsgi.py:928
#, python-format
msgid "Action: '%(action)s', body: %(body)s"
msgstr ""
-#: nova/api/openstack/wsgi.py:946
+#: nova/api/openstack/wsgi.py:948
msgid "Unsupported Content-Type"
msgstr "Unsupported Content-Type"
-#: nova/api/openstack/wsgi.py:958
+#: nova/api/openstack/wsgi.py:960
#, python-format
msgid ""
"Malformed request URL: URL's project_id '%(project_id)s' doesn't match "
@@ -2610,7 +2569,7 @@ msgid "Initializing extension manager."
msgstr "Initializing extension manager."
#: nova/api/openstack/compute/flavors.py:107
-#: nova/api/openstack/compute/plugins/v3/flavors.py:70
+#: nova/api/openstack/compute/plugins/v3/flavors.py:72
#, python-format
msgid "Invalid is_public filter [%s]"
msgstr "Invalid is_public filter [%s]"
@@ -2625,51 +2584,58 @@ msgstr "Invalid minRam filter [%s]"
msgid "Invalid minDisk filter [%s]"
msgstr "Invalid minDisk filter [%s]"
-#: nova/api/openstack/compute/image_metadata.py:35
-#: nova/api/openstack/compute/images.py:141
-#: nova/api/openstack/compute/images.py:157
+#: nova/api/openstack/compute/flavors.py:146
+#: nova/api/openstack/compute/servers.py:606
+#: nova/api/openstack/compute/plugins/v3/flavors.py:112
+#: nova/api/openstack/compute/plugins/v3/servers.py:303
+#, python-format
+msgid "marker [%s] not found"
+msgstr "marker [%s] not found"
+
+#: nova/api/openstack/compute/image_metadata.py:37
+#: nova/api/openstack/compute/images.py:135
+#: nova/api/openstack/compute/images.py:151
msgid "Image not found."
msgstr "Image not found."
-#: nova/api/openstack/compute/image_metadata.py:78
+#: nova/api/openstack/compute/image_metadata.py:81
msgid "Incorrect request body format"
msgstr "Incorrect request body format"
-#: nova/api/openstack/compute/image_metadata.py:82
+#: nova/api/openstack/compute/image_metadata.py:85
#: nova/api/openstack/compute/server_metadata.py:79
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:108
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:85
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:77
+#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:72
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:79
msgid "Request body and URI mismatch"
msgstr "Request body and URI mismatch"
-#: nova/api/openstack/compute/image_metadata.py:85
+#: nova/api/openstack/compute/image_metadata.py:88
#: nova/api/openstack/compute/server_metadata.py:83
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:111
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:88
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:81
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:83
msgid "Request body contains too many items"
msgstr "Request body contains too many items"
-#: nova/api/openstack/compute/image_metadata.py:117
+#: nova/api/openstack/compute/image_metadata.py:122
msgid "Invalid metadata key"
msgstr "Invalid metadata key"
-#: nova/api/openstack/compute/images.py:162
+#: nova/api/openstack/compute/images.py:156
msgid "You are not allowed to delete the image."
msgstr ""
#: nova/api/openstack/compute/ips.py:67
-#: nova/api/openstack/compute/plugins/v3/ips.py:39
+#: nova/api/openstack/compute/plugins/v3/ips.py:41
msgid "Instance does not exist"
msgstr "Instance does not exist"
-#: nova/api/openstack/compute/ips.py:90
-#: nova/api/openstack/compute/plugins/v3/ips.py:60
+#: nova/api/openstack/compute/ips.py:84
+#: nova/api/openstack/compute/plugins/v3/ips.py:56
msgid "Instance is not a member of specified network"
msgstr "Instance is not a member of specified network"
-#: nova/api/openstack/compute/limits.py:161
+#: nova/api/openstack/compute/limits.py:162
#, python-format
msgid ""
"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
@@ -2678,445 +2644,372 @@ msgstr ""
"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
"%(unit_string)s."
-#: nova/api/openstack/compute/limits.py:287
+#: nova/api/openstack/compute/limits.py:288
msgid "This request was rate-limited."
msgstr "This request was rate-limited."
#: nova/api/openstack/compute/server_metadata.py:37
#: nova/api/openstack/compute/server_metadata.py:122
#: nova/api/openstack/compute/server_metadata.py:177
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:41
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:43
msgid "Server does not exist"
msgstr "Server does not exist"
#: nova/api/openstack/compute/server_metadata.py:157
#: nova/api/openstack/compute/server_metadata.py:168
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:144
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:156
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:146
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:158
msgid "Metadata item was not found"
msgstr "Metadata item was not found"
-#: nova/api/openstack/compute/servers.py:81
-msgid ""
-"XML support has been deprecated and may be removed as early as the Juno "
-"release."
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:551
-#: nova/api/openstack/compute/contrib/cells.py:423
-#: nova/api/openstack/compute/plugins/v3/cells.py:331
+#: nova/api/openstack/compute/servers.py:554
+#: nova/api/openstack/compute/contrib/cells.py:427
msgid "Invalid changes-since value"
msgstr "Invalid changes-since value"
-#: nova/api/openstack/compute/servers.py:570
-#: nova/api/openstack/compute/plugins/v3/servers.py:234
+#: nova/api/openstack/compute/servers.py:573
+#: nova/api/openstack/compute/plugins/v3/servers.py:257
msgid "Only administrators may list deleted instances"
msgstr "Only administrators may list deleted instances"
-#: nova/api/openstack/compute/servers.py:606
-#: nova/api/openstack/compute/plugins/v3/servers.py:283
-#, fuzzy, python-format
-msgid "Flavor '%s' could not be found "
-msgstr "Host '%s' could not be found."
-
-#: nova/api/openstack/compute/servers.py:625
-#: nova/api/openstack/compute/servers.py:772
-#: nova/api/openstack/compute/servers.py:1079
-#: nova/api/openstack/compute/servers.py:1199
-#: nova/api/openstack/compute/servers.py:1384
-#: nova/api/openstack/compute/plugins/v3/servers.py:615
-#: nova/api/openstack/compute/plugins/v3/servers.py:727
-#: nova/api/openstack/compute/plugins/v3/servers.py:846
+#: nova/api/openstack/compute/servers.py:627
+#: nova/api/openstack/compute/servers.py:774
+#: nova/api/openstack/compute/servers.py:1078
+#: nova/api/openstack/compute/servers.py:1203
+#: nova/api/openstack/compute/servers.py:1388
+#: nova/api/openstack/compute/plugins/v3/servers.py:650
+#: nova/api/openstack/compute/plugins/v3/servers.py:768
+#: nova/api/openstack/compute/plugins/v3/servers.py:889
msgid "Instance could not be found"
msgstr "Instance could not be found"
-#: nova/api/openstack/compute/servers.py:656
+#: nova/api/openstack/compute/servers.py:658
#, python-format
msgid "Bad personality format: missing %s"
msgstr "Bad personality format: missing %s"
-#: nova/api/openstack/compute/servers.py:659
+#: nova/api/openstack/compute/servers.py:661
msgid "Bad personality format"
msgstr "Bad personality format"
-#: nova/api/openstack/compute/servers.py:662
+#: nova/api/openstack/compute/servers.py:664
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr "Personality content for %s cannot be decoded"
-#: nova/api/openstack/compute/servers.py:677
+#: nova/api/openstack/compute/servers.py:679
msgid "Unknown argument : port"
msgstr ""
-#: nova/api/openstack/compute/servers.py:680
-#: nova/api/openstack/compute/plugins/v3/servers.py:338
+#: nova/api/openstack/compute/servers.py:682
+#: nova/api/openstack/compute/plugins/v3/servers.py:361
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr "Bad port format: port uuid is not in proper format (%s)"
-#: nova/api/openstack/compute/servers.py:690
-#: nova/api/openstack/compute/plugins/v3/servers.py:354
+#: nova/api/openstack/compute/servers.py:692
+#: nova/api/openstack/compute/plugins/v3/servers.py:377
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr "Bad networks format: network uuid is not in proper format (%s)"
-#: nova/api/openstack/compute/servers.py:701
-#: nova/api/openstack/compute/plugins/v3/servers.py:327
+#: nova/api/openstack/compute/servers.py:703
+#: nova/api/openstack/compute/plugins/v3/servers.py:350
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr "Invalid fixed IP address (%s)"
-#: nova/api/openstack/compute/servers.py:714
-#: nova/api/openstack/compute/plugins/v3/servers.py:369
+#: nova/api/openstack/compute/servers.py:716
+#: nova/api/openstack/compute/plugins/v3/servers.py:392
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr "Duplicate networks (%s) are not allowed"
-#: nova/api/openstack/compute/servers.py:720
-#: nova/api/openstack/compute/plugins/v3/servers.py:375
+#: nova/api/openstack/compute/servers.py:722
+#: nova/api/openstack/compute/plugins/v3/servers.py:398
#, python-format
msgid "Bad network format: missing %s"
msgstr "Bad network format: missing %s"
-#: nova/api/openstack/compute/servers.py:723
-#: nova/api/openstack/compute/servers.py:824
-#: nova/api/openstack/compute/plugins/v3/servers.py:378
+#: nova/api/openstack/compute/servers.py:725
+#: nova/api/openstack/compute/servers.py:826
+#: nova/api/openstack/compute/plugins/v3/servers.py:401
msgid "Bad networks format"
msgstr "Bad networks format"
-#: nova/api/openstack/compute/servers.py:749
+#: nova/api/openstack/compute/servers.py:751
msgid "Userdata content cannot be decoded"
msgstr "Userdata content cannot be decoded"
-#: nova/api/openstack/compute/servers.py:754
+#: nova/api/openstack/compute/servers.py:756
msgid "accessIPv4 is not proper IPv4 format"
msgstr "accessIPv4 is not proper IPv4 format"
-#: nova/api/openstack/compute/servers.py:759
+#: nova/api/openstack/compute/servers.py:761
msgid "accessIPv6 is not proper IPv6 format"
msgstr "accessIPv6 is not proper IPv6 format"
-#: nova/api/openstack/compute/servers.py:788
-#: nova/api/openstack/compute/plugins/v3/servers.py:419
+#: nova/api/openstack/compute/servers.py:790
+#: nova/api/openstack/compute/plugins/v3/servers.py:443
msgid "Server name is not defined"
msgstr "Server name is not defined"
-#: nova/api/openstack/compute/servers.py:840
-#: nova/api/openstack/compute/servers.py:968
+#: nova/api/openstack/compute/servers.py:842
+#: nova/api/openstack/compute/servers.py:970
msgid "Invalid flavorRef provided."
msgstr "Invalid flavorRef provided."
-#: nova/api/openstack/compute/servers.py:880
+#: nova/api/openstack/compute/servers.py:882
msgid ""
"Using different block_device_mapping syntaxes is not allowed in the same "
"request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:965
-#: nova/api/openstack/compute/plugins/v3/servers.py:495
+#: nova/api/openstack/compute/servers.py:967
+#: nova/api/openstack/compute/plugins/v3/servers.py:519
msgid "Can not find requested image"
msgstr "Can not find requested image"
-#: nova/api/openstack/compute/servers.py:971
-#: nova/api/openstack/compute/plugins/v3/servers.py:501
+#: nova/api/openstack/compute/servers.py:973
+#: nova/api/openstack/compute/plugins/v3/servers.py:525
msgid "Invalid key_name provided."
msgstr "Invalid key_name provided."
-#: nova/api/openstack/compute/servers.py:974
-#: nova/api/openstack/compute/plugins/v3/servers.py:504
+#: nova/api/openstack/compute/servers.py:976
+#: nova/api/openstack/compute/plugins/v3/servers.py:528
msgid "Invalid config_drive provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1064
+#: nova/api/openstack/compute/servers.py:1063
msgid "HostId cannot be updated."
msgstr "HostId cannot be updated."
-#: nova/api/openstack/compute/servers.py:1068
+#: nova/api/openstack/compute/servers.py:1067
#, fuzzy
msgid "Personality cannot be updated."
msgstr "HostId cannot be updated."
-#: nova/api/openstack/compute/servers.py:1094
-#: nova/api/openstack/compute/servers.py:1113
-#: nova/api/openstack/compute/plugins/v3/servers.py:626
-#: nova/api/openstack/compute/plugins/v3/servers.py:642
+#: nova/api/openstack/compute/servers.py:1093
+#: nova/api/openstack/compute/servers.py:1112
+#: nova/api/openstack/compute/plugins/v3/servers.py:662
+#: nova/api/openstack/compute/plugins/v3/servers.py:679
msgid "Instance has not been resized."
msgstr "Instance has not been resized."
-#: nova/api/openstack/compute/servers.py:1116
-#: nova/api/openstack/compute/plugins/v3/servers.py:645
+#: nova/api/openstack/compute/servers.py:1115
+#: nova/api/openstack/compute/plugins/v3/servers.py:682
#, fuzzy
msgid "Flavor used by the instance could not be found."
msgstr "Instance %(instance_id)s could not be found."
-#: nova/api/openstack/compute/servers.py:1132
-#: nova/api/openstack/compute/plugins/v3/servers.py:659
+#: nova/api/openstack/compute/servers.py:1131
+#: nova/api/openstack/compute/plugins/v3/servers.py:697
msgid "Argument 'type' for reboot must be a string"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1138
-#: nova/api/openstack/compute/plugins/v3/servers.py:665
+#: nova/api/openstack/compute/servers.py:1137
+#: nova/api/openstack/compute/plugins/v3/servers.py:703
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr "Argument 'type' for reboot is not HARD or SOFT"
-#: nova/api/openstack/compute/servers.py:1142
-#: nova/api/openstack/compute/plugins/v3/servers.py:669
+#: nova/api/openstack/compute/servers.py:1141
+#: nova/api/openstack/compute/plugins/v3/servers.py:707
msgid "Missing argument 'type' for reboot"
msgstr "Missing argument 'type' for reboot"
-#: nova/api/openstack/compute/servers.py:1169
-#: nova/api/openstack/compute/plugins/v3/servers.py:697
+#: nova/api/openstack/compute/servers.py:1168
+#: nova/api/openstack/compute/plugins/v3/servers.py:735
msgid "Unable to locate requested flavor."
msgstr "Unable to locate requested flavor."
-#: nova/api/openstack/compute/servers.py:1172
-#: nova/api/openstack/compute/plugins/v3/servers.py:700
+#: nova/api/openstack/compute/servers.py:1171
+#: nova/api/openstack/compute/plugins/v3/servers.py:738
msgid "Resize requires a flavor change."
msgstr "Resize requires a flavor change."
-#: nova/api/openstack/compute/servers.py:1180
-#: nova/api/openstack/compute/plugins/v3/servers.py:708
+#: nova/api/openstack/compute/servers.py:1181
+#: nova/api/openstack/compute/plugins/v3/servers.py:748
msgid "You are not authorized to access the image the instance was started with."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1184
-#: nova/api/openstack/compute/plugins/v3/servers.py:712
+#: nova/api/openstack/compute/servers.py:1185
+#: nova/api/openstack/compute/plugins/v3/servers.py:752
#, fuzzy
msgid "Image that the instance was started with could not be found."
msgstr "Instance %(instance_id)s could not be found."
-#: nova/api/openstack/compute/servers.py:1188
-#: nova/api/openstack/compute/plugins/v3/servers.py:716
+#: nova/api/openstack/compute/servers.py:1189
+#: nova/api/openstack/compute/plugins/v3/servers.py:756
#, fuzzy
msgid "Invalid instance image."
msgstr "%s is a valid instance name"
-#: nova/api/openstack/compute/servers.py:1211
+#: nova/api/openstack/compute/servers.py:1215
msgid "Missing imageRef attribute"
msgstr "Missing imageRef attribute"
-#: nova/api/openstack/compute/servers.py:1216
-#: nova/api/openstack/compute/servers.py:1224
+#: nova/api/openstack/compute/servers.py:1220
+#: nova/api/openstack/compute/servers.py:1228
msgid "Invalid imageRef provided."
msgstr "Invalid imageRef provided."
-#: nova/api/openstack/compute/servers.py:1254
+#: nova/api/openstack/compute/servers.py:1258
msgid "Missing flavorRef attribute"
msgstr "Missing flavorRef attribute"
-#: nova/api/openstack/compute/servers.py:1267
+#: nova/api/openstack/compute/servers.py:1271
msgid "No adminPass was specified"
msgstr "No adminPass was specified"
-#: nova/api/openstack/compute/servers.py:1275
+#: nova/api/openstack/compute/servers.py:1279
#: nova/api/openstack/compute/plugins/v3/admin_password.py:56
#, fuzzy
msgid "Unable to set password on instance"
msgstr "Failed to soft reboot instance."
-#: nova/api/openstack/compute/servers.py:1284
+#: nova/api/openstack/compute/servers.py:1288
msgid "Unable to parse metadata key/value pairs."
msgstr "Unable to parse metadata key/value pairs."
-#: nova/api/openstack/compute/servers.py:1297
+#: nova/api/openstack/compute/servers.py:1301
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr "Resize request has invalid 'flavorRef' attribute."
-#: nova/api/openstack/compute/servers.py:1300
+#: nova/api/openstack/compute/servers.py:1304
msgid "Resize requests require 'flavorRef' attribute."
msgstr "Resize requests require 'flavorRef' attribute."
-#: nova/api/openstack/compute/servers.py:1320
+#: nova/api/openstack/compute/servers.py:1324
msgid "Could not parse imageRef from request."
msgstr "Could not parse imageRef from request."
-#: nova/api/openstack/compute/servers.py:1390
-#: nova/api/openstack/compute/plugins/v3/servers.py:852
+#: nova/api/openstack/compute/servers.py:1394
+#: nova/api/openstack/compute/plugins/v3/servers.py:895
msgid "Cannot find image for rebuild"
msgstr "Cannot find image for rebuild"
-#: nova/api/openstack/compute/servers.py:1423
+#: nova/api/openstack/compute/servers.py:1428
msgid "createImage entity requires name attribute"
msgstr "createImage entity requires name attribute"
-#: nova/api/openstack/compute/servers.py:1432
-#: nova/api/openstack/compute/contrib/admin_actions.py:286
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:85
-#: nova/api/openstack/compute/plugins/v3/servers.py:892
+#: nova/api/openstack/compute/servers.py:1437
+#: nova/api/openstack/compute/contrib/admin_actions.py:283
+#: nova/api/openstack/compute/plugins/v3/servers.py:936
msgid "Invalid metadata"
msgstr "Invalid metadata"
-#: nova/api/openstack/compute/servers.py:1490
+#: nova/api/openstack/compute/servers.py:1495
msgid "Invalid adminPass"
-msgstr "Invalid adminPass"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:63
-#: nova/api/openstack/compute/contrib/admin_actions.py:88
-#: nova/api/openstack/compute/contrib/admin_actions.py:113
-#: nova/api/openstack/compute/contrib/admin_actions.py:135
-#: nova/api/openstack/compute/contrib/admin_actions.py:176
-#: nova/api/openstack/compute/contrib/admin_actions.py:195
-#: nova/api/openstack/compute/contrib/admin_actions.py:214
-#: nova/api/openstack/compute/contrib/admin_actions.py:233
-#: nova/api/openstack/compute/contrib/admin_actions.py:391
-#: nova/api/openstack/compute/contrib/multinic.py:43
-#: nova/api/openstack/compute/contrib/rescue.py:45
-#: nova/api/openstack/compute/contrib/shelve.py:43
-msgid "Server not found"
-msgstr "Server not found"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:66
-msgid "Virt driver does not implement pause function."
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:70
-#, python-format
-msgid "Compute.api::pause %s"
-msgstr "Compute.api::pause %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:91
-msgid "Virt driver does not implement unpause function."
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:95
-#, python-format
-msgid "Compute.api::unpause %s"
-msgstr "Compute.api::unpause %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:117
-#, python-format
-msgid "compute.api::suspend %s"
-msgstr "compute.api::suspend %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:139
-#, python-format
-msgid "compute.api::resume %s"
-msgstr "compute.api::resume %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:163
-#, python-format
-msgid "Error in migrate %s"
-msgstr "Error in migrate %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:182
-#, python-format
-msgid "Compute.api::reset_network %s"
-msgstr "Compute.api::reset_network %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:201
-#, python-format
-msgid "Compute.api::inject_network_info %s"
-msgstr "Compute.api::inject_network_info %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:218
-#, python-format
-msgid "Compute.api::lock %s"
-msgstr "Compute.api::lock %s"
+msgstr "Invalid adminPass"
-#: nova/api/openstack/compute/contrib/admin_actions.py:237
-#, python-format
-msgid "Compute.api::unlock %s"
-msgstr "Compute.api::unlock %s"
+#: nova/api/openstack/compute/contrib/admin_actions.py:64
+#: nova/api/openstack/compute/contrib/admin_actions.py:86
+#: nova/api/openstack/compute/contrib/admin_actions.py:108
+#: nova/api/openstack/compute/contrib/admin_actions.py:130
+#: nova/api/openstack/compute/contrib/admin_actions.py:173
+#: nova/api/openstack/compute/contrib/admin_actions.py:192
+#: nova/api/openstack/compute/contrib/admin_actions.py:211
+#: nova/api/openstack/compute/contrib/admin_actions.py:230
+#: nova/api/openstack/compute/contrib/admin_actions.py:388
+#: nova/api/openstack/compute/contrib/multinic.py:44
+#: nova/api/openstack/compute/contrib/rescue.py:45
+#: nova/api/openstack/compute/contrib/shelve.py:43
+msgid "Server not found"
+msgstr "Server not found"
-#: nova/api/openstack/compute/contrib/admin_actions.py:263
+#: nova/api/openstack/compute/contrib/admin_actions.py:260
#, python-format
msgid "createBackup entity requires %s attribute"
msgstr "createBackup entity requires %s attribute"
-#: nova/api/openstack/compute/contrib/admin_actions.py:267
+#: nova/api/openstack/compute/contrib/admin_actions.py:264
msgid "Malformed createBackup entity"
msgstr "Malformed createBackup entity"
-#: nova/api/openstack/compute/contrib/admin_actions.py:273
+#: nova/api/openstack/compute/contrib/admin_actions.py:270
msgid "createBackup attribute 'rotation' must be an integer"
msgstr "createBackup attribute 'rotation' must be an integer"
-#: nova/api/openstack/compute/contrib/admin_actions.py:276
+#: nova/api/openstack/compute/contrib/admin_actions.py:273
#, fuzzy
msgid "createBackup attribute 'rotation' must be greater than or equal to zero"
msgstr "createBackup attribute 'rotation' must be an integer"
-#: nova/api/openstack/compute/contrib/admin_actions.py:292
-#: nova/api/openstack/compute/contrib/console_output.py:45
+#: nova/api/openstack/compute/contrib/admin_actions.py:289
+#: nova/api/openstack/compute/contrib/console_output.py:46
#: nova/api/openstack/compute/contrib/server_start_stop.py:40
msgid "Instance not found"
msgstr "Instance not found"
-#: nova/api/openstack/compute/contrib/admin_actions.py:323
-#: nova/api/openstack/compute/plugins/v3/migrate_server.py:80
+#: nova/api/openstack/compute/contrib/admin_actions.py:320
msgid ""
"host, block_migration and disk_over_commit must be specified for live "
"migration."
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:360
+#: nova/api/openstack/compute/contrib/admin_actions.py:357
#, fuzzy, python-format
msgid "Live migration of instance %s to another host failed"
msgstr "Live migration of instance %(id)s to host %(host)s failed"
-#: nova/api/openstack/compute/contrib/admin_actions.py:363
+#: nova/api/openstack/compute/contrib/admin_actions.py:360
#, python-format
msgid "Live migration of instance %(id)s to host %(host)s failed"
msgstr "Live migration of instance %(id)s to host %(host)s failed"
-#: nova/api/openstack/compute/contrib/admin_actions.py:381
-#: nova/api/openstack/compute/plugins/v3/admin_actions.py:83
+#: nova/api/openstack/compute/contrib/admin_actions.py:378
#, python-format
msgid "Desired state must be specified. Valid states are: %s"
msgstr "Desired state must be specified. Valid states are: %s"
-#: nova/api/openstack/compute/contrib/admin_actions.py:395
+#: nova/api/openstack/compute/contrib/agents.py:100
+#: nova/api/openstack/compute/contrib/agents.py:118
+#: nova/api/openstack/compute/contrib/agents.py:156
+#: nova/api/openstack/compute/contrib/cloudpipe_update.py:55
#, python-format
-msgid "Compute.api::resetState %s"
-msgstr "Compute.api::resetState %s"
-
-#: nova/api/openstack/compute/contrib/aggregates.py:99
-#, fuzzy, python-format
-msgid "Cannot show aggregate: %s"
-msgstr "Cannot show aggregate: %(id)s"
+msgid "Invalid request body: %s"
+msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:137
-#, fuzzy, python-format
-msgid "Cannot update aggregate: %s"
-msgstr "Cannot update aggregate: %(id)s"
+#: nova/api/openstack/compute/contrib/aggregates.py:39
+msgid "Only host parameter can be specified"
+msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:151
-#, fuzzy, python-format
-msgid "Cannot delete aggregate: %s"
-msgstr "Cannot delete aggregate: %(id)s"
+#: nova/api/openstack/compute/contrib/aggregates.py:42
+msgid "Host parameter must be specified"
+msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:162
+#: nova/api/openstack/compute/contrib/aggregates.py:168
#, python-format
msgid "Aggregates does not have %s action"
msgstr "Aggregates does not have %s action"
-#: nova/api/openstack/compute/contrib/aggregates.py:166
+#: nova/api/openstack/compute/contrib/aggregates.py:172
#: nova/api/openstack/compute/contrib/flavormanage.py:55
#: nova/api/openstack/compute/contrib/keypairs.py:86
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:167
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:169
msgid "Invalid request body"
msgstr "Invalid request body"
-#: nova/api/openstack/compute/contrib/aggregates.py:176
-#: nova/api/openstack/compute/contrib/aggregates.py:181
+#: nova/api/openstack/compute/contrib/aggregates.py:182
+#: nova/api/openstack/compute/contrib/aggregates.py:187
#, python-format
msgid "Cannot add host %(host)s in aggregate %(id)s"
msgstr "Cannot add host %(host)s in aggregate %(id)s"
-#: nova/api/openstack/compute/contrib/aggregates.py:195
-#: nova/api/openstack/compute/contrib/aggregates.py:199
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:151
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:155
+#: nova/api/openstack/compute/contrib/aggregates.py:201
+#: nova/api/openstack/compute/contrib/aggregates.py:205
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:153
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:157
#, python-format
msgid "Cannot remove host %(host)s in aggregate %(id)s"
msgstr "Cannot remove host %(host)s in aggregate %(id)s"
-#: nova/api/openstack/compute/contrib/aggregates.py:218
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:175
+#: nova/api/openstack/compute/contrib/aggregates.py:224
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:177
msgid "The value of metadata must be a dict"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:230
+#: nova/api/openstack/compute/contrib/aggregates.py:237
#, python-format
msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
msgstr "Cannot set metadata %(metadata)s in aggregate %(id)s"
@@ -3132,32 +3025,31 @@ msgstr ""
msgid "Delete snapshot with id: %s"
msgstr "Delete snapshot with id: %s"
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:104
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:103
#, fuzzy
msgid "Attach interface"
msgstr "Failed to add interface: %s"
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:119
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:154
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:177
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:166
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:116
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:145
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:166
+#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:174
+#: nova/network/security_group/neutron_driver.py:510
+#: nova/network/security_group/neutron_driver.py:514
+#: nova/network/security_group/neutron_driver.py:518
+#: nova/network/security_group/neutron_driver.py:522
+#: nova/network/security_group/neutron_driver.py:526
#, fuzzy
msgid "Network driver does not support this function."
msgstr "Virt driver does not implement uptime function."
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:123
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:120
#, fuzzy
msgid "Failed to attach interface"
msgstr "Failed to add interface: %s"
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:130
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:128
-#, fuzzy
-msgid "Attachments update is not supported"
-msgstr "attribute not supported: %s"
-
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:142
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:139
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:136
+#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:144
#, fuzzy, python-format
msgid "Detach interface %s"
msgstr "Starting VLAN interface %s"
@@ -3171,42 +3063,35 @@ msgstr ""
msgid "Must specify id or address"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:252
+#: nova/api/openstack/compute/contrib/cells.py:250
#, fuzzy, python-format
msgid "Cell %(id)s not found."
msgstr "Rule (%s) not found"
-#: nova/api/openstack/compute/contrib/cells.py:285
-#: nova/api/openstack/compute/plugins/v3/cells.py:192
+#: nova/api/openstack/compute/contrib/cells.py:286
#, fuzzy
msgid "Cell name cannot be empty"
msgstr "Security group name cannot be empty"
#: nova/api/openstack/compute/contrib/cells.py:289
-#: nova/api/openstack/compute/plugins/v3/cells.py:196
msgid "Cell name cannot contain '!' or '.'"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:296
-#: nova/api/openstack/compute/plugins/v3/cells.py:203
+#: nova/api/openstack/compute/contrib/cells.py:295
msgid "Cell type must be 'parent' or 'child'"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:352
-#: nova/api/openstack/compute/contrib/cells.py:376
-#: nova/api/openstack/compute/plugins/v3/cells.py:259
-#: nova/api/openstack/compute/plugins/v3/cells.py:282
+#: nova/api/openstack/compute/contrib/cells.py:353
+#: nova/api/openstack/compute/contrib/cells.py:378
#, fuzzy
msgid "No cell information in request"
msgstr "Block device information present: %s"
#: nova/api/openstack/compute/contrib/cells.py:357
-#: nova/api/openstack/compute/plugins/v3/cells.py:264
msgid "No cell name in request"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:411
-#: nova/api/openstack/compute/plugins/v3/cells.py:319
+#: nova/api/openstack/compute/contrib/cells.py:415
msgid "Only 'updated_since', 'project_id' and 'deleted' are understood."
msgstr ""
@@ -3238,19 +3123,19 @@ msgstr ""
msgid "The requested console type details are not accessible"
msgstr ""
-#: nova/api/openstack/compute/contrib/console_output.py:51
+#: nova/api/openstack/compute/contrib/console_output.py:52
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "os-getConsoleOutput malformed or missing from request body"
-#: nova/api/openstack/compute/contrib/console_output.py:62
+#: nova/api/openstack/compute/contrib/console_output.py:63
msgid "Length in request body must be an integer value"
msgstr "Length in request body must be an integer value"
-#: nova/api/openstack/compute/contrib/console_output.py:70
+#: nova/api/openstack/compute/contrib/console_output.py:71
msgid "Unable to get console"
msgstr "Unable to get console"
-#: nova/api/openstack/compute/contrib/console_output.py:75
+#: nova/api/openstack/compute/contrib/console_output.py:76
#: nova/api/openstack/compute/plugins/v3/console_output.py:60
msgid "Unable to get console log, functionality not implemented"
msgstr ""
@@ -3261,17 +3146,17 @@ msgid "Instance not yet ready"
msgstr "instance %s:not booted"
#: nova/api/openstack/compute/contrib/consoles.py:52
-#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:62
+#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:60
msgid "Unable to get vnc console, functionality not implemented"
msgstr ""
#: nova/api/openstack/compute/contrib/consoles.py:76
-#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:93
+#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:89
msgid "Unable to get spice console, functionality not implemented"
msgstr ""
#: nova/api/openstack/compute/contrib/consoles.py:101
-#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:127
+#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:121
msgid "Unable to get rdp console, functionality not implemented"
msgstr ""
@@ -3280,24 +3165,27 @@ msgstr ""
msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr "%s must be either 'MANUAL' or 'AUTO'."
-#: nova/api/openstack/compute/contrib/evacuate.py:53
-#, fuzzy
-msgid "host and onSharedStorage must be specified."
-msgstr "host and block_migration must be specified."
+#: nova/api/openstack/compute/contrib/evacuate.py:54
+msgid "host must be specified."
+msgstr ""
#: nova/api/openstack/compute/contrib/evacuate.py:61
+msgid "onSharedStorage must be specified."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/evacuate.py:69
#: nova/api/openstack/compute/plugins/v3/evacuate.py:67
msgid "admin password can't be changed on existing disk"
msgstr ""
-#: nova/api/openstack/compute/contrib/evacuate.py:71
-#: nova/api/openstack/compute/plugins/v3/evacuate.py:77
+#: nova/api/openstack/compute/contrib/evacuate.py:80
+#: nova/api/openstack/compute/plugins/v3/evacuate.py:78
#, python-format
msgid "Compute host %s not found."
msgstr ""
-#: nova/api/openstack/compute/contrib/evacuate.py:77
-#: nova/api/openstack/compute/plugins/v3/evacuate.py:83
+#: nova/api/openstack/compute/contrib/evacuate.py:86
+#: nova/api/openstack/compute/plugins/v3/evacuate.py:84
msgid "The target host can't be the same one."
msgstr ""
@@ -3324,8 +3212,12 @@ msgstr "Access list not available for public flavors."
msgid "No request body"
msgstr "No request body"
+#: nova/api/openstack/compute/contrib/flavor_access.py:170
+#: nova/api/openstack/compute/contrib/flavor_access.py:194
+msgid "Missing tenant parameter"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:56
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:42
msgid "No Request Body"
msgstr "No Request Body"
@@ -3335,8 +3227,8 @@ msgstr ""
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:134
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:150
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:113
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:132
+#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:96
+#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:115
#, python-format
msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s."
msgstr ""
@@ -3346,78 +3238,87 @@ msgstr ""
msgid "DNS entries not found."
msgstr "Instance not found"
-#: nova/api/openstack/compute/contrib/floating_ips.py:129
-#: nova/api/openstack/compute/contrib/floating_ips.py:177
+#: nova/api/openstack/compute/contrib/floating_ips.py:130
+#: nova/api/openstack/compute/contrib/floating_ips.py:186
#, python-format
msgid "Floating ip not found for id %s"
msgstr "Floating ip not found for id %s"
-#: nova/api/openstack/compute/contrib/floating_ips.py:162
+#: nova/api/openstack/compute/contrib/floating_ips.py:163
#, python-format
msgid "No more floating ips in pool %s."
msgstr "No more floating ips in pool %s."
-#: nova/api/openstack/compute/contrib/floating_ips.py:164
+#: nova/api/openstack/compute/contrib/floating_ips.py:165
msgid "No more floating ips available."
msgstr "No more floating ips available."
-#: nova/api/openstack/compute/contrib/floating_ips.py:218
-#: nova/api/openstack/compute/contrib/floating_ips.py:283
-#: nova/api/openstack/compute/contrib/security_groups.py:481
+#: nova/api/openstack/compute/contrib/floating_ips.py:169
+#, python-format
+msgid "IP allocation over quota in pool %s."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:171
+msgid "IP allocation over quota."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:223
+#: nova/api/openstack/compute/contrib/floating_ips.py:288
+#: nova/api/openstack/compute/contrib/security_groups.py:488
msgid "Missing parameter dict"
msgstr "Missing parameter dict"
-#: nova/api/openstack/compute/contrib/floating_ips.py:221
-#: nova/api/openstack/compute/contrib/floating_ips.py:286
+#: nova/api/openstack/compute/contrib/floating_ips.py:226
+#: nova/api/openstack/compute/contrib/floating_ips.py:291
msgid "Address not specified"
msgstr "Address not specified"
-#: nova/api/openstack/compute/contrib/floating_ips.py:227
+#: nova/api/openstack/compute/contrib/floating_ips.py:232
msgid "No nw_info cache associated with instance"
msgstr "No nw_info cache associated with instance"
-#: nova/api/openstack/compute/contrib/floating_ips.py:232
+#: nova/api/openstack/compute/contrib/floating_ips.py:237
msgid "No fixed ips associated to instance"
msgstr "No fixed ips associated to instance"
-#: nova/api/openstack/compute/contrib/floating_ips.py:243
+#: nova/api/openstack/compute/contrib/floating_ips.py:248
#, fuzzy
msgid "Specified fixed address not assigned to instance"
msgstr "No fixed ips associated to instance"
-#: nova/api/openstack/compute/contrib/floating_ips.py:257
+#: nova/api/openstack/compute/contrib/floating_ips.py:262
msgid "floating ip is already associated"
msgstr "floating ip is already associated"
-#: nova/api/openstack/compute/contrib/floating_ips.py:260
+#: nova/api/openstack/compute/contrib/floating_ips.py:265
msgid "l3driver call to add floating ip failed"
msgstr "l3driver call to add floating ip failed"
-#: nova/api/openstack/compute/contrib/floating_ips.py:263
-#: nova/api/openstack/compute/contrib/floating_ips.py:294
+#: nova/api/openstack/compute/contrib/floating_ips.py:268
+#: nova/api/openstack/compute/contrib/floating_ips.py:299
msgid "floating ip not found"
msgstr "floating ip not found"
-#: nova/api/openstack/compute/contrib/floating_ips.py:268
+#: nova/api/openstack/compute/contrib/floating_ips.py:273
msgid "Error. Unable to associate floating ip"
msgstr "Error. Unable to associate floating ip"
-#: nova/api/openstack/compute/contrib/floating_ips.py:309
+#: nova/api/openstack/compute/contrib/floating_ips.py:314
msgid "Floating ip is not associated"
msgstr "Floating ip is not associated"
-#: nova/api/openstack/compute/contrib/floating_ips.py:313
+#: nova/api/openstack/compute/contrib/floating_ips.py:318
#, fuzzy, python-format
msgid "Floating ip %(address)s is not associated with instance %(id)s."
msgstr "Floating ip %(address)s is not associated."
-#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:118
+#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:116
#: nova/api/openstack/compute/contrib/services.py:173
#: nova/api/openstack/compute/plugins/v3/services.py:124
msgid "Unknown action"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:146
+#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:144
#: nova/cmd/manage.py:417
#, python-format
msgid "/%s should be specified as single address(es) not in cidr format"
@@ -3428,86 +3329,82 @@ msgstr ""
msgid "fping utility is not found."
msgstr "floating ip not found"
-#: nova/api/openstack/compute/contrib/hosts.py:183
-#: nova/api/openstack/compute/plugins/v3/hosts.py:128
+#: nova/api/openstack/compute/contrib/hosts.py:185
#, python-format
msgid "Invalid update setting: '%s'"
msgstr "Invalid update setting: '%s'"
-#: nova/api/openstack/compute/contrib/hosts.py:186
-#: nova/api/openstack/compute/plugins/v3/hosts.py:131
+#: nova/api/openstack/compute/contrib/hosts.py:188
#, python-format
msgid "Invalid status: '%s'"
msgstr "Invalid status: '%s'"
-#: nova/api/openstack/compute/contrib/hosts.py:188
-#: nova/api/openstack/compute/plugins/v3/hosts.py:133
+#: nova/api/openstack/compute/contrib/hosts.py:190
#, python-format
msgid "Invalid mode: '%s'"
msgstr "Invalid mode: '%s'"
-#: nova/api/openstack/compute/contrib/hosts.py:190
-#: nova/api/openstack/compute/plugins/v3/hosts.py:135
+#: nova/api/openstack/compute/contrib/hosts.py:192
msgid "'status' or 'maintenance_mode' needed for host update"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:206
-#: nova/api/openstack/compute/plugins/v3/hosts.py:152
+#: nova/api/openstack/compute/contrib/hosts.py:208
+#: nova/api/openstack/compute/plugins/v3/hosts.py:135
#, fuzzy, python-format
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr "Putting host %(host)s in maintenance mode %(mode)s."
-#: nova/api/openstack/compute/contrib/hosts.py:212
-#: nova/api/openstack/compute/plugins/v3/hosts.py:158
+#: nova/api/openstack/compute/contrib/hosts.py:214
+#: nova/api/openstack/compute/plugins/v3/hosts.py:141
#, fuzzy
msgid "Virt driver does not implement host maintenance mode."
msgstr "Virt driver does not implement uptime function."
-#: nova/api/openstack/compute/contrib/hosts.py:227
-#: nova/api/openstack/compute/plugins/v3/hosts.py:174
+#: nova/api/openstack/compute/contrib/hosts.py:229
+#: nova/api/openstack/compute/plugins/v3/hosts.py:157
#, fuzzy, python-format
msgid "Enabling host %s."
msgstr "Calling setter %s"
-#: nova/api/openstack/compute/contrib/hosts.py:229
-#: nova/api/openstack/compute/plugins/v3/hosts.py:176
+#: nova/api/openstack/compute/contrib/hosts.py:231
+#: nova/api/openstack/compute/plugins/v3/hosts.py:159
#, fuzzy, python-format
msgid "Disabling host %s."
msgstr "Updating host stats"
-#: nova/api/openstack/compute/contrib/hosts.py:234
-#: nova/api/openstack/compute/plugins/v3/hosts.py:181
+#: nova/api/openstack/compute/contrib/hosts.py:236
+#: nova/api/openstack/compute/plugins/v3/hosts.py:164
#, fuzzy
msgid "Virt driver does not implement host disabled status."
msgstr "Virt driver does not implement uptime function."
-#: nova/api/openstack/compute/contrib/hosts.py:250
-#: nova/api/openstack/compute/plugins/v3/hosts.py:199
+#: nova/api/openstack/compute/contrib/hosts.py:252
+#: nova/api/openstack/compute/plugins/v3/hosts.py:182
#, fuzzy
msgid "Virt driver does not implement host power management."
msgstr "Virt driver does not implement uptime function."
-#: nova/api/openstack/compute/contrib/hosts.py:336
-#: nova/api/openstack/compute/plugins/v3/hosts.py:292
+#: nova/api/openstack/compute/contrib/hosts.py:338
+#: nova/api/openstack/compute/plugins/v3/hosts.py:275
msgid "Describe-resource is admin only functionality"
msgstr "Describe-resource is admin only functionality"
-#: nova/api/openstack/compute/contrib/hypervisors.py:193
-#: nova/api/openstack/compute/contrib/hypervisors.py:205
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:93
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:105
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:140
+#: nova/api/openstack/compute/contrib/hypervisors.py:208
+#: nova/api/openstack/compute/contrib/hypervisors.py:220
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:100
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:112
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:147
#, python-format
msgid "Hypervisor with ID '%s' could not be found."
msgstr "Hypervisor with ID '%s' could not be found."
-#: nova/api/openstack/compute/contrib/hypervisors.py:213
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:113
+#: nova/api/openstack/compute/contrib/hypervisors.py:228
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:120
msgid "Virt driver does not implement uptime function."
msgstr "Virt driver does not implement uptime function."
-#: nova/api/openstack/compute/contrib/hypervisors.py:229
-#: nova/api/openstack/compute/contrib/hypervisors.py:239
+#: nova/api/openstack/compute/contrib/hypervisors.py:244
+#: nova/api/openstack/compute/contrib/hypervisors.py:254
#, python-format
msgid "No hypervisor matching '%s' could be found."
msgstr "No hypervisor matching '%s' could be found."
@@ -3522,27 +3419,22 @@ msgstr "Invalid timestamp for date %s"
msgid "Quota exceeded, too many key pairs."
msgstr "Quota exceeded, too many key pairs."
-#: nova/api/openstack/compute/contrib/multinic.py:54
+#: nova/api/openstack/compute/contrib/multinic.py:55
msgid "Missing 'networkId' argument for addFixedIp"
msgstr "Missing 'networkId' argument for addFixedIp"
-#: nova/api/openstack/compute/contrib/multinic.py:70
+#: nova/api/openstack/compute/contrib/multinic.py:75
msgid "Missing 'address' argument for removeFixedIp"
msgstr "Missing 'address' argument for removeFixedIp"
-#: nova/api/openstack/compute/contrib/multinic.py:80
-#, python-format
-msgid "Unable to find address %r"
-msgstr "Unable to find address %r"
-
#: nova/api/openstack/compute/contrib/networks_associate.py:40
#: nova/api/openstack/compute/contrib/networks_associate.py:56
#: nova/api/openstack/compute/contrib/networks_associate.py:74
-#: nova/api/openstack/compute/contrib/os_networks.py:78
-#: nova/api/openstack/compute/contrib/os_networks.py:93
-#: nova/api/openstack/compute/contrib/os_networks.py:106
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:110
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:137
+#: nova/api/openstack/compute/contrib/os_networks.py:79
+#: nova/api/openstack/compute/contrib/os_networks.py:94
+#: nova/api/openstack/compute/contrib/os_networks.py:107
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:112
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:144
msgid "Network not found"
msgstr "Network not found"
@@ -3558,70 +3450,55 @@ msgstr ""
msgid "Associate host is not implemented by the configured Network API"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_networks.py:81
+#: nova/api/openstack/compute/contrib/os_networks.py:82
msgid "Disassociate network is not implemented by the configured Network API"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_networks.py:100
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
-#, python-format
-msgid "Deleting network with id %s"
-msgstr "Deleting network with id %s"
-
-#: nova/api/openstack/compute/contrib/os_networks.py:118
+#: nova/api/openstack/compute/contrib/os_networks.py:119
msgid "Missing network in body"
msgstr "Missing network in body"
-#: nova/api/openstack/compute/contrib/os_networks.py:122
+#: nova/api/openstack/compute/contrib/os_networks.py:123
msgid "Network label is required"
msgstr "Network label is required"
-#: nova/api/openstack/compute/contrib/os_networks.py:126
+#: nova/api/openstack/compute/contrib/os_networks.py:127
msgid "Network cidr or cidr_v6 is required"
msgstr "Network cidr or cidr_v6 is required"
-#: nova/api/openstack/compute/contrib/os_networks.py:152
+#: nova/api/openstack/compute/contrib/os_networks.py:153
msgid "VLAN support must be enabled"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_networks.py:155
+#: nova/api/openstack/compute/contrib/os_networks.py:156
#, python-format
msgid "Cannot associate network %(network)s with project %(project)s: %(message)s"
msgstr "Cannot associate network %(network)s with project %(project)s: %(message)s"
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:83
-msgid "Failed to get default networks"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:122
-#, fuzzy
-msgid "Failed to update usages deallocating network."
-msgstr "Failed to update usages deallocating floating IP"
-
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:157
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:168
#, fuzzy
msgid "No CIDR requested"
msgstr "Can not find requested image"
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:163
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:174
msgid "Requested network does not contain enough (2+) usable hosts"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:167
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178
msgid "CIDR is malformed."
msgstr ""
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:170
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:181
#, fuzzy
msgid "Address could not be converted."
msgstr "Resource could not be found."
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:189
#, fuzzy
msgid "Quota exceeded, too many networks."
msgstr "Quota exceeded, too many key pairs."
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:191
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:202
#, fuzzy
msgid "Create networks failed"
msgstr "Create failed"
@@ -3665,23 +3542,13 @@ msgid ""
" %(quota_used)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/rescue.py:78
-#: nova/api/openstack/compute/plugins/v3/rescue.py:80
-msgid "The rescue operation is not implemented by this cloud."
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/rescue.py:98
-#: nova/api/openstack/compute/plugins/v3/rescue.py:104
-msgid "The unrescue operation is not implemented by this cloud."
-msgstr ""
-
#: nova/api/openstack/compute/contrib/scheduler_hints.py:37
#: nova/api/openstack/compute/plugins/v3/scheduler_hints.py:39
msgid "Malformed scheduler_hints attribute"
msgstr "Malformed scheduler_hints attribute"
#: nova/api/openstack/compute/contrib/security_group_default_rules.py:127
-#: nova/api/openstack/compute/contrib/security_groups.py:386
+#: nova/api/openstack/compute/contrib/security_groups.py:394
msgid "Not enough parameters to build a valid rule."
msgstr "Not enough parameters to build a valid rule."
@@ -3695,81 +3562,80 @@ msgstr "This rule already exists in group %s"
msgid "security group default rule not found"
msgstr "Security group with rule %(rule_id)s not found."
-#: nova/api/openstack/compute/contrib/security_groups.py:394
+#: nova/api/openstack/compute/contrib/security_groups.py:402
#, fuzzy, python-format
msgid "Bad prefix for network in cidr %s"
msgstr "Bad prefix for to_global_ipv6: %s"
-#: nova/api/openstack/compute/contrib/security_groups.py:484
+#: nova/api/openstack/compute/contrib/security_groups.py:491
msgid "Security group not specified"
msgstr "Security group not specified"
-#: nova/api/openstack/compute/contrib/security_groups.py:488
+#: nova/api/openstack/compute/contrib/security_groups.py:495
msgid "Security group name cannot be empty"
msgstr "Security group name cannot be empty"
-#: nova/api/openstack/compute/contrib/server_external_events.py:92
+#: nova/api/openstack/compute/contrib/server_external_events.py:93
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:65
#, python-format
msgid "event entity requires key %(key)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:96
+#: nova/api/openstack/compute/contrib/server_external_events.py:97
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:69
#, python-format
msgid "event entity contains unsupported items: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:102
+#: nova/api/openstack/compute/contrib/server_external_events.py:103
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:75
#, python-format
msgid "Invalid event status `%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:121
-#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94
+#: nova/api/openstack/compute/contrib/server_external_events.py:126
#, python-format
-msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s"
+msgid "Creating event %(name)s:%(tag)s for instance %(instance_uuid)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:130
+#: nova/api/openstack/compute/contrib/server_external_events.py:148
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:103
msgid "No instances found for any event"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:162
+#: nova/api/openstack/compute/contrib/server_groups.py:163
msgid "Conflicting policies configured!"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:167
+#: nova/api/openstack/compute/contrib/server_groups.py:168
#, python-format
msgid "Invalid policies: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:172
+#: nova/api/openstack/compute/contrib/server_groups.py:173
msgid "Duplicate policies configured!"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:177
+#: nova/api/openstack/compute/contrib/server_groups.py:178
msgid "the body is invalid."
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:186
+#: nova/api/openstack/compute/contrib/server_groups.py:187
#, python-format
msgid "'%s' is either missing or empty."
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:192
+#: nova/api/openstack/compute/contrib/server_groups.py:193
#, python-format
msgid "Invalid format for name: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:200
+#: nova/api/openstack/compute/contrib/server_groups.py:201
#, python-format
msgid "'%s' is not a list"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:204
+#: nova/api/openstack/compute/contrib/server_groups.py:205
#, python-format
msgid "unsupported fields: %s"
msgstr ""
@@ -3871,16 +3737,16 @@ msgstr ""
msgid "access_ip_v6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:170
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:172
msgid "Invalid request format for metadata"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:103
+#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:109
#, python-format
msgid "Attach interface to %s"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/cells.py:187
+#: nova/api/openstack/compute/plugins/v3/cells.py:189
#, python-format
msgid "Cell %s doesn't exist."
msgstr ""
@@ -3889,23 +3755,6 @@ msgstr ""
msgid "token not provided"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:62
-#, python-format
-msgid "create_backup entity requires %s attribute"
-msgstr ""
-
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:66
-msgid "Malformed create_backup entity"
-msgstr ""
-
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:72
-msgid "create_backup attribute 'rotation' must be an integer"
-msgstr ""
-
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:75
-msgid "create_backup attribute 'rotation' must be greater than or equal to zero"
-msgstr ""
-
#: nova/api/openstack/compute/plugins/v3/extended_volumes.py:98
msgid "The volume was either invalid or not attached to the instance."
msgstr ""
@@ -3921,96 +3770,101 @@ msgstr ""
msgid "Volume %(volume_id)s is not attached to the instance %(server_id)s"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/flavors.py:94
+#: nova/api/openstack/compute/plugins/v3/flavors.py:96
#, python-format
msgid "Invalid min_ram filter [%s]"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/flavors.py:101
+#: nova/api/openstack/compute/plugins/v3/flavors.py:103
#, python-format
msgid "Invalid min_disk filter [%s]"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:66
-msgid "No or bad extra_specs provided"
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:132
+msgid "Need parameter 'query' to specify which hypervisor to filter on"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:73
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:95
-msgid "Concurrent transaction has been committed, try again"
+#: nova/api/openstack/compute/plugins/v3/pause_server.py:59
+#: nova/api/openstack/compute/plugins/v3/pause_server.py:81
+msgid "Virt driver does not implement pause function."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/hosts.py:120
-msgid "The request body invalid"
+#: nova/api/openstack/compute/plugins/v3/server_actions.py:76
+#, python-format
+msgid "Action %s not found"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:125
-msgid "Need parameter 'query' to specify which hypervisor to filter on"
+#: nova/api/openstack/compute/plugins/v3/server_diagnostics.py:46
+msgid "Unable to get diagnostics, functionality not implemented"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/server_actions.py:76
+#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94
#, python-format
-msgid "Action %s not found"
+msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:212
+#: nova/api/openstack/compute/plugins/v3/servers.py:235
msgid "Invalid changes_since value"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:335
+#: nova/api/openstack/compute/plugins/v3/servers.py:306
+#, fuzzy, python-format
+msgid "Flavor '%s' could not be found "
+msgstr "Host '%s' could not be found."
+
+#: nova/api/openstack/compute/plugins/v3/servers.py:358
msgid "Unknown argument: port"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:343
+#: nova/api/openstack/compute/plugins/v3/servers.py:366
#, python-format
msgid ""
"Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port "
"already has a Fixed IP allocated."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:412
-#: nova/api/openstack/compute/plugins/v3/servers.py:585
-msgid "The request body is invalid"
+#: nova/api/openstack/compute/plugins/v3/servers.py:494
+#: nova/api/openstack/compute/plugins/v3/servers.py:522
+msgid "Invalid flavor_ref provided."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:470
-#: nova/api/openstack/compute/plugins/v3/servers.py:498
-msgid "Invalid flavor_ref provided."
+#: nova/api/openstack/compute/plugins/v3/servers.py:620
+msgid "The request body is invalid"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:596
+#: nova/api/openstack/compute/plugins/v3/servers.py:631
msgid "host_id cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:741
+#: nova/api/openstack/compute/plugins/v3/servers.py:782
msgid "Invalid image_ref provided."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:760
+#: nova/api/openstack/compute/plugins/v3/servers.py:801
msgid "Missing image_ref attribute"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:767
+#: nova/api/openstack/compute/plugins/v3/servers.py:808
msgid "Missing flavor_ref attribute"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:780
+#: nova/api/openstack/compute/plugins/v3/servers.py:822
msgid "Resize request has invalid 'flavor_ref' attribute."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:783
+#: nova/api/openstack/compute/plugins/v3/servers.py:825
msgid "Resize requests require 'flavor_ref' attribute."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:799
+#: nova/api/openstack/compute/plugins/v3/servers.py:842
msgid "Could not parse image_ref from request."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:883
+#: nova/api/openstack/compute/plugins/v3/servers.py:927
msgid "create_image entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:945
+#: nova/api/openstack/compute/plugins/v3/servers.py:989
msgid "Invalid admin_password"
msgstr ""
@@ -4018,16 +3872,12 @@ msgstr ""
msgid "Disabled reason contains invalid characters or is too long"
msgstr ""
-#: nova/api/openstack/compute/views/servers.py:197
-msgid "Instance has had its instance_type removed from the DB"
-msgstr "Instance has had its instance_type removed from the DB"
-
-#: nova/api/validation/validators.py:61
+#: nova/api/validation/validators.py:73
#, python-format
msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s"
msgstr ""
-#: nova/cells/manager.py:78
+#: nova/cells/manager.py:79
msgid ""
"The cells feature of Nova is considered experimental by the OpenStack "
"project because it receives much less testing than the rest of Nova. This"
@@ -4035,117 +3885,122 @@ msgid ""
" use of it in production right now may be risky."
msgstr ""
-#: nova/cells/messaging.py:205
+#: nova/cells/messaging.py:204
#, fuzzy, python-format
msgid "Error processing message locally: %(exc)s"
msgstr "Error processing message. Skipping it."
-#: nova/cells/messaging.py:366 nova/cells/messaging.py:374
+#: nova/cells/messaging.py:365 nova/cells/messaging.py:373
#, python-format
msgid "destination is %(target_cell)s but routing_path is %(routing_path)s"
msgstr ""
-#: nova/cells/messaging.py:386
+#: nova/cells/messaging.py:385
#, python-format
msgid "Unknown %(cell_type)s when routing to %(target_cell)s"
msgstr ""
-#: nova/cells/messaging.py:410
+#: nova/cells/messaging.py:409
#, fuzzy, python-format
msgid "Error locating next hop for message: %(exc)s"
msgstr "no method for message: %s"
-#: nova/cells/messaging.py:437
+#: nova/cells/messaging.py:436
#, fuzzy, python-format
msgid "Failed to send message to cell: %(next_hop)s: %(exc)s"
msgstr "Failed to load extension %(ext_factory)s: %(exc)s"
-#: nova/cells/messaging.py:516
+#: nova/cells/messaging.py:515
#, fuzzy, python-format
msgid "Error locating next hops for message: %(exc)s"
msgstr "no method for message: %s"
-#: nova/cells/messaging.py:536
+#: nova/cells/messaging.py:535
#, fuzzy, python-format
msgid "Error sending message to next hops: %(exc)s"
msgstr "Sending message(s) to: %s"
-#: nova/cells/messaging.py:554
+#: nova/cells/messaging.py:553
#, python-format
msgid "Error waiting for responses from neighbor cells: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:665
+#: nova/cells/messaging.py:664
#, fuzzy, python-format
msgid "Unknown method '%(method)s' in compute API"
msgstr "Casted '%(method)s' to compute '%(host)s'"
-#: nova/cells/messaging.py:1096
+#: nova/cells/messaging.py:1106
#, python-format
msgid "Got message to create instance fault: %(instance_fault)s"
msgstr ""
-#: nova/cells/messaging.py:1119
+#: nova/cells/messaging.py:1129
#, python-format
msgid ""
"Forcing a sync of instances, project_id=%(projid_str)s, "
"updated_since=%(since_str)s"
msgstr ""
-#: nova/cells/messaging.py:1198
+#: nova/cells/messaging.py:1208
#, python-format
msgid "No match when trying to update BDM: %(bdm)s"
msgstr ""
-#: nova/cells/messaging.py:1673
+#: nova/cells/messaging.py:1683
#, python-format
msgid "No cell_name for %(method)s() from API"
msgstr ""
-#: nova/cells/messaging.py:1690
+#: nova/cells/messaging.py:1700
msgid "No cell_name for instance update from API"
msgstr ""
-#: nova/cells/messaging.py:1853
+#: nova/cells/messaging.py:1863
#, python-format
msgid "Returning exception %s to caller"
msgstr "Returning exception %s to caller"
-#: nova/cells/rpcapi.py:369
+#: nova/cells/rpcapi.py:378
msgid "Failed to notify cells of BDM update/create."
msgstr ""
-#: nova/cells/rpcapi.py:385
+#: nova/cells/rpcapi.py:394
msgid "Failed to notify cells of BDM destroy."
msgstr ""
-#: nova/cells/scheduler.py:192
+#: nova/cells/scheduler.py:191
#, python-format
msgid "Couldn't communicate with cell '%s'"
msgstr ""
-#: nova/cells/scheduler.py:196
+#: nova/cells/scheduler.py:195
msgid "Couldn't communicate with any cells"
msgstr ""
-#: nova/cells/scheduler.py:234
+#: nova/cells/scheduler.py:233
#, python-format
msgid ""
"No cells available when scheduling. Will retry in %(sleep_time)s "
"second(s)"
msgstr ""
-#: nova/cells/scheduler.py:240
+#: nova/cells/scheduler.py:239
#, fuzzy, python-format
msgid "Error scheduling instances %(instance_uuids)s"
msgstr "Destroying VDIs for Instance %(instance_uuid)s"
-#: nova/cells/state.py:352
+#: nova/cells/state.py:182
+#, python-format
+msgid "DB error: %s"
+msgstr "DB error: %s"
+
+#: nova/cells/state.py:363
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capabilities"
msgstr ""
-#: nova/cells/state.py:367
+#: nova/cells/state.py:378
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capacities"
msgstr ""
@@ -4185,71 +4040,71 @@ msgstr "Netmask to push into openvpn config"
msgid "Failed to load %s"
msgstr "Failed to create VM %s"
-#: nova/cmd/baremetal_deploy_helper.py:211
+#: nova/cmd/baremetal_deploy_helper.py:210
#, python-format
msgid "parent device '%s' not found"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:214
+#: nova/cmd/baremetal_deploy_helper.py:213
#, python-format
msgid "root device '%s' not found"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:216
+#: nova/cmd/baremetal_deploy_helper.py:215
#, python-format
msgid "swap device '%s' not found"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:218
+#: nova/cmd/baremetal_deploy_helper.py:217
#, python-format
msgid "ephemeral device '%s' not found"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:228
+#: nova/cmd/baremetal_deploy_helper.py:227
msgid "Failed to detect root device UUID."
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:252
+#: nova/cmd/baremetal_deploy_helper.py:251
#, python-format
msgid "Cmd : %s"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:253
+#: nova/cmd/baremetal_deploy_helper.py:252
#, python-format
msgid "StdOut : %r"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:254
+#: nova/cmd/baremetal_deploy_helper.py:253
#, python-format
msgid "StdErr : %r"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:282
+#: nova/cmd/baremetal_deploy_helper.py:281
#, python-format
msgid "start deployment for node %(node_id)s, params %(params)s"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:291
+#: nova/cmd/baremetal_deploy_helper.py:290
#, fuzzy, python-format
msgid "deployment to node %s failed"
msgstr "element is not a child"
-#: nova/cmd/baremetal_deploy_helper.py:295
+#: nova/cmd/baremetal_deploy_helper.py:294
#, python-format
msgid "deployment to node %s done"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:317
+#: nova/cmd/baremetal_deploy_helper.py:316
#, python-format
msgid "post: environ=%s"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:336
+#: nova/cmd/baremetal_deploy_helper.py:335
#, python-format
msgid "Deploy agent error message: %s"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:360
+#: nova/cmd/baremetal_deploy_helper.py:359
#, python-format
msgid "request is queued: node %(node_id)s, params %(params)s"
msgstr ""
@@ -4276,17 +4131,17 @@ msgstr ""
msgid "No db access allowed in nova-compute: %s"
msgstr ""
-#: nova/cmd/dhcpbridge.py:109
+#: nova/cmd/dhcpbridge.py:108
#, python-format
msgid "No db access allowed in nova-dhcpbridge: %s"
msgstr ""
-#: nova/cmd/dhcpbridge.py:132
+#: nova/cmd/dhcpbridge.py:131
#, python-format
msgid "Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'"
msgstr ""
-#: nova/cmd/dhcpbridge.py:142
+#: nova/cmd/dhcpbridge.py:141
msgid "Environment variable 'NETWORK_ID' must be set."
msgstr ""
@@ -4371,40 +4226,40 @@ msgid ""
"Use python-neutronclient instead."
msgstr ""
-#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:217
+#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:218
msgid "id"
msgstr "id"
-#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:218
+#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:219
msgid "IPv4"
msgstr "IPv4"
-#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:219
+#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:220
msgid "IPv6"
msgstr "IPv6"
-#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:220
+#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:221
msgid "start address"
msgstr "start address"
-#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:221
+#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:222
msgid "DNS1"
msgstr "DNS1"
-#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:222
+#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:223
msgid "DNS2"
msgstr "DNS2"
-#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:223
+#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:224
msgid "VlanID"
msgstr "VlanID"
#: nova/cmd/manage.py:558 nova/cmd/manage.py:665
-#: nova/tests/test_nova_manage.py:224
+#: nova/tests/test_nova_manage.py:225
msgid "project"
msgstr "project"
-#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:225
+#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:226
msgid "uuid"
msgstr "uuid"
@@ -4620,665 +4475,511 @@ msgstr ""
msgid "No db access allowed in nova-network: %s"
msgstr ""
-#: nova/compute/api.py:362
+#: nova/compute/api.py:355
msgid "Cannot run any more instances of this type."
msgstr "Cannot run any more instances of this type."
-#: nova/compute/api.py:369
+#: nova/compute/api.py:362
#, python-format
msgid "Can only run %s more instances of this type."
msgstr "Can only run %s more instances of this type."
-#: nova/compute/api.py:381
+#: nova/compute/api.py:374
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)d "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:385
+#: nova/compute/api.py:378
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run between %(min_count)d "
"and %(max_count)d instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:406
+#: nova/compute/api.py:399
msgid "Metadata type should be dict."
msgstr ""
-#: nova/compute/api.py:412
-#, python-format
-msgid ""
-"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
-"properties"
-msgstr ""
-"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
-"properties"
-
-#: nova/compute/api.py:424
-#, python-format
-msgid "Metadata property key '%s' is not a string."
-msgstr ""
-
-#: nova/compute/api.py:427
-#, python-format
-msgid "Metadata property value '%(v)s' for key '%(k)s' is not a string."
-msgstr ""
-
-#: nova/compute/api.py:431
-msgid "Metadata property key blank"
-msgstr "Metadata property key blank"
-
-#: nova/compute/api.py:434
+#: nova/compute/api.py:421
msgid "Metadata property key greater than 255 characters"
msgstr "Metadata property key greater than 255 characters"
-#: nova/compute/api.py:437
+#: nova/compute/api.py:424
msgid "Metadata property value greater than 255 characters"
msgstr "Metadata property value greater than 255 characters"
-#: nova/compute/api.py:574
-msgid "Failed to set instance name using multi_instance_display_name_template."
-msgstr ""
-
-#: nova/compute/api.py:676
+#: nova/compute/api.py:663
#, fuzzy
msgid "Cannot attach one or more volumes to multiple instances"
msgstr "Unable to attach boot volume to instance %s"
-#: nova/compute/api.py:718
+#: nova/compute/api.py:705
msgid "The requested availability zone is not available"
msgstr ""
-#: nova/compute/api.py:1119
+#: nova/compute/api.py:1107
msgid ""
"Images with destination_type 'volume' need to have a non-zero size "
"specified"
msgstr ""
-#: nova/compute/api.py:1150
+#: nova/compute/api.py:1138
msgid "More than one swap drive requested."
msgstr ""
-#: nova/compute/api.py:1299
-#: nova/tests/api/openstack/compute/test_servers.py:3122
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2460
+#: nova/compute/api.py:1277
+#: nova/tests/api/openstack/compute/test_servers.py:3199
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2488
msgid ""
"Unable to launch multiple instances with a single configured port ID. "
"Please launch your instance one by one with different ports."
msgstr ""
-#: nova/compute/api.py:1401
+#: nova/compute/api.py:1298
+msgid "max_count cannot be greater than 1 if an fixed_ip is specified."
+msgstr ""
+
+#: nova/compute/api.py:1404
#, fuzzy
msgid "instance termination disabled"
msgstr "Going to start terminating instances"
-#: nova/compute/api.py:1416
+#: nova/compute/api.py:1418
#, python-format
msgid "Working on deleting snapshot %s from shelved instance..."
msgstr ""
-#: nova/compute/api.py:1423
+#: nova/compute/api.py:1425
#, python-format
msgid "Failed to delete snapshot from shelved instance (%s)."
msgstr ""
-#: nova/compute/api.py:1427
-msgid ""
-"Something wrong happened when trying to delete snapshot from shelved "
-"instance."
-msgstr ""
-
-#: nova/compute/api.py:1492
+#: nova/compute/api.py:1486
msgid "Instance is already in deleting state, ignoring this request"
msgstr ""
-#: nova/compute/api.py:1540
+#: nova/compute/api.py:1521
#, python-format
msgid ""
"Found an unconfirmed migration during delete, id: %(id)s, status: "
"%(status)s"
msgstr ""
-#: nova/compute/api.py:1550
+#: nova/compute/api.py:1531
msgid "Instance may have been confirmed during delete"
msgstr ""
-#: nova/compute/api.py:1567
+#: nova/compute/api.py:1548
#, python-format
msgid "Migration %s may have been confirmed during delete"
msgstr ""
-#: nova/compute/api.py:1603
+#: nova/compute/api.py:1583
#, python-format
msgid "Flavor %d not found"
msgstr ""
-#: nova/compute/api.py:1621
+#: nova/compute/api.py:1603
#, fuzzy, python-format
msgid "instance's host %s is down, deleting from database"
msgstr "host for instance is down, deleting from database"
-#: nova/compute/api.py:1648 nova/compute/manager.py:2279
+#: nova/compute/api.py:1630
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr "Ignoring volume cleanup failure due to %s"
-#: nova/compute/api.py:2043
+#: nova/compute/api.py:2030
#, python-format
msgid "snapshot for %s"
msgstr "snapshot for %s"
-#: nova/compute/api.py:2415
+#: nova/compute/api.py:2368
+msgid "Resize to zero disk flavor is not allowed."
+msgstr ""
+
+#: nova/compute/api.py:2407
#, fuzzy, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr "%(overs)s quota exceeded for %(pid)s, tried to resize instance. %(msg)s"
-#: nova/compute/api.py:2584
+#: nova/compute/api.py:2582
msgid "Cannot rescue a volume-backed instance"
msgstr ""
-#: nova/compute/api.py:2811
+#: nova/compute/api.py:2809
msgid "Volume must be attached in order to detach."
msgstr "Volume must be attached in order to detach."
-#: nova/compute/api.py:2831
+#: nova/compute/api.py:2829
msgid "Old volume is attached to a different instance."
msgstr ""
-#: nova/compute/api.py:2834
+#: nova/compute/api.py:2832
msgid "New volume must be detached in order to swap."
msgstr ""
-#: nova/compute/api.py:2837
+#: nova/compute/api.py:2835
msgid "New volume must be the same size or larger."
msgstr ""
-#: nova/compute/api.py:3032
+#: nova/compute/api.py:3042
#, python-format
msgid "Instance compute service state on %s expected to be down, but it was up."
msgstr ""
-#: nova/compute/api.py:3335
+#: nova/compute/api.py:3347
msgid "Host aggregate is not empty"
msgstr ""
-#: nova/compute/api.py:3368
+#: nova/compute/api.py:3380
#, python-format
msgid "More than 1 AZ for host %s"
msgstr ""
-#: nova/compute/api.py:3403
+#: nova/compute/api.py:3415
#, python-format
msgid "Host already in availability zone %s"
msgstr ""
-#: nova/compute/api.py:3491 nova/tests/compute/test_keypairs.py:135
+#: nova/compute/api.py:3503 nova/tests/compute/test_keypairs.py:137
msgid "Keypair name contains unsafe characters"
msgstr "Keypair name contains unsafe characters"
-#: nova/compute/api.py:3495 nova/tests/compute/test_keypairs.py:127
-#: nova/tests/compute/test_keypairs.py:131
-msgid "Keypair name must be between 1 and 255 characters long"
-msgstr "Keypair name must be between 1 and 255 characters long"
+#: nova/compute/api.py:3509 nova/tests/compute/test_keypairs.py:127
+#: nova/tests/compute/test_keypairs.py:132
+msgid "Keypair name must be string and between 1 and 255 characters long"
+msgstr ""
-#: nova/compute/api.py:3583
+#: nova/compute/api.py:3597
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr "Security group %s is not a string or unicode"
-#: nova/compute/api.py:3586
-#, python-format
-msgid "Security group %s cannot be empty."
-msgstr "Security group %s cannot be empty."
-
-#: nova/compute/api.py:3594
+#: nova/compute/api.py:3607
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)s'."
msgstr ""
-#: nova/compute/api.py:3600
-#, python-format
-msgid "Security group %s should not be greater than 255 characters."
-msgstr "Security group %s should not be greater than 255 characters."
-
-#: nova/compute/api.py:3618
+#: nova/compute/api.py:3627
msgid "Quota exceeded, too many security groups."
msgstr "Quota exceeded, too many security groups."
-#: nova/compute/api.py:3621
+#: nova/compute/api.py:3630
#, python-format
msgid "Create Security Group %s"
msgstr "Create Security Group %s"
-#: nova/compute/api.py:3633
+#: nova/compute/api.py:3642
#, python-format
msgid "Security group %s already exists"
msgstr "Security group %s already exists"
-#: nova/compute/api.py:3646
+#: nova/compute/api.py:3655
#, fuzzy, python-format
msgid "Unable to update system group '%s'"
msgstr "Unable to destroy vbd %s"
-#: nova/compute/api.py:3708
+#: nova/compute/api.py:3717
#, fuzzy, python-format
msgid "Unable to delete system group '%s'"
msgstr "Unable to destroy vbd %s"
-#: nova/compute/api.py:3713
+#: nova/compute/api.py:3722
msgid "Security group is still in use"
msgstr "Security group is still in use"
-#: nova/compute/api.py:3723
-msgid "Failed to update usages deallocating security group"
-msgstr "Failed to update usages deallocating security group"
-
-#: nova/compute/api.py:3726
+#: nova/compute/api.py:3735
#, python-format
msgid "Delete security group %s"
msgstr "Delete security group %s"
-#: nova/compute/api.py:3802 nova/compute/api.py:3885
+#: nova/compute/api.py:3811 nova/compute/api.py:3894
#, python-format
msgid "Rule (%s) not found"
msgstr "Rule (%s) not found"
-#: nova/compute/api.py:3818
+#: nova/compute/api.py:3827
msgid "Quota exceeded, too many security group rules."
msgstr "Quota exceeded, too many security group rules."
-#: nova/compute/api.py:3821
+#: nova/compute/api.py:3830
#, python-format
msgid ""
"Security group %(name)s added %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)"
msgstr ""
-#: nova/compute/api.py:3836
+#: nova/compute/api.py:3845
#, python-format
msgid ""
"Security group %(name)s removed %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)"
msgstr ""
-#: nova/compute/api.py:3892
+#: nova/compute/api.py:3901
msgid "Security group id should be integer"
msgstr "Security group id should be integer"
-#: nova/compute/claims.py:135
+#: nova/compute/claims.py:126
#, python-format
-msgid ""
-"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
-"%(vcpus)d"
+msgid "Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB"
msgstr ""
-"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
-"%(vcpus)d"
-#: nova/compute/claims.py:150
+#: nova/compute/claims.py:140
msgid "Claim successful"
msgstr ""
-#: nova/compute/claims.py:153
+#: nova/compute/claims.py:143
msgid "memory"
msgstr ""
-#: nova/compute/claims.py:162
+#: nova/compute/claims.py:152
msgid "disk"
msgstr ""
-#: nova/compute/claims.py:177 nova/compute/claims.py:249
+#: nova/compute/claims.py:167 nova/compute/claims.py:230
msgid "Claim pci failed."
msgstr ""
-#: nova/compute/claims.py:180
-msgid "CPUs"
-msgstr ""
-
-#: nova/compute/claims.py:192
+#: nova/compute/claims.py:177
#, fuzzy, python-format
msgid "Total %(type)s: %(total)d %(unit)s, used: %(used).02f %(unit)s"
msgstr "Total VCPUs: %(total_vcpus)d, used: %(used_vcpus)d"
-#: nova/compute/claims.py:199
+#: nova/compute/claims.py:184
#, fuzzy, python-format
msgid "%(type)s limit not specified, defaulting to unlimited"
msgstr "Disk limit not specified, defaulting to unlimited"
-#: nova/compute/claims.py:206
+#: nova/compute/claims.py:191
#, fuzzy, python-format
msgid "%(type)s limit: %(limit).02f %(unit)s, free: %(free).02f %(unit)s"
msgstr "Disk limit: %(disk_gb_limit)d GB, free: %(free_disk_gb)d GB"
-#: nova/compute/claims.py:212
+#: nova/compute/claims.py:197
#, python-format
msgid "Free %(type)s %(free).02f %(unit)s < requested %(requested)d %(unit)s"
msgstr ""
-#: nova/compute/flavors.py:109
+#: nova/compute/flavors.py:110
msgid ""
"Flavor names can only contain alphanumeric characters, periods, dashes, "
"underscores and spaces."
msgstr ""
-#: nova/compute/flavors.py:119
+#: nova/compute/flavors.py:120
msgid "id cannot contain leading and/or trailing whitespace(s)"
msgstr ""
-#: nova/compute/flavors.py:129
+#: nova/compute/flavors.py:130
msgid ""
"Flavor id can only contain letters from A-Z (both cases), periods, "
"dashes, underscores and spaces."
msgstr ""
-#: nova/compute/flavors.py:150
+#: nova/compute/flavors.py:151
#, python-format
msgid "'rxtx_factor' argument must be a float between 0 and %g"
msgstr ""
-#: nova/compute/flavors.py:161
+#: nova/compute/flavors.py:162
msgid "is_public must be a boolean"
msgstr ""
-#: nova/compute/flavors.py:166
-#, python-format
-msgid "DB error: %s"
-msgstr "DB error: %s"
-
-#: nova/compute/flavors.py:177
-#, python-format
-msgid "Instance type %s not found for deletion"
-msgstr "Instance type %s not found for deletion"
-
-#: nova/compute/flavors.py:327
+#: nova/compute/flavors.py:328
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
msgstr ""
-#: nova/compute/manager.py:278
+#: nova/compute/manager.py:284
#, python-format
msgid "Task possibly preempted: %s"
msgstr ""
-#: nova/compute/manager.py:360 nova/compute/manager.py:2849
-#, python-format
-msgid "Error while trying to clean up image %s"
-msgstr ""
-
-#: nova/compute/manager.py:501
+#: nova/compute/manager.py:508
msgid "Instance event failed"
msgstr ""
-#: nova/compute/manager.py:600
+#: nova/compute/manager.py:608
#, python-format
msgid "%s is not a valid node managed by this compute host."
msgstr ""
-#: nova/compute/manager.py:698
+#: nova/compute/manager.py:714
#, fuzzy, python-format
msgid ""
"Deleting instance as its host (%(instance_host)s) is not equal to our "
"host (%(our_host)s)."
msgstr "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
-#: nova/compute/manager.py:713
+#: nova/compute/manager.py:729
msgid "Instance has been marked deleted already, removing it from the hypervisor."
msgstr ""
-#: nova/compute/manager.py:733
+#: nova/compute/manager.py:749
msgid ""
"Hypervisor driver does not support instance shared storage check, "
"assuming it's not on shared storage"
msgstr ""
-#: nova/compute/manager.py:739
-#, fuzzy
-msgid "Failed to check if instance shared"
-msgstr "Failed to terminate instance"
-
-#: nova/compute/manager.py:805 nova/compute/manager.py:856
-msgid "Failed to complete a deletion"
-msgstr ""
-
-#: nova/compute/manager.py:838
+#: nova/compute/manager.py:854
msgid ""
"Service started deleting the instance during the previous run, but did "
"not finish. Restarting the deletion now."
msgstr ""
-#: nova/compute/manager.py:879
+#: nova/compute/manager.py:895
#, python-format
msgid ""
"Instance in transitional state (%(task_state)s) at start-up and power "
"state is (%(power_state)s), clearing task state"
msgstr ""
-#: nova/compute/manager.py:897
-msgid "Failed to stop instance"
-msgstr ""
-
-#: nova/compute/manager.py:909
-msgid "Failed to start instance"
-msgstr ""
-
-#: nova/compute/manager.py:934
-msgid "Failed to revert crashed migration"
-msgstr ""
-
-#: nova/compute/manager.py:937
+#: nova/compute/manager.py:953
msgid "Instance found in migrating state during startup. Resetting task_state"
msgstr ""
-#: nova/compute/manager.py:954
+#: nova/compute/manager.py:970
msgid "Rebooting instance after nova-compute restart."
msgstr "Rebooting instance after nova-compute restart."
-#: nova/compute/manager.py:964
+#: nova/compute/manager.py:980
msgid "Hypervisor driver does not support resume guests"
msgstr "Hypervisor driver does not support resume guests"
-#: nova/compute/manager.py:969
+#: nova/compute/manager.py:985
#, fuzzy
msgid "Failed to resume instance"
msgstr "Failed to suspend instance"
-#: nova/compute/manager.py:978
+#: nova/compute/manager.py:994
msgid "Hypervisor driver does not support firewall rules"
msgstr "Hypervisor driver does not support firewall rules"
-#: nova/compute/manager.py:1003
+#: nova/compute/manager.py:1019
#, python-format
-msgid "Lifecycle event %(state)d on VM %(uuid)s"
+msgid "VM %(state)s (Lifecycle Event)"
msgstr ""
-#: nova/compute/manager.py:1019
+#: nova/compute/manager.py:1035
#, fuzzy, python-format
msgid "Unexpected power state %d"
msgstr "Unexpected status code"
-#: nova/compute/manager.py:1124
+#: nova/compute/manager.py:1140
msgid "Hypervisor driver does not support security groups."
msgstr ""
-#: nova/compute/manager.py:1164
+#: nova/compute/manager.py:1178
#, python-format
msgid "Volume id: %s finished being created but was not set as 'available'"
msgstr ""
-#: nova/compute/manager.py:1222 nova/compute/manager.py:1978
+#: nova/compute/manager.py:1235 nova/compute/manager.py:2064
msgid "Success"
msgstr ""
-#: nova/compute/manager.py:1246
+#: nova/compute/manager.py:1259
msgid "Instance disappeared before we could start it"
msgstr ""
-#: nova/compute/manager.py:1274
+#: nova/compute/manager.py:1286
msgid "Anti-affinity instance group policy was violated."
msgstr ""
-#: nova/compute/manager.py:1351
-msgid "Failed to dealloc network for deleted instance"
-msgstr "Failed to dealloc network for deleted instance"
-
-#: nova/compute/manager.py:1356
+#: nova/compute/manager.py:1369
msgid "Instance disappeared during build"
msgstr ""
-#: nova/compute/manager.py:1372
-msgid "Failed to dealloc network for failed instance"
-msgstr ""
-
-#: nova/compute/manager.py:1399
+#: nova/compute/manager.py:1412
#, fuzzy, python-format
msgid "Error: %s"
msgstr "DB error: %s"
-#: nova/compute/manager.py:1445 nova/compute/manager.py:3473
-msgid "Error trying to reschedule"
-msgstr "Error trying to reschedule"
-
-#: nova/compute/manager.py:1500
+#: nova/compute/manager.py:1514
msgid "Instance build timed out. Set to error state."
msgstr "Instance build timed out. Set to error state."
-#: nova/compute/manager.py:1510 nova/compute/manager.py:1870
+#: nova/compute/manager.py:1524 nova/compute/manager.py:1894
msgid "Starting instance..."
msgstr "Starting instance..."
-#: nova/compute/manager.py:1528
+#: nova/compute/manager.py:1542
#, python-format
msgid ""
"Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."
msgstr ""
-#: nova/compute/manager.py:1553
-#, python-format
-msgid "Instance failed network setup after %(attempts)d attempt(s)"
-msgstr ""
-
-#: nova/compute/manager.py:1557
+#: nova/compute/manager.py:1571
#, python-format
msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)"
msgstr ""
-#: nova/compute/manager.py:1738
-msgid "Instance failed block device setup"
-msgstr "Instance failed block device setup"
-
-#: nova/compute/manager.py:1758 nova/compute/manager.py:2086
-#: nova/compute/manager.py:3985
-msgid "Instance failed to spawn"
-msgstr "Instance failed to spawn"
-
-#: nova/compute/manager.py:1937
-msgid "Unexpected build failure, not rescheduling build."
-msgstr ""
-
-#: nova/compute/manager.py:2002
+#: nova/compute/manager.py:2027
#, python-format
msgid "Failed to allocate the network(s) with error %s, not rescheduling."
msgstr ""
-#: nova/compute/manager.py:2008 nova/compute/manager.py:2048
-msgid "Failed to allocate network(s)"
-msgstr ""
-
-#: nova/compute/manager.py:2012 nova/compute/manager.py:2050
+#: nova/compute/manager.py:2037 nova/compute/manager.py:2087
msgid "Failed to allocate the network(s), not rescheduling."
msgstr ""
-#: nova/compute/manager.py:2074
-msgid "Failure prepping block device"
-msgstr ""
-
-#: nova/compute/manager.py:2076
+#: nova/compute/manager.py:2113
msgid "Failure prepping block device."
msgstr ""
-#: nova/compute/manager.py:2099
+#: nova/compute/manager.py:2134
msgid "Could not clean up failed build, not rescheduling"
msgstr ""
-#: nova/compute/manager.py:2109
-msgid "Failed to deallocate networks"
-msgstr ""
-
-#: nova/compute/manager.py:2130
-msgid "Failed to cleanup volumes for failed build, not rescheduling"
-msgstr ""
-
-#: nova/compute/manager.py:2169
+#: nova/compute/manager.py:2192
#, fuzzy
msgid "Failed to deallocate network for instance."
msgstr "Failed to dealloc network for deleted instance"
-#: nova/compute/manager.py:2178
+#: nova/compute/manager.py:2213
#, python-format
msgid "%(action_str)s instance"
msgstr "%(action_str)s instance"
-#: nova/compute/manager.py:2222
-#, python-format
-msgid "Ignoring DiskNotFound: %s"
-msgstr "Ignoring DiskNotFound: %s"
-
-#: nova/compute/manager.py:2225
-#, python-format
-msgid "Ignoring VolumeNotFound: %s"
-msgstr "Ignoring VolumeNotFound: %s"
-
-#: nova/compute/manager.py:2324
+#: nova/compute/manager.py:2368
msgid "Instance disappeared during terminate"
msgstr ""
-#: nova/compute/manager.py:2330 nova/compute/manager.py:3653
-#: nova/compute/manager.py:5671
-msgid "Setting instance vm_state to ERROR"
-msgstr ""
-
-#: nova/compute/manager.py:2503
+#: nova/compute/manager.py:2554
msgid "Rebuilding instance"
msgstr "Rebuilding instance"
-#: nova/compute/manager.py:2516
+#: nova/compute/manager.py:2567
msgid "Invalid state of instance files on shared storage"
msgstr ""
-#: nova/compute/manager.py:2520
+#: nova/compute/manager.py:2571
msgid "disk on shared storage, recreating using existing disk"
msgstr ""
-#: nova/compute/manager.py:2524
+#: nova/compute/manager.py:2575
#, python-format
msgid "disk not on shared storage, rebuilding from: '%s'"
msgstr ""
-#: nova/compute/manager.py:2535 nova/compute/manager.py:4790
-#, fuzzy, python-format
-msgid "Failed to get compute_info for %s"
-msgstr "Failed to get info for disk %s"
-
-#: nova/compute/manager.py:2611
-#, python-format
-msgid "bringing vm to original state: '%s'"
-msgstr ""
-
-#: nova/compute/manager.py:2642
+#: nova/compute/manager.py:2694
#, fuzzy, python-format
msgid "Detaching from volume api: %s"
msgstr "Attach boot from volume failed: %s"
-#: nova/compute/manager.py:2669
+#: nova/compute/manager.py:2721
msgid "Rebooting instance"
msgstr "Rebooting instance"
-#: nova/compute/manager.py:2686
+#: nova/compute/manager.py:2738
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
@@ -5287,25 +4988,25 @@ msgstr ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
-#: nova/compute/manager.py:2722
+#: nova/compute/manager.py:2774
msgid "Reboot failed but instance is running"
msgstr ""
-#: nova/compute/manager.py:2730
+#: nova/compute/manager.py:2782
#, python-format
msgid "Cannot reboot instance: %s"
msgstr ""
-#: nova/compute/manager.py:2742
+#: nova/compute/manager.py:2794
#, fuzzy
msgid "Instance disappeared during reboot"
msgstr "instance %s: rebooted"
-#: nova/compute/manager.py:2810
+#: nova/compute/manager.py:2862
msgid "instance snapshotting"
msgstr "instance snapshotting"
-#: nova/compute/manager.py:2816
+#: nova/compute/manager.py:2868
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
@@ -5314,197 +5015,162 @@ msgstr ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
-#: nova/compute/manager.py:2854
+#: nova/compute/manager.py:2901
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr ""
+
+#: nova/compute/manager.py:2906
msgid "Image not found during snapshot"
msgstr ""
-#: nova/compute/manager.py:2936
+#: nova/compute/manager.py:2988
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr "Failed to set admin password. Instance %s is not running"
-#: nova/compute/manager.py:2943
+#: nova/compute/manager.py:2995
msgid "Root password set"
msgstr "Root password set"
-#: nova/compute/manager.py:2948
+#: nova/compute/manager.py:3000
#, fuzzy
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr "set_admin_password is not implemented by this driver."
-#: nova/compute/manager.py:2961
-#, python-format
-msgid "set_admin_password failed: %s"
-msgstr "set_admin_password failed: %s"
-
-#: nova/compute/manager.py:2967
+#: nova/compute/manager.py:3019
msgid "error setting admin password"
msgstr "error setting admin password"
-#: nova/compute/manager.py:2983
+#: nova/compute/manager.py:3035
#, python-format
msgid ""
"trying to inject a file into a non-running (state: %(current_state)s "
"expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:2988
+#: nova/compute/manager.py:3040
#, python-format
msgid "injecting file to %s"
msgstr ""
-#: nova/compute/manager.py:3006
+#: nova/compute/manager.py:3058
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
msgstr ""
-#: nova/compute/manager.py:3025
+#: nova/compute/manager.py:3077
msgid "Rescuing"
msgstr "Rescuing"
-#: nova/compute/manager.py:3046
-#, fuzzy
-msgid "Error trying to Rescue Instance"
-msgstr "Error trying to reschedule"
-
-#: nova/compute/manager.py:3050
+#: nova/compute/manager.py:3102
#, fuzzy, python-format
msgid "Driver Error: %s"
msgstr "DB error: %s"
-#: nova/compute/manager.py:3073
+#: nova/compute/manager.py:3125
msgid "Unrescuing"
msgstr "Unrescuing"
-#: nova/compute/manager.py:3144
+#: nova/compute/manager.py:3196
#, python-format
msgid "Migration %s is not found during confirmation"
msgstr ""
-#: nova/compute/manager.py:3149
+#: nova/compute/manager.py:3201
#, python-format
msgid "Migration %s is already confirmed"
msgstr ""
-#: nova/compute/manager.py:3153
+#: nova/compute/manager.py:3205
#, python-format
msgid ""
"Unexpected confirmation status '%(status)s' of migration %(id)s, exit "
"confirmation process"
msgstr ""
-#: nova/compute/manager.py:3167
+#: nova/compute/manager.py:3219
msgid "Instance is not found during confirmation"
msgstr ""
-#: nova/compute/manager.py:3348
+#: nova/compute/manager.py:3400
#, fuzzy, python-format
msgid "Updating instance to original state: '%s'"
msgstr "Setting instance to %(state)s state."
-#: nova/compute/manager.py:3371
+#: nova/compute/manager.py:3423
#, fuzzy
msgid "Instance has no source host"
msgstr "Instance has no volume."
-#: nova/compute/manager.py:3377
+#: nova/compute/manager.py:3429
msgid "destination same as source!"
msgstr "destination same as source!"
-#: nova/compute/manager.py:3395
+#: nova/compute/manager.py:3447
msgid "Migrating"
msgstr "Migrating"
-#: nova/compute/manager.py:3659
-#, python-format
-msgid "Failed to rollback quota for failed finish_resize: %s"
-msgstr ""
-
-#: nova/compute/manager.py:3719
+#: nova/compute/manager.py:3784
msgid "Pausing"
msgstr "Pausing"
-#: nova/compute/manager.py:3736
+#: nova/compute/manager.py:3801
msgid "Unpausing"
msgstr "Unpausing"
-#: nova/compute/manager.py:3777
+#: nova/compute/manager.py:3842 nova/compute/manager.py:3859
msgid "Retrieving diagnostics"
msgstr "Retrieving diagnostics"
-#: nova/compute/manager.py:3812
+#: nova/compute/manager.py:3895
msgid "Resuming"
msgstr "Resuming"
-#: nova/compute/manager.py:4028
+#: nova/compute/manager.py:4115
msgid "Get console output"
msgstr "Get console output"
-#: nova/compute/manager.py:4227
+#: nova/compute/manager.py:4314
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr "Attaching volume %(volume_id)s to %(mountpoint)s"
-#: nova/compute/manager.py:4236
-#, python-format
-msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
-msgstr ""
-
-#: nova/compute/manager.py:4252
+#: nova/compute/manager.py:4339
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr "Detach volume %(volume_id)s from mountpoint %(mp)s"
-#: nova/compute/manager.py:4263
+#: nova/compute/manager.py:4350
msgid "Detaching volume from unknown instance"
msgstr "Detaching volume from unknown instance"
-#: nova/compute/manager.py:4275
-#, fuzzy, python-format
-msgid "Failed to detach volume %(volume_id)s from %(mp)s"
-msgstr "Faild to detach volume %(volume_id)s from %(mp)s"
-
-#: nova/compute/manager.py:4348
-#, python-format
-msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
-msgstr ""
-
-#: nova/compute/manager.py:4355
-#, python-format
-msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
-msgstr ""
-
-#: nova/compute/manager.py:4442
+#: nova/compute/manager.py:4544
#, fuzzy, python-format
msgid "allocate_port_for_instance returned %(ports)s ports"
msgstr "allocate_for_instance() for %s"
-#: nova/compute/manager.py:4462
+#: nova/compute/manager.py:4568
#, python-format
msgid "Port %s is not attached"
msgstr ""
-#: nova/compute/manager.py:4474 nova/tests/compute/test_compute.py:10545
+#: nova/compute/manager.py:4580 nova/tests/compute/test_compute.py:10791
#, python-format
msgid "Host %s not found"
msgstr ""
-#: nova/compute/manager.py:4628
-#, python-format
-msgid "Pre live migration failed at %s"
-msgstr ""
-
-#: nova/compute/manager.py:4658
+#: nova/compute/manager.py:4798
msgid "_post_live_migration() is started.."
msgstr "_post_live_migration() is started.."
-#: nova/compute/manager.py:4731
+#: nova/compute/manager.py:4874
#, python-format
msgid "Migrating instance to %s finished successfully."
msgstr ""
-#: nova/compute/manager.py:4733
+#: nova/compute/manager.py:4876
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
@@ -5512,15 +5178,15 @@ msgstr ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
-#: nova/compute/manager.py:4758
+#: nova/compute/manager.py:4901
msgid "Post operation of migration started"
msgstr "Post operation of migration started"
-#: nova/compute/manager.py:4967
+#: nova/compute/manager.py:5106
msgid "An error occurred while refreshing the network cache."
msgstr ""
-#: nova/compute/manager.py:5021
+#: nova/compute/manager.py:5159
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
@@ -5529,12 +5195,12 @@ msgstr ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
-#: nova/compute/manager.py:5026
+#: nova/compute/manager.py:5164
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr "Setting migration %(migration_id)s to error: %(reason)s"
-#: nova/compute/manager.py:5035
+#: nova/compute/manager.py:5173
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
@@ -5543,30 +5209,26 @@ msgstr ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
-#: nova/compute/manager.py:5045
+#: nova/compute/manager.py:5183
#, python-format
msgid "Instance %s not found"
msgstr ""
-#: nova/compute/manager.py:5050
+#: nova/compute/manager.py:5188
msgid "In ERROR state"
msgstr "In ERROR state"
-#: nova/compute/manager.py:5057
+#: nova/compute/manager.py:5195
#, fuzzy, python-format
msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None"
msgstr "In states %(vm_state)s/%(task_state)s, notRESIZED/None"
-#: nova/compute/manager.py:5068
+#: nova/compute/manager.py:5206
#, python-format
msgid "Error auto-confirming resize: %s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:5097
-msgid "Periodic task failed to offload instance."
-msgstr ""
-
-#: nova/compute/manager.py:5117
+#: nova/compute/manager.py:5255
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
@@ -5575,20 +5237,15 @@ msgstr ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
-#: nova/compute/manager.py:5137
-#, python-format
-msgid "Failed to generate usage audit for instance on host %s"
-msgstr "Failed to generate usage audit for instance on host %s"
-
-#: nova/compute/manager.py:5166
+#: nova/compute/manager.py:5304
msgid "Updating bandwidth usage cache"
msgstr "Updating bandwidth usage cache"
-#: nova/compute/manager.py:5188
+#: nova/compute/manager.py:5326
msgid "Bandwidth usage not supported by hypervisor."
msgstr ""
-#: nova/compute/manager.py:5311
+#: nova/compute/manager.py:5449
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
@@ -5597,16 +5254,7 @@ msgstr ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
-#: nova/compute/manager.py:5318 nova/compute/manager.py:5381
-#, python-format
-msgid "During sync_power_state the instance has a pending task (%(task)s). Skip."
-msgstr ""
-
-#: nova/compute/manager.py:5342
-msgid "Periodic sync_power_state task had an error while processing an instance."
-msgstr ""
-
-#: nova/compute/manager.py:5368
+#: nova/compute/manager.py:5515
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
@@ -5615,110 +5263,110 @@ msgstr ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
-#: nova/compute/manager.py:5406
+#: nova/compute/manager.py:5528
+#, python-format
+msgid "During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/compute/manager.py:5553
msgid "Instance shutdown by itself. Calling the stop API."
msgstr "Instance shutdown by itself. Calling the stop API."
-#: nova/compute/manager.py:5418 nova/compute/manager.py:5427
-#: nova/compute/manager.py:5458 nova/compute/manager.py:5469
-msgid "error during stop() in sync_power_state."
-msgstr "error during stop() in sync_power_state."
-
-#: nova/compute/manager.py:5422
+#: nova/compute/manager.py:5572
#, fuzzy
msgid "Instance is suspended unexpectedly. Calling the stop API."
msgstr "Instance is paused or suspended unexpectedly. Calling the stop API."
-#: nova/compute/manager.py:5438
+#: nova/compute/manager.py:5588
#, fuzzy
msgid "Instance is paused unexpectedly. Ignore."
msgstr "Instance is paused or suspended unexpectedly. Calling the stop API."
-#: nova/compute/manager.py:5444
+#: nova/compute/manager.py:5594
msgid "Instance is unexpectedly not found. Ignore."
msgstr ""
-#: nova/compute/manager.py:5450
+#: nova/compute/manager.py:5600
msgid "Instance is not stopped. Calling the stop API."
msgstr "Instance is not stopped. Calling the stop API."
-#: nova/compute/manager.py:5464
+#: nova/compute/manager.py:5614
msgid "Paused instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:5478
+#: nova/compute/manager.py:5628
msgid "Instance is not (soft-)deleted."
msgstr "Instance is not (soft-)deleted."
-#: nova/compute/manager.py:5507
+#: nova/compute/manager.py:5658
msgid "Reclaiming deleted instance"
msgstr "Reclaiming deleted instance"
-#: nova/compute/manager.py:5511
+#: nova/compute/manager.py:5662
#, python-format
msgid "Periodic reclaim failed to delete instance: %s"
msgstr ""
-#: nova/compute/manager.py:5536
+#: nova/compute/manager.py:5687
#, fuzzy, python-format
msgid "Deleting orphan compute node %s"
msgstr "Loading compute driver '%s'"
-#: nova/compute/manager.py:5544 nova/compute/resource_tracker.py:392
+#: nova/compute/manager.py:5695 nova/compute/resource_tracker.py:406
#, python-format
msgid "No service record for host %s"
msgstr "No service record for host %s"
-#: nova/compute/manager.py:5585
+#: nova/compute/manager.py:5735
#, python-format
msgid ""
"Detected instance with name label '%s' which is marked as DELETED but "
"still present on host."
msgstr ""
-#: nova/compute/manager.py:5591
+#: nova/compute/manager.py:5741
#, python-format
msgid ""
"Powering off instance with name label '%s' which is marked as DELETED but"
" still present on host."
msgstr ""
-#: nova/compute/manager.py:5600
+#: nova/compute/manager.py:5750
msgid "set_bootable is not implemented for the current driver"
msgstr ""
-#: nova/compute/manager.py:5605
+#: nova/compute/manager.py:5755
msgid "Failed to power off instance"
msgstr ""
-#: nova/compute/manager.py:5609
+#: nova/compute/manager.py:5759
#, python-format
msgid ""
"Destroying instance with name label '%s' which is marked as DELETED but "
"still present on host."
msgstr ""
-#: nova/compute/manager.py:5619
+#: nova/compute/manager.py:5769
#, python-format
msgid "Periodic cleanup failed to delete instance: %s"
msgstr ""
-#: nova/compute/manager.py:5623
+#: nova/compute/manager.py:5773
#, python-format
msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action"
msgstr ""
-#: nova/compute/manager.py:5654
+#: nova/compute/manager.py:5805
#, python-format
msgid "Setting instance back to %(state)s after: %(error)s"
msgstr ""
-#: nova/compute/manager.py:5664
+#: nova/compute/manager.py:5815
#, fuzzy, python-format
msgid "Setting instance back to ACTIVE after: %s"
msgstr "Setting instance to %(state)s state."
-#: nova/compute/resource_tracker.py:106
+#: nova/compute/resource_tracker.py:111
#, fuzzy
msgid ""
"Host field should not be set on the instance until resources have been "
@@ -5727,22 +5375,22 @@ msgstr ""
"Host field should be not be set on the instance until resources have been"
" claimed."
-#: nova/compute/resource_tracker.py:111
+#: nova/compute/resource_tracker.py:116
msgid ""
"Node field should not be set on the instance until resources have been "
"claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:273
+#: nova/compute/resource_tracker.py:276
#, python-format
msgid "Cannot get the metrics from %s."
msgstr ""
-#: nova/compute/resource_tracker.py:292
+#: nova/compute/resource_tracker.py:295
msgid "Auditing locally available compute resources"
msgstr ""
-#: nova/compute/resource_tracker.py:297
+#: nova/compute/resource_tracker.py:300
msgid ""
"Virt driver does not support 'get_available_resource' Compute tracking "
"is disabled."
@@ -5750,62 +5398,64 @@ msgstr ""
"Virt driver does not support 'get_available_resource' Compute tracking "
"is disabled."
-#: nova/compute/resource_tracker.py:372
+#: nova/compute/resource_tracker.py:375
#, fuzzy, python-format
msgid "Compute_service record created for %(host)s:%(node)s"
msgstr "Compute_service record created for %s "
-#: nova/compute/resource_tracker.py:378
+#: nova/compute/resource_tracker.py:381
#, fuzzy, python-format
msgid "Compute_service record updated for %(host)s:%(node)s"
msgstr "Compute_service record updated for %s "
-#: nova/compute/resource_tracker.py:431
+#: nova/compute/resource_tracker.py:446
#, python-format
-msgid "Free ram (MB): %s"
-msgstr "Free ram (MB): %s"
+msgid ""
+"Total physical ram (MB): %(pram)s, total allocated virtual ram (MB): "
+"%(vram)s"
+msgstr ""
-#: nova/compute/resource_tracker.py:432
+#: nova/compute/resource_tracker.py:450
#, python-format
msgid "Free disk (GB): %s"
msgstr "Free disk (GB): %s"
-#: nova/compute/resource_tracker.py:437
+#: nova/compute/resource_tracker.py:454
#, python-format
-msgid "Free VCPUS: %s"
-msgstr "Free VCPUS: %s"
+msgid "Total usable vcpus: %(tcpu)s, total allocated vcpus: %(ucpu)s"
+msgstr ""
-#: nova/compute/resource_tracker.py:439
+#: nova/compute/resource_tracker.py:458
msgid "Free VCPU information unavailable"
msgstr "Free VCPU information unavailable"
-#: nova/compute/resource_tracker.py:442
+#: nova/compute/resource_tracker.py:461
#, python-format
msgid "PCI stats: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:478
+#: nova/compute/resource_tracker.py:512
#, fuzzy, python-format
msgid "Updating from migration %s"
msgstr "Starting finish_migration"
-#: nova/compute/resource_tracker.py:545
+#: nova/compute/resource_tracker.py:577
#, fuzzy
msgid "Instance not resizing, skipping migration."
msgstr "VM is not present, skipping destroy..."
-#: nova/compute/resource_tracker.py:560
+#: nova/compute/resource_tracker.py:592
msgid "Flavor could not be found, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:650
+#: nova/compute/resource_tracker.py:682
#, python-format
msgid ""
"Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB "
"memory)"
msgstr ""
-#: nova/compute/resource_tracker.py:664
+#: nova/compute/resource_tracker.py:696
#, python-format
msgid "Missing keys: %s"
msgstr "Missing keys: %s"
@@ -5819,39 +5469,23 @@ msgstr "No compute host specified"
msgid "Unable to find host for Instance %s"
msgstr "Unable to find host for Instance %s"
-#: nova/compute/utils.py:209
-#, python-format
-msgid "Can't access image %(image_id)s: %(error)s"
-msgstr ""
-
-#: nova/compute/utils.py:333
-#, python-format
-msgid ""
-"No host name specified for the notification of HostAPI.%s and it will be "
-"ignored"
-msgstr ""
-
-#: nova/compute/utils.py:461
-#, python-format
-msgid ""
-"Value of 0 or None specified for %s. This behaviour will change in "
-"meaning in the K release, to mean 'call at the default rate' rather than "
-"'do not call'. To keep the 'do not call' behaviour, use a negative value."
+#: nova/compute/stats.py:49
+msgid "Unexpected type adding stats"
msgstr ""
-#: nova/compute/monitors/__init__.py:177
+#: nova/compute/monitors/__init__.py:176
#, python-format
msgid ""
"Excluding monitor %(monitor_name)s due to metric name overlap; "
"overlapping metrics: %(overlap)s"
msgstr ""
-#: nova/compute/monitors/__init__.py:185
+#: nova/compute/monitors/__init__.py:184
#, python-format
msgid "Monitor %(monitor_name)s cannot be used: %(ex)s"
msgstr ""
-#: nova/compute/monitors/__init__.py:191
+#: nova/compute/monitors/__init__.py:190
#, python-format
msgid "The following monitors have been disabled: %s"
msgstr ""
@@ -5861,46 +5495,50 @@ msgstr ""
msgid "Not all properties needed are implemented in the compute driver: %s"
msgstr ""
-#: nova/conductor/api.py:300
+#: nova/conductor/api.py:315
msgid "nova-conductor connection established successfully"
msgstr ""
-#: nova/conductor/api.py:305
+#: nova/conductor/api.py:320
msgid ""
"Timed out waiting for nova-conductor. Is it running? Or did this service"
" start before nova-conductor? Reattempting establishment of nova-"
"conductor connection..."
msgstr ""
-#: nova/conductor/manager.py:124
+#: nova/conductor/manager.py:123
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr ""
-#: nova/conductor/manager.py:522
+#: nova/conductor/manager.py:519
msgid "No valid host found for cold migrate"
msgstr ""
-#: nova/conductor/manager.py:586
+#: nova/conductor/manager.py:582
#, python-format
msgid ""
"Migration of instance %(instance_id)s to host %(dest)s unexpectedly "
"failed."
msgstr ""
-#: nova/conductor/manager.py:673
+#: nova/conductor/manager.py:669
#, python-format
msgid "Unshelve attempted but the image %s cannot be found."
msgstr ""
-#: nova/conductor/manager.py:696
+#: nova/conductor/manager.py:692
msgid "No valid host found for unshelve instance"
msgstr ""
-#: nova/conductor/manager.py:700
+#: nova/conductor/manager.py:696
msgid "Unshelve attempted but vm_state not SHELVED or SHELVED_OFFLOADED"
msgstr ""
+#: nova/conductor/manager.py:733
+msgid "No valid host found for rebuild"
+msgstr ""
+
#: nova/conductor/tasks/live_migrate.py:113
#, python-format
msgid ""
@@ -5974,85 +5612,85 @@ msgstr ""
msgid "Failed to notify cells of instance update"
msgstr "Failed to reboot instance"
-#: nova/db/api.py:1685
+#: nova/db/api.py:1683
msgid "Failed to notify cells of bw_usage update"
msgstr ""
-#: nova/db/sqlalchemy/api.py:204
+#: nova/db/sqlalchemy/api.py:207
#, python-format
msgid "Deadlock detected when running '%(func_name)s': Retrying..."
msgstr ""
-#: nova/db/sqlalchemy/api.py:245
+#: nova/db/sqlalchemy/api.py:248
msgid "model or base_model parameter should be subclass of NovaBase"
msgstr ""
-#: nova/db/sqlalchemy/api.py:258
-#: nova/openstack/common/db/sqlalchemy/utils.py:174
-#: nova/virt/baremetal/db/sqlalchemy/api.py:60
+#: nova/db/sqlalchemy/api.py:261
+#: nova/openstack/common/db/sqlalchemy/utils.py:173
+#: nova/virt/baremetal/db/sqlalchemy/api.py:61
#, python-format
msgid "Unrecognized read_deleted value '%s'"
msgstr "Unrecognized read_deleted value '%s'"
-#: nova/db/sqlalchemy/api.py:745
+#: nova/db/sqlalchemy/api.py:753
#, fuzzy, python-format
msgid "Invalid floating ip id %s in request"
msgstr "instance %s: rescued"
-#: nova/db/sqlalchemy/api.py:850
+#: nova/db/sqlalchemy/api.py:858
msgid "Failed to update usages bulk deallocating floating IP"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1006
+#: nova/db/sqlalchemy/api.py:1007
#, fuzzy, python-format
msgid "Invalid floating IP %s in request"
msgstr "instance %s: rescued"
-#: nova/db/sqlalchemy/api.py:1308 nova/db/sqlalchemy/api.py:1347
+#: nova/db/sqlalchemy/api.py:1310 nova/db/sqlalchemy/api.py:1349
#, fuzzy, python-format
msgid "Invalid fixed IP Address %s in request"
msgstr "instance %s: rescued"
-#: nova/db/sqlalchemy/api.py:1482
+#: nova/db/sqlalchemy/api.py:1484
#, fuzzy, python-format
msgid "Invalid virtual interface address %s in request"
msgstr "instance %s: rescued"
-#: nova/db/sqlalchemy/api.py:1576
+#: nova/db/sqlalchemy/api.py:1578
#, python-format
msgid ""
"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:1735
+#: nova/db/sqlalchemy/api.py:1738
#, fuzzy, python-format
msgid "Invalid instance id %s in request"
msgstr "instance %s: rescued"
-#: nova/db/sqlalchemy/api.py:2013
+#: nova/db/sqlalchemy/api.py:2017
#, python-format
msgid "Invalid field name: %s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:3242
+#: nova/db/sqlalchemy/api.py:3246
#, python-format
msgid "Change will make usage less than 0 for the following resources: %s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4892
+#: nova/db/sqlalchemy/api.py:4898
#, python-format
msgid ""
"Volume(%s) has lower stats then what is in the database. Instance must "
"have been rebooted or crashed. Updating totals."
msgstr ""
-#: nova/db/sqlalchemy/api.py:5249
+#: nova/db/sqlalchemy/api.py:5262
#, python-format
msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries"
msgstr ""
-#: nova/db/sqlalchemy/api.py:5639
+#: nova/db/sqlalchemy/api.py:5652
#, python-format
msgid "IntegrityError detected when archiving table %s"
msgstr ""
@@ -6085,15 +5723,15 @@ msgstr ""
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr ""
-#: nova/db/sqlalchemy/utils.py:105
+#: nova/db/sqlalchemy/utils.py:103
msgid "Specify `table_name` or `table` param"
msgstr ""
-#: nova/db/sqlalchemy/utils.py:108
+#: nova/db/sqlalchemy/utils.py:106
msgid "Specify only one param `table_name` `table`"
msgstr ""
-#: nova/db/sqlalchemy/utils.py:131 nova/db/sqlalchemy/utils.py:135
+#: nova/db/sqlalchemy/utils.py:129 nova/db/sqlalchemy/utils.py:133
#: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:84
#: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:1103
msgid "Exception while creating table."
@@ -6103,7 +5741,7 @@ msgstr ""
msgid "Exception while seeding instance_types table"
msgstr ""
-#: nova/image/glance.py:231
+#: nova/image/glance.py:235
#, python-format
msgid ""
"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
@@ -6112,19 +5750,19 @@ msgstr ""
"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
"%(extra)s."
-#: nova/image/glance.py:265
+#: nova/image/glance.py:267
#, python-format
msgid ""
"When loading the module %(module_str)s the following error occurred: "
"%(ex)s"
msgstr ""
-#: nova/image/glance.py:303
+#: nova/image/glance.py:326
#, python-format
msgid "Failed to instantiate the download handler for %(scheme)s"
msgstr ""
-#: nova/image/glance.py:319
+#: nova/image/glance.py:342
#, python-format
msgid "Successfully transferred using %s"
msgstr ""
@@ -6270,17 +5908,17 @@ msgstr ""
msgid "Not deleting key %s"
msgstr ""
-#: nova/network/api.py:198 nova/network/neutronv2/api.py:797
+#: nova/network/api.py:196 nova/network/neutronv2/api.py:845
#, python-format
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr "re-assign floating IP %(address)s from instance %(instance_id)s"
-#: nova/network/base_api.py:49
+#: nova/network/base_api.py:48
#, fuzzy
msgid "Failed storing info cache"
msgstr "Failed to terminate instance"
-#: nova/network/base_api.py:68
+#: nova/network/base_api.py:67
msgid "instance is a required argument to use @refresh_cache"
msgstr "instance is a required argument to use @refresh_cache"
@@ -6294,63 +5932,63 @@ msgstr "Compute driver option required, but not specified"
msgid "Loading network driver '%s'"
msgstr "Loading compute driver '%s'"
-#: nova/network/floating_ips.py:90
+#: nova/network/floating_ips.py:85
#, python-format
msgid "Fixed ip %s not found"
msgstr ""
-#: nova/network/floating_ips.py:180
+#: nova/network/floating_ips.py:176
#, python-format
msgid "Floating IP %s is not associated. Ignore."
msgstr ""
-#: nova/network/floating_ips.py:199
+#: nova/network/floating_ips.py:195
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr "Address |%(address)s| is not allocated"
-#: nova/network/floating_ips.py:203
+#: nova/network/floating_ips.py:199
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr "Address |%(address)s| is not allocated to your project |%(project)s|"
-#: nova/network/floating_ips.py:223
+#: nova/network/floating_ips.py:219
#, python-format
msgid "Quota exceeded for %s, tried to allocate floating IP"
msgstr ""
-#: nova/network/floating_ips.py:283
+#: nova/network/floating_ips.py:278
msgid "Failed to update usages deallocating floating IP"
msgstr "Failed to update usages deallocating floating IP"
-#: nova/network/floating_ips.py:385
+#: nova/network/floating_ips.py:376
#, python-format
msgid "Failed to disassociated floating address: %s"
msgstr ""
-#: nova/network/floating_ips.py:390
+#: nova/network/floating_ips.py:381
#, python-format
msgid "Interface %s not found"
msgstr ""
-#: nova/network/floating_ips.py:553
+#: nova/network/floating_ips.py:540
#, python-format
msgid "Starting migration network for instance %s"
msgstr ""
-#: nova/network/floating_ips.py:560
+#: nova/network/floating_ips.py:546
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will not migrate it "
msgstr ""
-#: nova/network/floating_ips.py:593
+#: nova/network/floating_ips.py:575
#, python-format
msgid "Finishing migration network for instance %s"
msgstr ""
-#: nova/network/floating_ips.py:601
+#: nova/network/floating_ips.py:582
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
@@ -6359,7 +5997,7 @@ msgstr ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
-#: nova/network/floating_ips.py:644
+#: nova/network/floating_ips.py:625
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -6370,12 +6008,12 @@ msgstr ""
"not visible to either the floating or instance DNS driver. It will be "
"ignored."
-#: nova/network/floating_ips.py:684
+#: nova/network/floating_ips.py:665
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
-#: nova/network/floating_ips.py:693
+#: nova/network/floating_ips.py:674
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr "Domain |%(domain)s| already exists, changing project to |%(project)s|."
@@ -6404,17 +6042,17 @@ msgstr "This driver only supports type 'a' entries."
msgid "This shouldn't be getting called except during testing."
msgstr ""
-#: nova/network/linux_net.py:227
+#: nova/network/linux_net.py:232
#, python-format
msgid "Attempted to remove chain %s which does not exist"
msgstr "Attempted to remove chain %s which does not exist"
-#: nova/network/linux_net.py:263
+#: nova/network/linux_net.py:268
#, python-format
msgid "Unknown chain: %r"
msgstr "Unknown chain: %r"
-#: nova/network/linux_net.py:294
+#: nova/network/linux_net.py:301
#, python-format
msgid ""
"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
@@ -6423,52 +6061,52 @@ msgstr ""
"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
"%(top)r"
-#: nova/network/linux_net.py:762
+#: nova/network/linux_net.py:777
#, python-format
msgid "Removed %(num)d duplicate rules for floating ip %(float)s"
msgstr ""
-#: nova/network/linux_net.py:810
+#: nova/network/linux_net.py:825
#, python-format
msgid "Error deleting conntrack entries for %s"
msgstr ""
-#: nova/network/linux_net.py:1068
+#: nova/network/linux_net.py:1091
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr "Hupping dnsmasq threw %s"
-#: nova/network/linux_net.py:1150
+#: nova/network/linux_net.py:1172
#, python-format
msgid "killing radvd threw %s"
msgstr "killing radvd threw %s"
-#: nova/network/linux_net.py:1302
+#: nova/network/linux_net.py:1333
#, python-format
msgid "Unable to execute %(cmd)s. Exception: %(exception)s"
msgstr ""
-#: nova/network/linux_net.py:1360
+#: nova/network/linux_net.py:1391
#, python-format
msgid "Failed removing net device: '%s'"
msgstr ""
-#: nova/network/linux_net.py:1532
+#: nova/network/linux_net.py:1568
#, fuzzy, python-format
msgid "Adding interface %(interface)s to bridge %(bridge)s"
msgstr "Ensuring vlan %(vlan)s and bridge %(bridge)s"
-#: nova/network/linux_net.py:1538
+#: nova/network/linux_net.py:1574
#, python-format
msgid "Failed to add interface: %s"
msgstr "Failed to add interface: %s"
-#: nova/network/manager.py:836
+#: nova/network/manager.py:813
#, python-format
msgid "instance-dns-zone not found |%s|."
msgstr ""
-#: nova/network/manager.py:843
+#: nova/network/manager.py:820
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -6479,55 +6117,50 @@ msgstr ""
"|%(zone)s|. Instance is in zone |%(zone2)s|. No DNS record will be "
"created."
-#: nova/network/manager.py:882
-#, python-format
-msgid "Quota exceeded for %s, tried to allocate fixed IP"
-msgstr ""
-
-#: nova/network/manager.py:942
+#: nova/network/manager.py:943
msgid "Error cleaning up fixed ip allocation. Manual cleanup may be required."
msgstr ""
-#: nova/network/manager.py:972
+#: nova/network/manager.py:973
#, fuzzy
msgid "Failed to update usages deallocating fixed IP"
msgstr "Failed to update usages deallocating floating IP"
-#: nova/network/manager.py:996
+#: nova/network/manager.py:997
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr "Unable to release %s because vif doesn't exist."
-#: nova/network/manager.py:1037
+#: nova/network/manager.py:1038
#, python-format
msgid "IP %s leased that is not associated"
msgstr "IP %s leased that is not associated"
-#: nova/network/manager.py:1043
+#: nova/network/manager.py:1044
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr "IP |%s| leased that isn't allocated"
-#: nova/network/manager.py:1052
+#: nova/network/manager.py:1053
#, python-format
msgid "IP %s released that is not associated"
msgstr "IP %s released that is not associated"
-#: nova/network/manager.py:1056
+#: nova/network/manager.py:1057
#, python-format
msgid "IP %s released that was not leased"
msgstr "IP %s released that was not leased"
-#: nova/network/manager.py:1074
+#: nova/network/manager.py:1075
#, python-format
msgid "%s must be an integer"
msgstr "%s must be an integer"
-#: nova/network/manager.py:1106
+#: nova/network/manager.py:1107
msgid "Maximum allowed length for 'label' is 255."
msgstr "Maximum allowed length for 'label' is 255."
-#: nova/network/manager.py:1126
+#: nova/network/manager.py:1127
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
@@ -6536,16 +6169,16 @@ msgstr ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
-#: nova/network/manager.py:1211
+#: nova/network/manager.py:1212
msgid "cidr already in use"
msgstr "cidr already in use"
-#: nova/network/manager.py:1214
+#: nova/network/manager.py:1215
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
-#: nova/network/manager.py:1225
+#: nova/network/manager.py:1226
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
@@ -6559,7 +6192,7 @@ msgstr ""
msgid "Network must be disassociated from project %s before delete"
msgstr "Network must be disassociated from project %s before delete"
-#: nova/network/manager.py:1949
+#: nova/network/manager.py:1955
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
@@ -6567,7 +6200,7 @@ msgstr ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
-#: nova/network/manager.py:1956
+#: nova/network/manager.py:1962
#, fuzzy, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s networks. "
@@ -6600,109 +6233,37 @@ msgstr "_delete: %s"
msgid "Cannot delete domain |%s|"
msgstr "Cannot delete aggregate: %(id)s"
-#: nova/network/model.py:94
+#: nova/network/model.py:96
#, python-format
msgid "Invalid IP format %s"
msgstr ""
-#: nova/network/neutronv2/api.py:212
-msgid "Neutron error: quota exceeded"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:215
-#, python-format
-msgid "Neutron error creating port on network %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:248
+#: nova/network/neutronv2/api.py:269
#, python-format
msgid "empty project id for instance %s"
msgstr "empty project id for instance %s"
-#: nova/network/neutronv2/api.py:283
-msgid "No network configured!"
+#: nova/network/neutronv2/api.py:313 nova/network/neutronv2/api.py:678
+msgid "Multiple possible networks found, use a Network ID to be more specific."
msgstr ""
-#: nova/network/neutronv2/api.py:303
+#: nova/network/neutronv2/api.py:335
#, python-format
msgid ""
"Multiple security groups found matching '%s'. Use an ID to be more "
"specific."
msgstr ""
-#: nova/network/neutronv2/api.py:373
-#, python-format
-msgid "Failed to update port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:380
-#, python-format
-msgid "Failed to delete port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:443
+#: nova/network/neutronv2/api.py:489
#, python-format
msgid "Unable to reset device ID for port %s"
msgstr ""
-#: nova/network/neutronv2/api.py:451
-#, python-format
-msgid "Port %s does not exist"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:454 nova/network/neutronv2/api.py:478
-#, python-format
-msgid "Failed to delete neutron port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:576
-#, fuzzy, python-format
-msgid ""
-"Unable to update port %(portid)s on subnet %(subnet_id)s with failure: "
-"%(exception)s"
-msgstr "Fail to delete port %(portid)s with failure: %(exception)s"
-
-#: nova/network/neutronv2/api.py:605
-#, fuzzy, python-format
-msgid "Unable to update port %(portid)s with failure: %(exception)s"
-msgstr "Fail to delete port %(portid)s with failure: %(exception)s"
-
-#: nova/network/neutronv2/api.py:632
-msgid "Multiple possible networks found, use a Network ID to be more specific."
-msgstr ""
-
-#: nova/network/neutronv2/api.py:651
-#, python-format
-msgid "Failed to access port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:880
-#, python-format
-msgid "Unable to access floating IP %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:968
+#: nova/network/neutronv2/api.py:1021
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr "Multiple floating IP pools matches found for name '%s'"
-#: nova/network/neutronv2/api.py:1012
-#, python-format
-msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:1071
-#, python-format
-msgid "Unable to update host of port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:1107
-#, python-format
-msgid ""
-"Network %(id)s not matched with the tenants network! The ports tenant "
-"%(tenant_id)s will be used."
-msgstr ""
-
#: nova/network/security_group/neutron_driver.py:57
#, python-format
msgid "Neutron Error creating security group %s"
@@ -6786,6 +6347,14 @@ msgid ""
"%(instance)s"
msgstr ""
+#: nova/network/security_group/security_group_base.py:89
+msgid "Type and Code must be integers for ICMP protocol type"
+msgstr ""
+
+#: nova/network/security_group/security_group_base.py:92
+msgid "To and From ports must be integers"
+msgstr ""
+
#: nova/network/security_group/security_group_base.py:134
#, python-format
msgid "This rule already exists in group %s"
@@ -6796,22 +6365,22 @@ msgstr "This rule already exists in group %s"
msgid "Error setting %(attr)s"
msgstr "error setting admin password"
-#: nova/objects/base.py:247
+#: nova/objects/base.py:262
#, python-format
msgid "Unable to instantiate unregistered object type %(objtype)s"
msgstr ""
-#: nova/objects/base.py:366
+#: nova/objects/base.py:381
#, python-format
msgid "Cannot load '%s' in the base class"
msgstr ""
-#: nova/objects/base.py:412
+#: nova/objects/base.py:427
#, python-format
msgid "%(objname)s object has no attribute '%(attrname)s'"
msgstr ""
-#: nova/objects/block_device.py:136
+#: nova/objects/block_device.py:149
msgid "Volume does not belong to the requested instance."
msgstr ""
@@ -6825,44 +6394,44 @@ msgstr ""
msgid "Element %(key)s:%(val)s must be of type %(expected)s not %(actual)s"
msgstr ""
-#: nova/objects/fields.py:157
+#: nova/objects/fields.py:165
#, python-format
msgid "Field `%s' cannot be None"
msgstr ""
-#: nova/objects/fields.py:232
+#: nova/objects/fields.py:246
#, python-format
msgid "A string is required here, not %s"
msgstr ""
-#: nova/objects/fields.py:268
+#: nova/objects/fields.py:286
msgid "A datetime.datetime is required here"
msgstr ""
-#: nova/objects/fields.py:306 nova/objects/fields.py:315
-#: nova/objects/fields.py:324
+#: nova/objects/fields.py:328 nova/objects/fields.py:337
+#: nova/objects/fields.py:346
#, python-format
msgid "Network \"%s\" is not valid"
msgstr ""
-#: nova/objects/fields.py:363
+#: nova/objects/fields.py:385
msgid "A list is required here"
msgstr ""
-#: nova/objects/fields.py:379
+#: nova/objects/fields.py:405
msgid "A dict is required here"
msgstr ""
-#: nova/objects/fields.py:418
+#: nova/objects/fields.py:449
#, python-format
msgid "An object of type %s is required here"
msgstr ""
-#: nova/objects/fields.py:445
+#: nova/objects/fields.py:488
msgid "A NetworkModel is required here"
msgstr ""
-#: nova/objects/instance.py:432
+#: nova/objects/instance.py:433
#, python-format
msgid "No save handler for %s"
msgstr ""
@@ -6871,11 +6440,11 @@ msgstr ""
msgid "Failed to notify cells of instance info cache update"
msgstr ""
-#: nova/openstack/common/gettextutils.py:320
+#: nova/openstack/common/gettextutils.py:301
msgid "Message objects do not support addition."
msgstr ""
-#: nova/openstack/common/gettextutils.py:330
+#: nova/openstack/common/gettextutils.py:311
msgid ""
"Message objects do not support str() because they may contain non-ascii "
"characters. Please use unicode() or translate() instead."
@@ -6890,32 +6459,32 @@ msgstr ""
msgid "Snapshot list encountered but no header found!"
msgstr ""
-#: nova/openstack/common/lockutils.py:102
+#: nova/openstack/common/lockutils.py:101
#, python-format
msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s"
msgstr ""
-#: nova/openstack/common/log.py:327
+#: nova/openstack/common/log.py:289
#, fuzzy, python-format
msgid "Deprecated: %s"
msgstr "Deprecated Config: %s"
-#: nova/openstack/common/log.py:436
+#: nova/openstack/common/log.py:397
#, fuzzy, python-format
msgid "Error loading logging config %(log_config)s: %(err_msg)s"
msgstr "Error reading image info file %(filename)s: %(error)s"
-#: nova/openstack/common/log.py:486
+#: nova/openstack/common/log.py:458
#, python-format
msgid "syslog facility must be one of: %s"
msgstr "syslog facility must be one of: %s"
-#: nova/openstack/common/log.py:729
+#: nova/openstack/common/log.py:709
#, fuzzy, python-format
msgid "Fatal call to deprecated config: %(msg)s"
msgstr "Fatal call to deprecated config %(msg)s"
-#: nova/openstack/common/periodic_task.py:39
+#: nova/openstack/common/periodic_task.py:40
#, python-format
msgid "Unexpected argument for periodic task creation: %(arg)s."
msgstr ""
@@ -6969,40 +6538,50 @@ msgstr "Environment not supported over SSH"
msgid "process_input not supported over SSH"
msgstr "process_input not supported over SSH"
-#: nova/openstack/common/sslutils.py:98
+#: nova/openstack/common/sslutils.py:95
#, python-format
msgid "Invalid SSL version : %s"
msgstr ""
-#: nova/openstack/common/strutils.py:92
+#: nova/openstack/common/strutils.py:114
#, python-format
msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s"
msgstr ""
-#: nova/openstack/common/strutils.py:202
+#: nova/openstack/common/strutils.py:219
#, python-format
msgid "Invalid unit system: \"%s\""
msgstr ""
-#: nova/openstack/common/strutils.py:211
+#: nova/openstack/common/strutils.py:228
#, python-format
msgid "Invalid string format: %s"
msgstr ""
-#: nova/openstack/common/versionutils.py:69
+#: nova/openstack/common/versionutils.py:86
#, python-format
msgid ""
"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
"may be removed in %(remove_in)s."
msgstr ""
-#: nova/openstack/common/versionutils.py:73
+#: nova/openstack/common/versionutils.py:90
#, python-format
msgid ""
"%(what)s is deprecated as of %(as_of)s and may be removed in "
"%(remove_in)s. It will not be superseded."
msgstr ""
+#: nova/openstack/common/versionutils.py:94
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
+msgstr ""
+
+#: nova/openstack/common/versionutils.py:97
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
+msgstr ""
+
#: nova/openstack/common/db/sqlalchemy/migration.py:226
#, python-format
msgid ""
@@ -7016,18 +6595,18 @@ msgid ""
"the current version of the schema manually."
msgstr ""
-#: nova/openstack/common/db/sqlalchemy/utils.py:119
+#: nova/openstack/common/db/sqlalchemy/utils.py:118
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr "Unknown sort direction, must be 'desc' or 'asc'"
-#: nova/openstack/common/db/sqlalchemy/utils.py:162
+#: nova/openstack/common/db/sqlalchemy/utils.py:161
#, python-format
msgid ""
"There is no `deleted` column in `%s` table. Project doesn't use soft-"
"deleted feature."
msgstr ""
-#: nova/openstack/common/db/sqlalchemy/utils.py:181
+#: nova/openstack/common/db/sqlalchemy/utils.py:180
#, python-format
msgid "There is no `project_id` column in `%s` table."
msgstr ""
@@ -7055,7 +6634,7 @@ msgstr ""
msgid "Unsupported id columns type"
msgstr "Unsupported Content-Type"
-#: nova/pci/pci_manager.py:156
+#: nova/pci/pci_manager.py:113
#, python-format
msgid ""
"Trying to remove device with %(status)s ownership %(instance_uuid)s "
@@ -7087,66 +6666,73 @@ msgstr "Driver must implement schedule_run_instance"
msgid "Driver must implement select_destinations"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:80
+#: nova/scheduler/filter_scheduler.py:84
#, fuzzy, python-format
msgid ""
"Attempting to build %(num_instances)d instance(s) uuids: "
"%(instance_uuids)s"
msgstr "Attempting to build %(num_instances)d instance(s)"
-#: nova/scheduler/filter_scheduler.py:109
+#: nova/scheduler/filter_scheduler.py:113
#, fuzzy, python-format
msgid "Choosing host %(weighed_host)s for instance %(instance_uuid)s"
msgstr "Destroying VDIs for Instance %(instance_uuid)s"
-#: nova/scheduler/filter_scheduler.py:170
+#: nova/scheduler/filter_scheduler.py:173
msgid "Instance disappeared during scheduling"
msgstr ""
-#: nova/scheduler/host_manager.py:173
+#: nova/scheduler/filter_scheduler.py:219
+msgid "ServerGroupAffinityFilter not configured"
+msgstr ""
+
+#: nova/scheduler/filter_scheduler.py:224
+msgid "ServerGroupAntiAffinityFilter not configured"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:169
#, python-format
msgid "Metric name unknown of %r"
msgstr ""
-#: nova/scheduler/host_manager.py:188
+#: nova/scheduler/host_manager.py:184
#, python-format
msgid ""
"Host has more disk space than database expected (%(physical)sgb > "
"%(database)sgb)"
msgstr ""
-#: nova/scheduler/host_manager.py:365
+#: nova/scheduler/host_manager.py:311
#, fuzzy, python-format
msgid "Host filter ignoring hosts: %s"
msgstr "Host filter fails for ignored host %(host)s"
-#: nova/scheduler/host_manager.py:377
+#: nova/scheduler/host_manager.py:323
#, fuzzy, python-format
msgid "Host filter forcing available hosts to %s"
msgstr "Host filter fails for non-forced host %(host)s"
-#: nova/scheduler/host_manager.py:380
+#: nova/scheduler/host_manager.py:326
#, python-format
msgid "No hosts matched due to not matching 'force_hosts' value of '%s'"
msgstr ""
-#: nova/scheduler/host_manager.py:393
+#: nova/scheduler/host_manager.py:339
#, fuzzy, python-format
msgid "Host filter forcing available nodes to %s"
msgstr "Host filter fails for non-forced host %(host)s"
-#: nova/scheduler/host_manager.py:396
+#: nova/scheduler/host_manager.py:342
#, python-format
msgid "No nodes matched due to not matching 'force_nodes' value of '%s'"
msgstr ""
-#: nova/scheduler/host_manager.py:444
-#: nova/scheduler/filters/trusted_filter.py:208
+#: nova/scheduler/host_manager.py:390
#, python-format
msgid "No service for compute ID %s"
msgstr "No service for compute ID %s"
-#: nova/scheduler/host_manager.py:462
+#: nova/scheduler/host_manager.py:408
#, python-format
msgid "Removing dead compute node %(host)s:%(node)s from scheduler"
msgstr ""
@@ -7182,7 +6768,7 @@ msgstr ""
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr "Invalid value for 'scheduler_max_attempts', must be >= 1"
-#: nova/scheduler/utils.py:233
+#: nova/scheduler/utils.py:231
#, python-format
msgid "Ignoring the invalid elements of the option %(name)s: %(options)s"
msgstr ""
@@ -7192,6 +6778,10 @@ msgstr ""
msgid "%(host_state)s has not been heard from in a while"
msgstr ""
+#: nova/scheduler/filters/exact_core_filter.py:36
+msgid "VCPUs not set; assuming CPU collection broken"
+msgstr ""
+
#: nova/servicegroup/api.py:70
#, python-format
msgid "unknown ServiceGroup driver name: %s"
@@ -7272,16 +6862,6 @@ msgstr ""
msgid "ZooKeeperDriver.leave: %(id)s has not joined to the %(gr)s group"
msgstr ""
-#: nova/storage/linuxscsi.py:100
-#, python-format
-msgid "Multipath call failed exit (%(code)s)"
-msgstr ""
-
-#: nova/storage/linuxscsi.py:121
-#, python-format
-msgid "Couldn't find multipath device %s"
-msgstr ""
-
#: nova/tests/fake_ldap.py:33
msgid "Attempted to instantiate singleton"
msgstr "Attempted to instantiate singleton"
@@ -7290,15 +6870,15 @@ msgstr "Attempted to instantiate singleton"
msgid "status must be available"
msgstr "status must be available"
-#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:245
+#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:290
msgid "already attached"
msgstr "already attached"
-#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:256
+#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:301
msgid "Instance and volume not in same availability_zone"
msgstr ""
-#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:262
+#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:307
msgid "already detached"
msgstr "already detached"
@@ -7306,8 +6886,12 @@ msgstr "already detached"
msgid "unexpected role header"
msgstr "unexpected role header"
-#: nova/tests/api/openstack/compute/test_servers.py:3202
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2425
+#: nova/tests/api/openstack/test_faults.py:47
+msgid "Should be translated."
+msgstr ""
+
+#: nova/tests/api/openstack/compute/test_servers.py:3279
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2438
msgid ""
"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
"instances"
@@ -7315,42 +6899,42 @@ msgstr ""
"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
"instances"
-#: nova/tests/api/openstack/compute/test_servers.py:3207
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2430
+#: nova/tests/api/openstack/compute/test_servers.py:3284
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2443
msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
msgstr "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
-#: nova/tests/api/openstack/compute/test_servers.py:3212
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2435
+#: nova/tests/api/openstack/compute/test_servers.py:3289
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2448
msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
msgstr "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
-#: nova/tests/compute/test_compute.py:1680
-#: nova/tests/compute/test_compute.py:1707
-#: nova/tests/compute/test_compute.py:1785
-#: nova/tests/compute/test_compute.py:1825
-#: nova/tests/compute/test_compute.py:5546
+#: nova/tests/compute/test_compute.py:1770
+#: nova/tests/compute/test_compute.py:1797
+#: nova/tests/compute/test_compute.py:1875
+#: nova/tests/compute/test_compute.py:1915
+#: nova/tests/compute/test_compute.py:5718
#, python-format
msgid "Running instances: %s"
msgstr "Running instances: %s"
-#: nova/tests/compute/test_compute.py:1687
-#: nova/tests/compute/test_compute.py:1755
-#: nova/tests/compute/test_compute.py:1793
+#: nova/tests/compute/test_compute.py:1777
+#: nova/tests/compute/test_compute.py:1845
+#: nova/tests/compute/test_compute.py:1883
#, python-format
msgid "After terminating instances: %s"
msgstr "After terminating instances: %s"
-#: nova/tests/compute/test_compute.py:5557
+#: nova/tests/compute/test_compute.py:5729
#, python-format
msgid "After force-killing instances: %s"
msgstr "After force-killing instances: %s"
-#: nova/tests/compute/test_compute.py:6173
+#: nova/tests/compute/test_compute.py:6345
msgid "wrong host/node"
msgstr ""
-#: nova/tests/compute/test_compute.py:10753
+#: nova/tests/compute/test_compute.py:10999
#, fuzzy
msgid "spawn error"
msgstr "unknown guestmount error"
@@ -7359,7 +6943,16 @@ msgstr "unknown guestmount error"
msgid "Keypair data is invalid"
msgstr "Keypair data is invalid"
-#: nova/tests/db/test_migrations.py:866
+#: nova/tests/compute/test_resources.py:78
+#, python-format
+msgid "Free %(free)d < requested %(requested)d "
+msgstr ""
+
+#: nova/tests/compute/test_resources.py:329
+msgid "Free CPUs 2.00 VCPUs < requested 5 VCPUs"
+msgstr ""
+
+#: nova/tests/db/test_migrations.py:931
#, python-format
msgid ""
"The following migrations are missing a downgrade:\n"
@@ -7446,35 +7039,58 @@ msgstr "Body: %s"
msgid "Unexpected status code"
msgstr "Unexpected status code"
-#: nova/tests/virt/hyperv/test_hypervapi.py:512
+#: nova/tests/virt/hyperv/test_hypervapi.py:515
#, fuzzy
msgid "fake vswitch not found"
msgstr "marker [%s] not found"
-#: nova/tests/virt/hyperv/test_hypervapi.py:965
+#: nova/tests/virt/hyperv/test_hypervapi.py:968
msgid "Simulated failure"
msgstr ""
-#: nova/tests/virt/libvirt/fakelibvirt.py:1019
+#: nova/tests/virt/libvirt/fakelibvirt.py:1051
msgid "Expected a list for 'auth' parameter"
msgstr "Expected a list for 'auth' parameter"
-#: nova/tests/virt/libvirt/fakelibvirt.py:1023
+#: nova/tests/virt/libvirt/fakelibvirt.py:1055
msgid "Expected a function in 'auth[0]' parameter"
msgstr "Expected a function in 'auth[0]' parameter"
-#: nova/tests/virt/libvirt/fakelibvirt.py:1027
+#: nova/tests/virt/libvirt/fakelibvirt.py:1059
msgid "Expected a function in 'auth[1]' parameter"
msgstr "Expected a function in 'auth[1]' parameter"
-#: nova/tests/virt/libvirt/fakelibvirt.py:1038
+#: nova/tests/virt/libvirt/fakelibvirt.py:1070
msgid ""
"virEventRegisterDefaultImpl() must be called before "
"connection is used."
msgstr ""
-#: nova/tests/virt/vmwareapi/test_vm_util.py:196
-#: nova/virt/vmwareapi/vm_util.py:1087
+#: nova/tests/virt/vmwareapi/fake.py:241
+#, python-format
+msgid "Property %(attr)s not set for the managed object %(name)s"
+msgstr "Property %(attr)s not set for the managed object %(name)s"
+
+#: nova/tests/virt/vmwareapi/fake.py:985
+msgid "There is no VM registered"
+msgstr "There is no VM registered"
+
+#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1338
+#, python-format
+msgid "Virtual Machine with ref %s is not there"
+msgstr "Virtual Machine with ref %s is not there"
+
+#: nova/tests/virt/vmwareapi/fake.py:1127
+msgid "Session Invalid"
+msgstr "Session Invalid"
+
+#: nova/tests/virt/vmwareapi/fake.py:1335
+msgid "No Virtual Machine has been registered yet"
+msgstr "No Virtual Machine has been registered yet"
+
+#: nova/tests/virt/vmwareapi/test_ds_util.py:215
+#: nova/virt/vmwareapi/ds_util.py:261
#, python-format
msgid "Datastore regex %s did not match any datastores"
msgstr ""
@@ -7486,102 +7102,136 @@ msgid ""
"left to copy"
msgstr ""
-#: nova/tests/virt/xenapi/image/test_bittorrent.py:126
-#: nova/virt/xenapi/image/bittorrent.py:81
+#: nova/tests/virt/xenapi/image/test_bittorrent.py:125
+#: nova/virt/xenapi/image/bittorrent.py:80
msgid ""
"Cannot create default bittorrent URL without torrent_base_url set or "
"torrent URL fetcher extension"
msgstr ""
-#: nova/tests/virt/xenapi/image/test_bittorrent.py:160
-#: nova/virt/xenapi/image/bittorrent.py:85
+#: nova/tests/virt/xenapi/image/test_bittorrent.py:159
+#: nova/virt/xenapi/image/bittorrent.py:84
msgid "Multiple torrent URL fetcher extensions found. Failing."
msgstr ""
-#: nova/virt/block_device.py:243
+#: nova/virt/block_device.py:255
#, python-format
msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/virt/block_device.py:362
+#: nova/virt/block_device.py:401
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr "Booting with volume %(volume_id)s at %(mountpoint)s"
-#: nova/virt/cpu.py:56 nova/virt/cpu.py:60
+#: nova/virt/diagnostics.py:143
#, python-format
-msgid "Invalid range expression %r"
+msgid "Invalid type for %s"
msgstr ""
-#: nova/virt/cpu.py:69
-#, fuzzy, python-format
-msgid "Invalid exclusion expression %r"
-msgstr "Invalid reservation expiration %(expire)s."
-
-#: nova/virt/cpu.py:76
-#, fuzzy, python-format
-msgid "Invalid inclusion expression %r"
-msgstr "Invalid reservation expiration %(expire)s."
-
-#: nova/virt/cpu.py:81
+#: nova/virt/diagnostics.py:147
#, python-format
-msgid "No CPUs available after parsing %r"
+msgid "Invalid type for %s entry"
msgstr ""
-#: nova/virt/driver.py:1207
+#: nova/virt/driver.py:708
+msgid "Hypervisor driver does not support post_live_migration_at_source method"
+msgstr ""
+
+#: nova/virt/driver.py:1264
msgid "Event must be an instance of nova.virt.event.Event"
msgstr ""
-#: nova/virt/driver.py:1213
+#: nova/virt/driver.py:1270
#, python-format
msgid "Exception dispatching event %(event)s: %(ex)s"
msgstr ""
-#: nova/virt/driver.py:1295
+#: nova/virt/driver.py:1364
msgid "Compute driver option required, but not specified"
msgstr "Compute driver option required, but not specified"
-#: nova/virt/driver.py:1298
+#: nova/virt/driver.py:1367
#, python-format
msgid "Loading compute driver '%s'"
msgstr "Loading compute driver '%s'"
-#: nova/virt/driver.py:1305
+#: nova/virt/driver.py:1374
#, fuzzy
msgid "Unable to load the virtualization driver"
msgstr "Unable to load the virtualization driver: %s"
-#: nova/virt/fake.py:216
+#: nova/virt/event.py:33
+msgid "Started"
+msgstr ""
+
+#: nova/virt/event.py:34
+msgid "Stopped"
+msgstr ""
+
+#: nova/virt/event.py:35
+msgid "Paused"
+msgstr ""
+
+#: nova/virt/event.py:36
+msgid "Resumed"
+msgstr ""
+
+#: nova/virt/event.py:108
+msgid "Unknown"
+msgstr ""
+
+#: nova/virt/fake.py:217
#, python-format
msgid "Key '%(key)s' not in instances '%(inst)s'"
msgstr ""
-#: nova/virt/firewall.py:178
+#: nova/virt/firewall.py:174
msgid "Attempted to unfilter instance which is not filtered"
msgstr "Attempted to unfilter instance which is not filtered"
-#: nova/virt/images.py:86
+#: nova/virt/hardware.py:46
+#, python-format
+msgid "No CPUs available after parsing %r"
+msgstr ""
+
+#: nova/virt/hardware.py:78 nova/virt/hardware.py:82
+#, python-format
+msgid "Invalid range expression %r"
+msgstr ""
+
+#: nova/virt/hardware.py:91
+#, fuzzy, python-format
+msgid "Invalid exclusion expression %r"
+msgstr "Invalid reservation expiration %(expire)s."
+
+#: nova/virt/hardware.py:98
+#, fuzzy, python-format
+msgid "Invalid inclusion expression %r"
+msgstr "Invalid reservation expiration %(expire)s."
+
+#: nova/virt/images.py:81
msgid "'qemu-img info' parsing failed."
msgstr "'qemu-img info' parsing failed."
-#: nova/virt/images.py:92
+#: nova/virt/images.py:87
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr "fmt=%(fmt)s backed by: %(backing_file)s"
-#: nova/virt/images.py:105
+#: nova/virt/images.py:100
#, python-format
msgid ""
"%(base)s virtual size %(disk_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/images.py:122
+#: nova/virt/images.py:117
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr "Converted to raw, but format is now %s"
-#: nova/virt/storage_users.py:63 nova/virt/storage_users.py:101
+#: nova/virt/storage_users.py:64 nova/virt/storage_users.py:102
#, python-format
msgid "Cannot decode JSON from %(id_path)s"
msgstr ""
@@ -7614,37 +7264,37 @@ msgstr ""
msgid "Baremetal node id not supplied to driver for %r"
msgstr ""
-#: nova/virt/baremetal/driver.py:289
+#: nova/virt/baremetal/driver.py:292
#, python-format
msgid "Error deploying instance %(instance)s on baremetal node %(node)s."
msgstr ""
-#: nova/virt/baremetal/driver.py:364
+#: nova/virt/baremetal/driver.py:367
#, python-format
msgid "Baremetal power manager failed to restart node for instance %r"
msgstr ""
-#: nova/virt/baremetal/driver.py:375
+#: nova/virt/baremetal/driver.py:379
#, fuzzy, python-format
msgid "Destroy called on non-existing instance %s"
msgstr "get_info called for instance"
-#: nova/virt/baremetal/driver.py:393
+#: nova/virt/baremetal/driver.py:397
#, python-format
msgid "Error from baremetal driver during destroy: %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:398
+#: nova/virt/baremetal/driver.py:402
#, python-format
msgid "Error while recording destroy failure in baremetal database: %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:413
+#: nova/virt/baremetal/driver.py:417
#, python-format
msgid "Baremetal power manager failed to stop node for instance %r"
msgstr ""
-#: nova/virt/baremetal/driver.py:426
+#: nova/virt/baremetal/driver.py:430
#, python-format
msgid "Baremetal power manager failed to start node for instance %r"
msgstr ""
@@ -7732,7 +7382,7 @@ msgid ""
"passed to baremetal driver: %s"
msgstr ""
-#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:317
+#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:318
#, python-format
msgid "Node associated with another instance while waiting for deploy of %s"
msgstr ""
@@ -7752,7 +7402,7 @@ msgstr "Get console output for instance %s"
msgid "PXE deploy failed for instance %s"
msgstr "empty project id for instance %s"
-#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:342
+#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:343
#, python-format
msgid "Baremetal node deleted while waiting for deployment of instance %s"
msgstr ""
@@ -7769,21 +7419,21 @@ msgid ""
"not passed to baremetal driver: %s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:323
+#: nova/virt/baremetal/tilera.py:324
#, fuzzy, python-format
msgid "Tilera deploy started for instance %s"
msgstr "empty project id for instance %s"
-#: nova/virt/baremetal/tilera.py:329
+#: nova/virt/baremetal/tilera.py:330
#, fuzzy, python-format
msgid "Tilera deploy completed for instance %s"
msgstr "Get console output for instance %s"
-#: nova/virt/baremetal/tilera.py:337
+#: nova/virt/baremetal/tilera.py:338
msgid "Node is unknown error state."
msgstr "Node is unknown error state."
-#: nova/virt/baremetal/tilera.py:340
+#: nova/virt/baremetal/tilera.py:341
#, fuzzy, python-format
msgid "Tilera deploy failed for instance %s"
msgstr "Unable to find host for Instance %s"
@@ -7888,88 +7538,69 @@ msgstr ""
msgid "baremetal driver was unable to delete tid %s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:189
+#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:196
msgid "Could not determine iscsi initiator name"
msgstr "Could not determine iscsi initiator name"
-#: nova/virt/baremetal/volume_driver.py:234
+#: nova/virt/baremetal/volume_driver.py:225
#, fuzzy, python-format
msgid "No fixed PXE IP is associated to %s"
msgstr "No fixed ips associated to instance"
-#: nova/virt/baremetal/volume_driver.py:288
+#: nova/virt/baremetal/volume_driver.py:283
#, python-format
msgid "detach volume could not find tid for %s"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:198
+#: nova/virt/baremetal/db/sqlalchemy/api.py:199
msgid "instance_uuid must be supplied to bm_node_associate_and_update"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:210
+#: nova/virt/baremetal/db/sqlalchemy/api.py:211
#, python-format
msgid "Failed to associate instance %(i_uuid)s to baremetal node %(n_uuid)s."
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:245
-#: nova/virt/baremetal/db/sqlalchemy/api.py:287
+#: nova/virt/baremetal/db/sqlalchemy/api.py:246
+#: nova/virt/baremetal/db/sqlalchemy/api.py:288
#, fuzzy, python-format
msgid "Baremetal interface %s not found"
msgstr "partition %s not found"
-#: nova/virt/baremetal/db/sqlalchemy/api.py:297
+#: nova/virt/baremetal/db/sqlalchemy/api.py:298
#, fuzzy, python-format
msgid "Baremetal interface %s already in use"
msgstr "Virtual Interface creation failed"
-#: nova/virt/baremetal/db/sqlalchemy/api.py:310
+#: nova/virt/baremetal/db/sqlalchemy/api.py:311
#, fuzzy, python-format
msgid "Baremetal virtual interface %s not found"
msgstr "partition %s not found"
-#: nova/virt/disk/api.py:285
+#: nova/virt/disk/api.py:292
msgid "image already mounted"
msgstr "image already mounted"
-#: nova/virt/disk/api.py:359
-#, fuzzy, python-format
-msgid "Ignoring error injecting data into image (%(e)s)"
-msgstr "Ignoring error injecting data into image %(img_id)s (%(e)s)"
-
-#: nova/virt/disk/api.py:381
-#, python-format
-msgid ""
-"Failed to mount container filesystem '%(image)s' on '%(target)s': "
-"%(errors)s"
-msgstr ""
-"Failed to mount container filesystem '%(image)s' on '%(target)s': "
-"%(errors)s"
-
-#: nova/virt/disk/api.py:411
+#: nova/virt/disk/api.py:418
#, python-format
msgid "Failed to teardown container filesystem: %s"
msgstr ""
-#: nova/virt/disk/api.py:424
+#: nova/virt/disk/api.py:431
#, fuzzy, python-format
msgid "Failed to umount container filesystem: %s"
msgstr "Failed to unmount container filesystem: %s"
-#: nova/virt/disk/api.py:449
-#, fuzzy, python-format
-msgid "Ignoring error injecting %(inject)s into image (%(e)s)"
-msgstr "Ignoring error injecting data into image %(img_id)s (%(e)s)"
-
-#: nova/virt/disk/api.py:609
+#: nova/virt/disk/api.py:616
msgid "Not implemented on Windows"
msgstr "Not implemented on Windows"
-#: nova/virt/disk/api.py:636
+#: nova/virt/disk/api.py:643
#, python-format
msgid "User %(username)s not found in password file."
msgstr "User %(username)s not found in password file."
-#: nova/virt/disk/api.py:652
+#: nova/virt/disk/api.py:659
#, python-format
msgid "User %(username)s not found in shadow file."
msgstr "User %(username)s not found in shadow file."
@@ -8050,44 +7681,44 @@ msgstr "nbd device %s did not show up"
msgid "Detaching from erroneous nbd device returned error: %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:64
+#: nova/virt/disk/vfs/guestfs.py:77
#, fuzzy, python-format
msgid "No operating system found in %s"
msgstr "Floating ip not found for id %s"
-#: nova/virt/disk/vfs/guestfs.py:70
+#: nova/virt/disk/vfs/guestfs.py:83
#, python-format
msgid "Multi-boot operating system found in %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:81
+#: nova/virt/disk/vfs/guestfs.py:94
#, python-format
msgid "No mount points found in %(root)s of %(imgfile)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:95
+#: nova/virt/disk/vfs/guestfs.py:108
#, python-format
msgid ""
"Error mounting %(device)s to %(dir)s in image %(imgfile)s with libguestfs"
" (%(e)s)"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:131
+#: nova/virt/disk/vfs/guestfs.py:156
#, python-format
msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:147
+#: nova/virt/disk/vfs/guestfs.py:172
#, fuzzy, python-format
msgid "Failed to close augeas %s"
msgstr "Failed to live migrate VM %s"
-#: nova/virt/disk/vfs/guestfs.py:155
+#: nova/virt/disk/vfs/guestfs.py:180
#, python-format
msgid "Failed to shutdown appliance %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:163
+#: nova/virt/disk/vfs/guestfs.py:188
#, fuzzy, python-format
msgid "Failed to close guest handle %s"
msgstr "Failed to understand rule %(rule)s"
@@ -8101,11 +7732,11 @@ msgstr "injected file path not valid"
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr "The ISCSI initiator name can't be found. Choosing the default one"
-#: nova/virt/hyperv/driver.py:165
+#: nova/virt/hyperv/driver.py:169
msgid "VIF plugging is not supported by the Hyper-V driver."
msgstr ""
-#: nova/virt/hyperv/driver.py:170
+#: nova/virt/hyperv/driver.py:174
msgid "VIF unplugging is not supported by the Hyper-V driver."
msgstr ""
@@ -8153,22 +7784,27 @@ msgstr "Item not found"
msgid "Duplicate VM name found: %s"
msgstr "duplicate name found: %s"
-#: nova/virt/hyperv/migrationops.py:97
+#: nova/virt/hyperv/migrationops.py:98
msgid "Cannot cleanup migration files"
msgstr ""
-#: nova/virt/hyperv/migrationops.py:105
+#: nova/virt/hyperv/migrationops.py:106
#, python-format
msgid ""
"Cannot resize the root disk to a smaller size. Current size: "
"%(curr_root_gb)s GB. Requested size: %(new_root_gb)s GB"
msgstr ""
-#: nova/virt/hyperv/migrationops.py:200
+#: nova/virt/hyperv/migrationops.py:155
+#, python-format
+msgid "Config drive is required by instance: %s, but it does not exist."
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:214
msgid "Cannot resize a VHD to a smaller size"
msgstr ""
-#: nova/virt/hyperv/migrationops.py:245
+#: nova/virt/hyperv/migrationops.py:259
#, python-format
msgid "Cannot find boot VHD file for instance: %s"
msgstr ""
@@ -8187,7 +7823,7 @@ msgstr "Created switch port %(vm_name)s on switch %(ext_path)s"
msgid "No external vswitch found"
msgstr ""
-#: nova/virt/hyperv/pathutils.py:71
+#: nova/virt/hyperv/pathutils.py:73
#, python-format
msgid "The file copy from %(src)s to %(dest)s failed"
msgstr ""
@@ -8197,30 +7833,32 @@ msgstr ""
msgid "Failed to remove snapshot for VM %s"
msgstr "Failed to remove snapshot for VM %s"
-#: nova/virt/hyperv/vhdutils.py:65 nova/virt/hyperv/vhdutilsv2.py:63
+#: nova/virt/hyperv/utilsfactory.py:68
+msgid ""
+"The \"force_hyperv_utils_v1\" option cannot be set to \"True\" on Windows"
+" Server / Hyper-V Server 2012 R2 or above as the WMI "
+"\"root/virtualization\" namespace is no longer supported."
+msgstr ""
+
+#: nova/virt/hyperv/vhdutils.py:66 nova/virt/hyperv/vhdutilsv2.py:64
#, python-format
msgid "Unsupported disk format: %s"
msgstr ""
-#: nova/virt/hyperv/vhdutils.py:150
-#, python-format
-msgid "The %(vhd_type)s type VHD is not supported"
+#: nova/virt/hyperv/vhdutils.py:77
+msgid "VHD differencing disks cannot be resized"
msgstr ""
-#: nova/virt/hyperv/vhdutils.py:161
+#: nova/virt/hyperv/vhdutils.py:165
#, python-format
msgid "Unable to obtain block size from VHD %(vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/vhdutils.py:208
+#: nova/virt/hyperv/vhdutils.py:212
msgid "Unsupported virtual disk format"
msgstr ""
-#: nova/virt/hyperv/vhdutilsv2.py:134
-msgid "Differencing VHDX images are not supported"
-msgstr ""
-
-#: nova/virt/hyperv/vhdutilsv2.py:157
+#: nova/virt/hyperv/vhdutilsv2.py:160
#, python-format
msgid "Unable to obtain internal size from VHDX: %(vhd_path)s. Exception: %(ex)s"
msgstr ""
@@ -8230,47 +7868,47 @@ msgstr ""
msgid "VIF driver not found for network_api_class: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:169
+#: nova/virt/hyperv/vmops.py:198
#, python-format
msgid ""
-"Cannot resize a VHD to a smaller size, the original size is "
-"%(base_vhd_size)s, the newer size is %(root_vhd_size)s"
+"Cannot resize a VHD to a smaller size, the original size is %(old_size)s,"
+" the newer size is %(new_size)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:206
+#: nova/virt/hyperv/vmops.py:228
#, fuzzy
msgid "Spawning new instance"
msgstr "Starting instance"
-#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:520
+#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:574
#, python-format
msgid "Invalid config_drive_format \"%s\""
msgstr ""
-#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:524
+#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:578
msgid "Using config drive for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:296
+#: nova/virt/hyperv/vmops.py:320
#, python-format
msgid "Creating config drive at %(path)s"
msgstr "Creating config drive at %(path)s"
-#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:549
+#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:603
#, fuzzy, python-format
msgid "Creating config drive failed with error: %s"
msgstr "Creating config drive at %(path)s"
-#: nova/virt/hyperv/vmops.py:340
+#: nova/virt/hyperv/vmops.py:371
msgid "Got request to destroy instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:359
+#: nova/virt/hyperv/vmops.py:390
#, fuzzy, python-format
msgid "Failed to destroy instance: %s"
msgstr "Failed to destroy vm %s"
-#: nova/virt/hyperv/vmops.py:412
+#: nova/virt/hyperv/vmops.py:443
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr "Failed to change vm state of %(vm_name)s to %(req_state)s"
@@ -8312,12 +7950,12 @@ msgstr ""
msgid "Metrics collection is not supported on this version of Hyper-V"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:146
+#: nova/virt/hyperv/volumeops.py:148
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr "Unable to attach volume to instance %s"
-#: nova/virt/hyperv/volumeops.py:215 nova/virt/hyperv/volumeops.py:229
+#: nova/virt/hyperv/volumeops.py:222 nova/virt/hyperv/volumeops.py:236
#, python-format
msgid "Unable to find a mounted disk for target_iqn: %s"
msgstr "Unable to find a mounted disk for target_iqn: %s"
@@ -8347,78 +7985,99 @@ msgstr ""
msgid "Unable to determine disk bus for '%s'"
msgstr "Unable to find vbd for vdi %s"
-#: nova/virt/libvirt/driver.py:542
+#: nova/virt/libvirt/driver.py:550
#, python-format
msgid "Connection to libvirt lost: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:724
+#: nova/virt/libvirt/driver.py:739
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr "Can not handle authentication request for %d credentials"
-#: nova/virt/libvirt/driver.py:868
+#: nova/virt/libvirt/driver.py:922
msgid "operation time out"
msgstr ""
-#: nova/virt/libvirt/driver.py:1187
+#: nova/virt/libvirt/driver.py:1246
#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
msgstr ""
-#: nova/virt/libvirt/driver.py:1194
+#: nova/virt/libvirt/driver.py:1253
#, python-format
msgid "Volume sets block size, but libvirt '%s' or later is required."
msgstr ""
-#: nova/virt/libvirt/driver.py:1292
+#: nova/virt/libvirt/driver.py:1351
msgid "Swap only supports host devices"
msgstr ""
-#: nova/virt/libvirt/driver.py:1579
+#: nova/virt/libvirt/driver.py:1638
msgid "libvirt error while requesting blockjob info."
msgstr ""
-#: nova/virt/libvirt/driver.py:1712
+#: nova/virt/libvirt/driver.py:1783
msgid "Found no disk to snapshot."
msgstr ""
-#: nova/virt/libvirt/driver.py:1790
+#: nova/virt/libvirt/driver.py:1875
#, python-format
msgid "Unknown type: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1795
+#: nova/virt/libvirt/driver.py:1880
msgid "snapshot_id required in create_info"
msgstr ""
-#: nova/virt/libvirt/driver.py:1853
+#: nova/virt/libvirt/driver.py:1938
#, python-format
msgid "Libvirt '%s' or later is required for online deletion of volume snapshots."
msgstr ""
-#: nova/virt/libvirt/driver.py:1860
+#: nova/virt/libvirt/driver.py:1945
#, python-format
msgid "Unknown delete_info type %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1890
+#: nova/virt/libvirt/driver.py:1981
+#, python-format
+msgid "Disk with id: %s not found attached to instance."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1990
+msgid "filename cannot be None"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2019
#, python-format
-msgid "Unable to locate disk matching id: %s"
+msgid "no match found for %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2076
+#, python-format
+msgid ""
+"Relative blockcommit support was not detected. Libvirt '%s' or later is "
+"required for online deletion of network storage-backed volume snapshots."
msgstr ""
-#: nova/virt/libvirt/driver.py:2330 nova/virt/xenapi/vmops.py:1552
+#: nova/virt/libvirt/driver.py:2491 nova/virt/xenapi/vmops.py:1561
msgid "Guest does not have a console available"
msgstr "Guest does not have a console available"
-#: nova/virt/libvirt/driver.py:2746
+#: nova/virt/libvirt/driver.py:2820
+#, python-format
+msgid "%s format is not supported"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2926
#, python-format
msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted"
msgstr ""
-#: nova/virt/libvirt/driver.py:2912
+#: nova/virt/libvirt/driver.py:3069
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
@@ -8427,30 +8086,22 @@ msgstr ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
-#: nova/virt/libvirt/driver.py:2918
+#: nova/virt/libvirt/driver.py:3075
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr "Config requested a custom CPU model, but no model name was provided"
-#: nova/virt/libvirt/driver.py:2922
+#: nova/virt/libvirt/driver.py:3079
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr "A CPU model name should not be set when a host CPU model is requested"
-#: nova/virt/libvirt/driver.py:2942
-msgid ""
-"Passthrough of the host CPU was requested but this libvirt version does "
-"not support this feature"
-msgstr ""
-"Passthrough of the host CPU was requested but this libvirt version does "
-"not support this feature"
-
-#: nova/virt/libvirt/driver.py:3475
+#: nova/virt/libvirt/driver.py:3689
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_id)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3496
+#: nova/virt/libvirt/driver.py:3710
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
@@ -8459,23 +8110,23 @@ msgstr ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
-#: nova/virt/libvirt/driver.py:3760
+#: nova/virt/libvirt/driver.py:3976
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr ""
-#: nova/virt/libvirt/driver.py:3890
+#: nova/virt/libvirt/driver.py:4101
msgid "libvirt version is too old (does not support getVersion)"
msgstr "libvirt version is too old (does not support getVersion)"
-#: nova/virt/libvirt/driver.py:4251
+#: nova/virt/libvirt/driver.py:4462
msgid "Block migration can not be used with shared storage."
msgstr "Block migration can not be used with shared storage."
-#: nova/virt/libvirt/driver.py:4259
+#: nova/virt/libvirt/driver.py:4471
msgid "Live migration can not be used without shared storage."
msgstr "Live migration can not be used without shared storage."
-#: nova/virt/libvirt/driver.py:4303
+#: nova/virt/libvirt/driver.py:4541
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
@@ -8484,7 +8135,7 @@ msgstr ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
-#: nova/virt/libvirt/driver.py:4342
+#: nova/virt/libvirt/driver.py:4580
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -8499,12 +8150,38 @@ msgstr ""
"\n"
"Refer to %(u)s"
-#: nova/virt/libvirt/driver.py:4409
+#: nova/virt/libvirt/driver.py:4643
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr "The firewall filter for %s does not exist"
-#: nova/virt/libvirt/driver.py:4900
+#: nova/virt/libvirt/driver.py:4706
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag "
+"or your destination node does not support retrieving listen addresses. "
+"In order for live migration to work properly, you must configure the "
+"graphics (VNC and/or SPICE) listen addresses to be either the catch-all "
+"address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:4723
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag,"
+" and the graphics (VNC and/or SPICE) listen addresses on the destination"
+" node do not match the addresses on the source node. Since the source "
+"node has listen addresses set to either the catch-all address (0.0.0.0 or"
+" ::) or the local address (127.0.0.1 or ::1), the live migration will "
+"succeed, but the VM will continue to listen on the current addresses."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:5100
+#, python-format
+msgid ""
+"Error from libvirt while getting description of %(instance_name)s: [Error"
+" Code %(error_code)s] %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:5226
msgid "Unable to resize disk down."
msgstr ""
@@ -8517,24 +8194,36 @@ msgstr ""
msgid "Attempted overwrite of an existing value."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:429
+#: nova/virt/libvirt/imagebackend.py:316
+msgid "clone() is not implemented"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:449
msgid "You should specify images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:544
+#: nova/virt/libvirt/imagebackend.py:522
msgid "You should specify images_rbd_pool flag to use rbd images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:658
-msgid "rbd python libraries not found"
+#: nova/virt/libvirt/imagebackend.py:612
+msgid "installed version of librbd does not support cloning"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:623
+msgid "Image is not raw format"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:631
+msgid "No image locations are accessible"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:697
+#: nova/virt/libvirt/imagebackend.py:651
#, python-format
msgid "Unknown image_type=%s"
msgstr "Unknown image_type=%s"
-#: nova/virt/libvirt/lvm.py:55
+#: nova/virt/libvirt/lvm.py:54
#, python-format
msgid ""
"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db "
@@ -8543,294 +8232,255 @@ msgstr ""
"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db "
"available, but %(size)db required by volume %(lv)s."
-#: nova/virt/libvirt/lvm.py:103
+#: nova/virt/libvirt/lvm.py:102
#, fuzzy, python-format
msgid "vg %s must be LVM volume group"
msgstr "Path %s must be LVM logical volume"
-#: nova/virt/libvirt/lvm.py:146
+#: nova/virt/libvirt/lvm.py:145
#, python-format
msgid "Path %s must be LVM logical volume"
msgstr "Path %s must be LVM logical volume"
-#: nova/virt/libvirt/lvm.py:222
+#: nova/virt/libvirt/lvm.py:221
#, python-format
msgid "volume_clear='%s' is not handled"
msgstr ""
+#: nova/virt/libvirt/rbd_utils.py:104
+msgid "rbd python libraries not found"
+msgstr ""
+
+#: nova/virt/libvirt/rbd_utils.py:159
+msgid "Not stored in rbd"
+msgstr ""
+
+#: nova/virt/libvirt/rbd_utils.py:163
+msgid "Blank components"
+msgstr ""
+
+#: nova/virt/libvirt/rbd_utils.py:166
+msgid "Not an rbd snapshot"
+msgstr ""
+
#: nova/virt/libvirt/utils.py:79
msgid "Cannot find any Fibre Channel HBAs"
msgstr ""
-#: nova/virt/libvirt/utils.py:431
+#: nova/virt/libvirt/utils.py:391
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr "Can't retrieve root device path from instance libvirt configuration"
-#: nova/virt/libvirt/vif.py:353 nova/virt/libvirt/vif.py:608
-#: nova/virt/libvirt/vif.py:797
+#: nova/virt/libvirt/vif.py:322 nova/virt/libvirt/vif.py:508
+#: nova/virt/libvirt/vif.py:652
msgid "vif_type parameter must be present for this vif_driver implementation"
msgstr ""
-#: nova/virt/libvirt/vif.py:397 nova/virt/libvirt/vif.py:628
-#: nova/virt/libvirt/vif.py:817
+#: nova/virt/libvirt/vif.py:328 nova/virt/libvirt/vif.py:514
+#: nova/virt/libvirt/vif.py:658
#, fuzzy, python-format
msgid "Unexpected vif_type=%s"
msgstr "Unexpected error: %s"
-#: nova/virt/libvirt/volume.py:291
+#: nova/virt/libvirt/volume.py:294
#, python-format
msgid "iSCSI device not found at %s"
msgstr "iSCSI device not found at %s"
-#: nova/virt/libvirt/volume.py:737
+#: nova/virt/libvirt/volume.py:740
#, fuzzy, python-format
msgid "AoE device not found at %s"
msgstr "iSCSI device not found at %s"
-#: nova/virt/libvirt/volume.py:909
+#: nova/virt/libvirt/volume.py:912
msgid "We are unable to locate any Fibre Channel devices"
msgstr ""
-#: nova/virt/libvirt/volume.py:928
+#: nova/virt/libvirt/volume.py:931
#, fuzzy
msgid "Fibre Channel device not found."
msgstr "iSCSI device not found at %s"
-#: nova/virt/vmwareapi/driver.py:103
-msgid ""
-"The VMware ESX driver is now deprecated and will be removed in the Juno "
-"release. The VC driver will remain and continue to be supported."
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:115
+#: nova/virt/vmwareapi/driver.py:125
msgid ""
"Must specify host_ip, host_username and host_password to use "
-"compute_driver=vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver"
+"vmwareapi.VMwareVCDriver"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:127
+#: nova/virt/vmwareapi/driver.py:134
#, python-format
msgid "Invalid Regular Expression %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:242
-msgid "Instance cannot be found in host, or in an unknownstate."
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:398
+#: nova/virt/vmwareapi/driver.py:148
#, python-format
msgid "All clusters specified %s were not found in the vCenter"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:407
-#, python-format
-msgid "The following clusters could not be found in the vCenter %s"
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:544
+#: nova/virt/vmwareapi/driver.py:342
#, python-format
msgid "The resource %s does not exist"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:590
+#: nova/virt/vmwareapi/driver.py:404
#, python-format
msgid "Invalid cluster or resource pool name : %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:757
+#: nova/virt/vmwareapi/driver.py:582
msgid ""
"Multiple hosts may be managed by the VMWare vCenter driver; therefore we "
"do not return uptime for just one host."
msgstr ""
-#: nova/virt/vmwareapi/driver.py:845
-#, python-format
-msgid ""
-"Unable to connect to server at %(server)s, sleeping for %(seconds)s "
-"seconds"
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:865
+#: nova/virt/vmwareapi/driver.py:705
#, python-format
msgid "Unable to validate session %s!"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:906
+#: nova/virt/vmwareapi/driver.py:747
#, python-format
msgid "Session %s is inactive!"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:954
-#, python-format
-msgid "In vmwareapi: _call_method (session=%s)"
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:998
+#: nova/virt/vmwareapi/driver.py:838
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
msgstr "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
-#: nova/virt/vmwareapi/driver.py:1008
+#: nova/virt/vmwareapi/driver.py:848
#, python-format
msgid "In vmwareapi:_poll_task, Got this error %s"
msgstr "In vmwareapi:_poll_task, Got this error %s"
-#: nova/virt/vmwareapi/ds_util.py:38
+#: nova/virt/vmwareapi/ds_util.py:41
msgid "Datastore name cannot be None"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:40
+#: nova/virt/vmwareapi/ds_util.py:43
msgid "Datastore reference cannot be None"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:42
+#: nova/virt/vmwareapi/ds_util.py:45
msgid "Invalid capacity"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:45
+#: nova/virt/vmwareapi/ds_util.py:48
msgid "Capacity is smaller than free space"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:106
+#: nova/virt/vmwareapi/ds_util.py:111
msgid "datastore name empty"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:111
+#: nova/virt/vmwareapi/ds_util.py:116 nova/virt/vmwareapi/ds_util.py:148
msgid "path component cannot be None"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:144
+#: nova/virt/vmwareapi/ds_util.py:162
msgid "datastore path empty"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:46
+#: nova/virt/vmwareapi/error_util.py:45
msgid "exception_summary must not be a list"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:76
+#: nova/virt/vmwareapi/error_util.py:75
msgid "fault_list must be a list"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:122
+#: nova/virt/vmwareapi/error_util.py:121
#, python-format
msgid "Error(s) %s occurred in the call to RetrievePropertiesEx"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:136
+#: nova/virt/vmwareapi/error_util.py:135
msgid "VMware Driver fault."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:142
+#: nova/virt/vmwareapi/error_util.py:141
msgid "VMware Driver configuration fault."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:146
+#: nova/virt/vmwareapi/error_util.py:145
msgid "No default value for use_linked_clone found."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:150
+#: nova/virt/vmwareapi/error_util.py:149
#, python-format
msgid "Missing parameter : %(param)s"
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:154
+#: nova/virt/vmwareapi/error_util.py:153
msgid "No root disk defined."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:158
+#: nova/virt/vmwareapi/error_util.py:157
msgid "Resource already exists."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:163
+#: nova/virt/vmwareapi/error_util.py:162
msgid "Cannot delete file."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:168
+#: nova/virt/vmwareapi/error_util.py:167
msgid "File already exists."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:173
+#: nova/virt/vmwareapi/error_util.py:172
msgid "File fault."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:178
+#: nova/virt/vmwareapi/error_util.py:177
msgid "File locked."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:183
+#: nova/virt/vmwareapi/error_util.py:182
msgid "File not found."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:188
+#: nova/virt/vmwareapi/error_util.py:187
msgid "Invalid property."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:193
+#: nova/virt/vmwareapi/error_util.py:192
msgid "No Permission."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:198
+#: nova/virt/vmwareapi/error_util.py:197
msgid "Not Authenticated."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:203
+#: nova/virt/vmwareapi/error_util.py:202
msgid "Invalid Power State."
msgstr ""

-#: nova/virt/vmwareapi/error_util.py:228
+#: nova/virt/vmwareapi/error_util.py:227
#, python-format
msgid "Fault %s not matched."
msgstr ""

-#: nova/virt/vmwareapi/fake.py:243
-#, python-format
-msgid "Property %(attr)s not set for the managed object %(name)s"
-msgstr "Property %(attr)s not set for the managed object %(name)s"
-
-#: nova/virt/vmwareapi/fake.py:967
-msgid "There is no VM registered"
-msgstr "There is no VM registered"
-
-#: nova/virt/vmwareapi/fake.py:969 nova/virt/vmwareapi/fake.py:1290
-#, python-format
-msgid "Virtual Machine with ref %s is not there"
-msgstr "Virtual Machine with ref %s is not there"
-
-#: nova/virt/vmwareapi/fake.py:1052
-#, python-format
-msgid "Logging out a session that is invalid or already logged out: %s"
-msgstr "Logging out a session that is invalid or already logged out: %s"
-
-#: nova/virt/vmwareapi/fake.py:1070
-msgid "Session Invalid"
-msgstr "Session Invalid"
-
-#: nova/virt/vmwareapi/fake.py:1287
-#, fuzzy
-msgid "No Virtual Machine has been registered yet"
-msgstr " No Virtual Machine has been registered yet"
-
#: nova/virt/vmwareapi/imagecache.py:74
#, python-format
msgid "Unable to delete %(file)s. Exception: %(ex)s"
msgstr ""

-#: nova/virt/vmwareapi/imagecache.py:148
+#: nova/virt/vmwareapi/imagecache.py:147
#, python-format
msgid "Image %s is no longer used by this node. Pending deletion!"
msgstr ""

-#: nova/virt/vmwareapi/imagecache.py:153
+#: nova/virt/vmwareapi/imagecache.py:152
#, python-format
msgid "Image %s is no longer used. Deleting!"
msgstr ""

-#: nova/virt/vmwareapi/io_util.py:121
+#: nova/virt/vmwareapi/io_util.py:122
#, python-format
msgid "Glance image %s is in killed state"
msgstr "Glance image %s is in killed state"
-#: nova/virt/vmwareapi/io_util.py:129
+#: nova/virt/vmwareapi/io_util.py:130
#, python-format
msgid "Glance image %(image_id)s is in unknown state - %(state)s"
msgstr "Glance image %(image_id)s is in unknown state - %(state)s"
@@ -8889,84 +8539,81 @@ msgstr "Exception in %s "
msgid "Unable to retrieve value for %(path)s Reason: %(reason)s"
msgstr ""

-#: nova/virt/vmwareapi/vm_util.py:195
+#: nova/virt/vmwareapi/vm_util.py:202
#, python-format
msgid "%s is not supported."
msgstr ""

-#: nova/virt/vmwareapi/vm_util.py:980
+#: nova/virt/vmwareapi/vm_util.py:1037
msgid "No host available on cluster"
msgstr ""

-#: nova/virt/vmwareapi/vm_util.py:1210
+#: nova/virt/vmwareapi/vm_util.py:1131
#, python-format
msgid "Failed to get cluster references %s"
msgstr ""

-#: nova/virt/vmwareapi/vm_util.py:1222
+#: nova/virt/vmwareapi/vm_util.py:1143
#, python-format
msgid "Failed to get resource pool references %s"
msgstr ""

-#: nova/virt/vmwareapi/vm_util.py:1404
+#: nova/virt/vmwareapi/vm_util.py:1334
msgid "vmwareapi:vm_util:clone_vmref_for_instance, called with vm_ref=None"
msgstr ""

-#: nova/virt/vmwareapi/vmops.py:131
+#: nova/virt/vmwareapi/vmops.py:132
#, python-format
msgid "Extending virtual disk failed with error: %s"
msgstr ""

-#: nova/virt/vmwareapi/vmops.py:246
+#: nova/virt/vmwareapi/vmops.py:252
msgid "Image disk size greater than requested disk size"
msgstr ""

-#: nova/virt/vmwareapi/vmops.py:471
-#, python-format
-msgid "Root disk file creation failed - %s"
-msgstr ""
-
-#: nova/virt/vmwareapi/vmops.py:813
+#: nova/virt/vmwareapi/vmops.py:859
msgid "instance is not powered on"
msgstr "instance is not powered on"
-#: nova/virt/vmwareapi/vmops.py:869
+#: nova/virt/vmwareapi/vmops.py:887
+msgid "Instance does not exist on backend"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:914
#, python-format
msgid ""
"In vmwareapi:vmops:_destroy_instance, got this exception while un-"
"registering the VM: %s"
msgstr ""

-#: nova/virt/vmwareapi/vmops.py:892
-#, python-format
+#: nova/virt/vmwareapi/vmops.py:937
msgid ""
-"In vmwareapi:vmops:_destroy_instance, got this exception while deleting "
-"the VM contents from the disk: %s"
+"In vmwareapi:vmops:_destroy_instance, exception while deleting the VM "
+"contents from the disk"
msgstr ""

-#: nova/virt/vmwareapi/vmops.py:926
+#: nova/virt/vmwareapi/vmops.py:969
msgid "pause not supported for vmwareapi"
msgstr "pause not supported for vmwareapi"
-#: nova/virt/vmwareapi/vmops.py:930
+#: nova/virt/vmwareapi/vmops.py:973
msgid "unpause not supported for vmwareapi"
msgstr "unpause not supported for vmwareapi"
-#: nova/virt/vmwareapi/vmops.py:948
+#: nova/virt/vmwareapi/vmops.py:991
#, fuzzy
msgid "instance is powered off and cannot be suspended."
msgstr "instance is powered off and can not be suspended."
-#: nova/virt/vmwareapi/vmops.py:968
+#: nova/virt/vmwareapi/vmops.py:1011
msgid "instance is not in a suspended state"
msgstr "instance is not in a suspended state"
-#: nova/virt/vmwareapi/vmops.py:1056
-#, fuzzy
-msgid "instance is suspended and cannot be powered off."
-msgstr "instance is not powered on"
+#: nova/virt/vmwareapi/vmops.py:1111
+msgid "Unable to shrink disk."
+msgstr ""

-#: nova/virt/vmwareapi/vmops.py:1147
+#: nova/virt/vmwareapi/vmops.py:1170
#, fuzzy, python-format
msgid ""
"In vmwareapi:vmops:confirm_migration, got this exception while destroying"
@@ -8975,49 +8622,59 @@ msgstr ""
"In vmwareapi:vmops:destroy, got this exception while un-registering the "
"VM: %s"
-#: nova/virt/vmwareapi/vmops.py:1213 nova/virt/xenapi/vmops.py:1497
+#: nova/virt/vmwareapi/vmops.py:1246 nova/virt/xenapi/vmops.py:1500
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
-#: nova/virt/vmwareapi/vmops.py:1217 nova/virt/xenapi/vmops.py:1501
+#: nova/virt/vmwareapi/vmops.py:1250 nova/virt/xenapi/vmops.py:1504
msgid "Automatically hard rebooting"
msgstr "Automatically hard rebooting"
-#: nova/virt/vmwareapi/volumeops.py:217 nova/virt/vmwareapi/volumeops.py:251
+#: nova/virt/vmwareapi/vmops.py:1568
+#, python-format
+msgid "No device with interface-id %s exists on VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1578
+#, python-format
+msgid "No device with MAC address %s exists on the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:340 nova/virt/vmwareapi/volumeops.py:375
#, python-format
msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
msgstr "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
-#: nova/virt/vmwareapi/volumeops.py:239 nova/virt/vmwareapi/volumeops.py:414
+#: nova/virt/vmwareapi/volumeops.py:363 nova/virt/vmwareapi/volumeops.py:538
#, fuzzy
msgid "Unable to find iSCSI Target"
msgstr "Unable to find address %r"
-#: nova/virt/vmwareapi/volumeops.py:337
+#: nova/virt/vmwareapi/volumeops.py:461
#, python-format
msgid ""
"The volume's backing has been relocated to %s. Need to consolidate "
"backing disk file."
msgstr ""

-#: nova/virt/vmwareapi/volumeops.py:375 nova/virt/vmwareapi/volumeops.py:422
+#: nova/virt/vmwareapi/volumeops.py:499 nova/virt/vmwareapi/volumeops.py:546
#, fuzzy
msgid "Unable to find volume"
msgstr "Failed to find volume in db"
-#: nova/virt/vmwareapi/volumeops.py:395 nova/virt/vmwareapi/volumeops.py:424
+#: nova/virt/vmwareapi/volumeops.py:519 nova/virt/vmwareapi/volumeops.py:548
#: nova/virt/xenapi/volumeops.py:148
#, python-format
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
-#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1768
+#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1777
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
-#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1773
+#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1782
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
@@ -9026,7 +8683,7 @@ msgstr ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
-#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1778
+#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1787
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr "The call to %(method)s returned an error: %(e)s. args=%(args)r"
@@ -9092,21 +8749,21 @@ msgstr ""
msgid "Failure while cleaning up attached VDIs"
msgstr "Failure while cleaning up attached VDIs"
-#: nova/virt/xenapi/driver.py:386
+#: nova/virt/xenapi/driver.py:390
#, python-format
msgid "Could not determine key: %s"
msgstr "Could not determine key: %s"
-#: nova/virt/xenapi/driver.py:632
+#: nova/virt/xenapi/driver.py:641
msgid "Host startup on XenServer is not supported."
msgstr "Host startup on XenServer is not supported."
-#: nova/virt/xenapi/fake.py:812
+#: nova/virt/xenapi/fake.py:820
#, python-format
msgid "xenapi.fake does not have an implementation for %s"
msgstr "xenapi.fake does not have an implementation for %s"
-#: nova/virt/xenapi/fake.py:920
+#: nova/virt/xenapi/fake.py:928
#, python-format
msgid ""
"xenapi.fake does not have an implementation for %s or it has been called "
@@ -9115,7 +8772,7 @@ msgstr ""
"xenapi.fake does not have an implementation for %s or it has been called "
"with the wrong number of arguments"
-#: nova/virt/xenapi/host.py:74
+#: nova/virt/xenapi/host.py:73
#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database:"
@@ -9124,37 +8781,37 @@ msgstr ""
"Instance %(name)s running on %(host)s could not be found in the database:"
" assuming it is a worker VM and skip ping migration to a new host"
-#: nova/virt/xenapi/host.py:86
+#: nova/virt/xenapi/host.py:85
#, fuzzy, python-format
msgid "Aggregate for host %(host)s count not be found."
msgstr "Compute host %(host)s could not be found."
-#: nova/virt/xenapi/host.py:105
+#: nova/virt/xenapi/host.py:104
#, python-format
msgid "Unable to migrate VM %(vm_ref)s from %(host)s"
msgstr ""

-#: nova/virt/xenapi/host.py:186
+#: nova/virt/xenapi/host.py:185
msgid "Failed to parse information about a pci device for passthrough"
msgstr ""

-#: nova/virt/xenapi/host.py:259
+#: nova/virt/xenapi/host.py:258
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to "
"take effect."
msgstr ""

-#: nova/virt/xenapi/host.py:284
+#: nova/virt/xenapi/host.py:283
#, python-format
msgid "Failed to extract instance support from %s"
msgstr "Failed to extract instance support from %s"
-#: nova/virt/xenapi/host.py:301
+#: nova/virt/xenapi/host.py:300
msgid "Unable to get updated status"
msgstr "Unable to get updated status"
-#: nova/virt/xenapi/host.py:304
+#: nova/virt/xenapi/host.py:303
#, python-format
msgid "The call to %(method)s returned an error: %(e)s."
msgstr "The call to %(method)s returned an error: %(e)s."
@@ -9230,134 +8887,134 @@ msgid ""
"Expected %(vlan_num)d"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:208
+#: nova/virt/xenapi/vm_utils.py:210
#, python-format
msgid ""
"Device id %(id)s specified is not supported by hypervisor version "
"%(version)s"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:325 nova/virt/xenapi/vm_utils.py:340
+#: nova/virt/xenapi/vm_utils.py:328 nova/virt/xenapi/vm_utils.py:343
msgid "VM already halted, skipping shutdown..."
msgstr "VM already halted, skipping shutdown..."
-#: nova/virt/xenapi/vm_utils.py:392
+#: nova/virt/xenapi/vm_utils.py:395
#, python-format
msgid "VBD %s already detached"
msgstr "VBD %s already detached"
-#: nova/virt/xenapi/vm_utils.py:395
+#: nova/virt/xenapi/vm_utils.py:398
#, python-format
msgid ""
"VBD %(vbd_ref)s uplug failed with \"%(err)s\", attempt "
"%(num_attempt)d/%(max_attempts)d"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:402
+#: nova/virt/xenapi/vm_utils.py:405
#, python-format
msgid "Unable to unplug VBD %s"
msgstr "Unable to unplug VBD %s"
-#: nova/virt/xenapi/vm_utils.py:405
+#: nova/virt/xenapi/vm_utils.py:408
#, python-format
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr "Reached maximum number of retries trying to unplug VBD %s"
-#: nova/virt/xenapi/vm_utils.py:417
+#: nova/virt/xenapi/vm_utils.py:420
#, python-format
msgid "Unable to destroy VBD %s"
msgstr "Unable to destroy VBD %s"
-#: nova/virt/xenapi/vm_utils.py:470
+#: nova/virt/xenapi/vm_utils.py:473
#, python-format
msgid "Unable to destroy VDI %s"
msgstr "Unable to destroy VDI %s"
-#: nova/virt/xenapi/vm_utils.py:516
+#: nova/virt/xenapi/vm_utils.py:519
msgid "SR not present and could not be introduced"
msgstr "SR not present and could not be introduced"
-#: nova/virt/xenapi/vm_utils.py:700
+#: nova/virt/xenapi/vm_utils.py:703
#, python-format
msgid "No primary VDI found for %s"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:792
+#: nova/virt/xenapi/vm_utils.py:795
#, python-format
msgid ""
"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s"
" is of type %(type)s"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:871
+#: nova/virt/xenapi/vm_utils.py:874
#, python-format
msgid "Multiple base images for image: %s"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:926
+#: nova/virt/xenapi/vm_utils.py:929
#, python-format
msgid ""
"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor "
"size of %(new_disk_size)d bytes."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:937 nova/virt/xenapi/vmops.py:1037
+#: nova/virt/xenapi/vm_utils.py:940 nova/virt/xenapi/vmops.py:1040
msgid "Can't resize a disk to 0 GB."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:989
+#: nova/virt/xenapi/vm_utils.py:992
msgid "Disk must have only one partition."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:994
+#: nova/virt/xenapi/vm_utils.py:997
#, python-format
msgid "Disk contains a filesystem we are unable to resize: %s"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:999
+#: nova/virt/xenapi/vm_utils.py:1002
msgid "The only partition should be partition 1."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1010
+#: nova/virt/xenapi/vm_utils.py:1013
#, python-format
msgid "Attempted auto_configure_disk failed because: %s"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1261
+#: nova/virt/xenapi/vm_utils.py:1264
#, python-format
msgid ""
"Fast cloning is only supported on default local SR of type ext. SR on "
"this system was found to be of type %s. Ignoring the cow flag."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1336
+#: nova/virt/xenapi/vm_utils.py:1339
#, python-format
msgid "Unrecognized cache_images value '%s', defaulting to True"
msgstr "Unrecognized cache_images value '%s', defaulting to True"
-#: nova/virt/xenapi/vm_utils.py:1412
+#: nova/virt/xenapi/vm_utils.py:1415
#, python-format
msgid "Invalid value '%s' for torrent_images"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1435
+#: nova/virt/xenapi/vm_utils.py:1438
#, python-format
msgid "Invalid value '%d' for image_compression_level"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1461
+#: nova/virt/xenapi/vm_utils.py:1464
#, python-format
msgid ""
"Download handler '%(handler)s' raised an exception, falling back to "
"default handler '%(default_handler)s'"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1517
+#: nova/virt/xenapi/vm_utils.py:1520
#, python-format
msgid "Image size %(size)d exceeded flavor allowed size %(allowed_size)d"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1568
+#: nova/virt/xenapi/vm_utils.py:1571
#, python-format
msgid ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
@@ -9366,37 +9023,37 @@ msgstr ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
"bytes"
-#: nova/virt/xenapi/vm_utils.py:1610
+#: nova/virt/xenapi/vm_utils.py:1613
msgid "Failed to fetch glance image"
msgstr "Failed to fetch glance image"
-#: nova/virt/xenapi/vm_utils.py:1818
+#: nova/virt/xenapi/vm_utils.py:1846
#, python-format
msgid "Unable to parse rrd of %s"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1848
+#: nova/virt/xenapi/vm_utils.py:1876
#, python-format
msgid "Retry SR scan due to error: %s"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1881
+#: nova/virt/xenapi/vm_utils.py:1909
#, python-format
msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
msgstr "Flag sr_matching_filter '%s' does not respect formatting convention"
-#: nova/virt/xenapi/vm_utils.py:1902
+#: nova/virt/xenapi/vm_utils.py:1930
msgid ""
"XenAPI is unable to find a Storage Repository to install guest instances "
"on. Please check your configuration (e.g. set a default SR for the pool) "
"and/or configure the flag 'sr_matching_filter'."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:1915
+#: nova/virt/xenapi/vm_utils.py:1943
msgid "Cannot find SR of content-type ISO"
msgstr "Cannot find SR of content-type ISO"
-#: nova/virt/xenapi/vm_utils.py:1968
+#: nova/virt/xenapi/vm_utils.py:1996
#, python-format
msgid ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
@@ -9405,60 +9062,60 @@ msgstr ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
"%(server)s."
-#: nova/virt/xenapi/vm_utils.py:2096
+#: nova/virt/xenapi/vm_utils.py:2124
#, python-format
msgid "VHD coalesce attempts exceeded (%d), giving up..."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:2131
+#: nova/virt/xenapi/vm_utils.py:2159
#, python-format
msgid "Timeout waiting for device %s to be created"
msgstr "Timeout waiting for device %s to be created"
-#: nova/virt/xenapi/vm_utils.py:2151
+#: nova/virt/xenapi/vm_utils.py:2179
#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr "Disconnecting stale VDI %s from compute domU"
-#: nova/virt/xenapi/vm_utils.py:2309
+#: nova/virt/xenapi/vm_utils.py:2337
msgid ""
"Shrinking the filesystem down with resize2fs has failed, please check if "
"you have enough free space on your disk."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:2444
+#: nova/virt/xenapi/vm_utils.py:2472
msgid "Manipulating interface files directly"
msgstr "Manipulating interface files directly"
-#: nova/virt/xenapi/vm_utils.py:2453
+#: nova/virt/xenapi/vm_utils.py:2481
#, python-format
msgid "Failed to mount filesystem (expected for non-linux instances): %s"
msgstr "Failed to mount filesystem (expected for non-linux instances): %s"
-#: nova/virt/xenapi/vm_utils.py:2564
+#: nova/virt/xenapi/vm_utils.py:2496
msgid "This domU must be running on the host specified by connection_url"
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:2633
+#: nova/virt/xenapi/vm_utils.py:2565
msgid "Failed to transfer vhd to new host"
msgstr "Failed to transfer vhd to new host"
-#: nova/virt/xenapi/vm_utils.py:2659
+#: nova/virt/xenapi/vm_utils.py:2591
msgid "ipxe_boot_menu_url not set, user will have to enter URL manually..."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:2665
+#: nova/virt/xenapi/vm_utils.py:2597
msgid "ipxe_network_name not set, user will have to enter IP manually..."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:2676
+#: nova/virt/xenapi/vm_utils.py:2608
#, python-format
msgid ""
"Unable to find network matching '%(network_name)s', user will have to "
"enter IP manually..."
msgstr ""

-#: nova/virt/xenapi/vm_utils.py:2700
+#: nova/virt/xenapi/vm_utils.py:2632
#, python-format
msgid "ISO creation tool '%s' does not exist."
msgstr ""
@@ -9467,107 +9124,107 @@ msgstr ""
msgid "Error: Agent is disabled"
msgstr ""

-#: nova/virt/xenapi/vmops.py:375
+#: nova/virt/xenapi/vmops.py:378
msgid "ipxe_boot is True but no ISO image found"
msgstr ""

-#: nova/virt/xenapi/vmops.py:518
+#: nova/virt/xenapi/vmops.py:521
msgid "Failed to spawn, rolling back"
msgstr "Failed to spawn, rolling back"
-#: nova/virt/xenapi/vmops.py:783
+#: nova/virt/xenapi/vmops.py:786
#, fuzzy
msgid "Unable to terminate instance."
msgstr "Failed to terminate instance"
-#: nova/virt/xenapi/vmops.py:835
+#: nova/virt/xenapi/vmops.py:838
#, python-format
msgid "_migrate_disk_resizing_down failed. Restoring orig vm due_to: %s."
msgstr ""

-#: nova/virt/xenapi/vmops.py:989
+#: nova/virt/xenapi/vmops.py:992
#, python-format
msgid "_migrate_disk_resizing_up failed. Restoring orig vm due_to: %s."
msgstr ""

-#: nova/virt/xenapi/vmops.py:996
+#: nova/virt/xenapi/vmops.py:999
#, python-format
msgid "_migrate_disk_resizing_up failed to rollback: %s"
msgstr ""

-#: nova/virt/xenapi/vmops.py:1013
+#: nova/virt/xenapi/vmops.py:1016
msgid "Can't resize down ephemeral disks."
msgstr ""

-#: nova/virt/xenapi/vmops.py:1124
+#: nova/virt/xenapi/vmops.py:1127
msgid "Starting halted instance found during reboot"
msgstr "Starting halted instance found during reboot"
-#: nova/virt/xenapi/vmops.py:1130
+#: nova/virt/xenapi/vmops.py:1133
msgid ""
"Reboot failed due to bad volumes, detaching bad volumes and starting "
"halted instance"
msgstr ""

-#: nova/virt/xenapi/vmops.py:1208
+#: nova/virt/xenapi/vmops.py:1211
msgid "Unable to update metadata, VM not found."
msgstr ""

-#: nova/virt/xenapi/vmops.py:1254
+#: nova/virt/xenapi/vmops.py:1257
msgid "Unable to find root VBD/VDI for VM"
msgstr "Unable to find root VBD/VDI for VM"
-#: nova/virt/xenapi/vmops.py:1292
+#: nova/virt/xenapi/vmops.py:1295
msgid "instance has a kernel or ramdisk but not both"
msgstr "instance has a kernel or ramdisk but not both"
-#: nova/virt/xenapi/vmops.py:1326
+#: nova/virt/xenapi/vmops.py:1329
msgid "Destroying VM"
msgstr "Destroying VM"
-#: nova/virt/xenapi/vmops.py:1355
+#: nova/virt/xenapi/vmops.py:1358
msgid "VM is not present, skipping destroy..."
msgstr "VM is not present, skipping destroy..."
-#: nova/virt/xenapi/vmops.py:1406
+#: nova/virt/xenapi/vmops.py:1409
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr "Instance is already in Rescue Mode: %s"
-#: nova/virt/xenapi/vmops.py:1448
+#: nova/virt/xenapi/vmops.py:1451
#, fuzzy
msgid "VM is not present, skipping soft delete..."
msgstr "VM is not present, skipping destroy..."
-#: nova/virt/xenapi/vmops.py:1834
+#: nova/virt/xenapi/vmops.py:1843
#, python-format
msgid "Destination host:%s must be in the same aggregate as the source server"
msgstr ""

-#: nova/virt/xenapi/vmops.py:1855
+#: nova/virt/xenapi/vmops.py:1864
#, fuzzy
msgid "No suitable network for migrate"
msgstr "Bad networks format"
-#: nova/virt/xenapi/vmops.py:1861
+#: nova/virt/xenapi/vmops.py:1870
#, python-format
msgid "PIF %s does not contain IP address"
msgstr ""

-#: nova/virt/xenapi/vmops.py:1874
+#: nova/virt/xenapi/vmops.py:1883
msgid "Migrate Receive failed"
msgstr "Migrate Receive failed"
-#: nova/virt/xenapi/vmops.py:1948
+#: nova/virt/xenapi/vmops.py:1957
msgid "XAPI supporting relax-xsm-sr-check=true required"
msgstr ""

-#: nova/virt/xenapi/vmops.py:1959
+#: nova/virt/xenapi/vmops.py:1968
#, python-format
msgid "assert_can_migrate failed because: %s"
msgstr ""

-#: nova/virt/xenapi/vmops.py:2019
+#: nova/virt/xenapi/vmops.py:2028
msgid "Migrate Send failed"
msgstr "Migrate Send failed"
@@ -9622,6 +9279,11 @@ msgstr "Mountpoint cannot be translated: %s"
msgid "Unable to find SR from VBD %s"
msgstr "Unable to find SR from VBD %s"
+#: nova/virt/xenapi/volume_utils.py:311
+#, python-format
+msgid "Unable to find SR from VDI %s"
+msgstr ""
+
#: nova/virt/xenapi/volumeops.py:63
#, python-format
msgid "Connected volume (vdi_uuid): %s"
@@ -9702,12 +9364,17 @@ msgstr "Unexpected error: %s"
msgid "Starting nova-xvpvncproxy node (version %s)"
msgstr "Starting nova-xvpvncproxy node (version %s)"
-#: nova/volume/cinder.py:236
+#: nova/volume/cinder.py:257
+#, python-format
+msgid "Invalid client version, must be one of: %s"
+msgstr ""
+
+#: nova/volume/cinder.py:281
#, fuzzy
msgid "status must be 'in-use'"
msgstr "status must be available"
-#: nova/volume/cinder.py:242
+#: nova/volume/cinder.py:287
#, fuzzy
msgid "status must be 'available'"
msgstr "status must be available"
diff --git a/nova/locale/es/LC_MESSAGES/nova-log-critical.po b/nova/locale/es/LC_MESSAGES/nova-log-critical.po
new file mode 100644
index 0000000000..5d75a6a4ac
--- /dev/null
+++ b/nova/locale/es/LC_MESSAGES/nova-log-critical.po
@@ -0,0 +1,38 @@
+# Translations template for nova.
+# Copyright (C) 2014 ORGANIZATION
+# This file is distributed under the same license as the nova project.
+#
+# Translators:
+msgid ""
+msgstr ""
+"Project-Id-Version: nova\n"
+"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
+"POT-Creation-Date: 2014-08-12 06:05+0000\n"
+"PO-Revision-Date: 2014-07-25 14:11+0000\n"
+"Last-Translator: openstackjenkins \n"
+"Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/"
+"es/)\n"
+"Language: es\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 1.3\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: nova/api/openstack/__init__.py:331
+#, python-format
+msgid "Missing core API extensions: %s"
+msgstr "Extensiones core API omitidas: %s"
+
+#: nova/virt/vmwareapi/driver.py:658
+#, python-format
+msgid ""
+"Unable to connect to server at %(server)s, sleeping for %(seconds)s seconds"
+msgstr ""
+"Incapaz de conectar al servidor en %(server)s, esperando durante %(seconds)s "
+"segundos"
+
+#: nova/virt/vmwareapi/driver.py:767
+#, python-format
+msgid "In vmwareapi: _call_method (session=%s)"
+msgstr "En vmwareapi: _call_method (session=%s)"
diff --git a/nova/locale/es/LC_MESSAGES/nova-log-error.po b/nova/locale/es/LC_MESSAGES/nova-log-error.po
index 600dd01dc6..97d19bdac9 100644
--- a/nova/locale/es/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/es/LC_MESSAGES/nova-log-error.po
@@ -7,8 +7,8 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
-"PO-Revision-Date: 2014-06-30 05:01+0000\n"
+"POT-Creation-Date: 2014-08-18 06:04+0000\n"
+"PO-Revision-Date: 2014-08-14 10:51+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/"
"es/)\n"
@@ -39,11 +39,321 @@ msgstr ""
msgid "Exception running %(name)s post-hook: %(obj)s"
msgstr ""

-#: nova/api/ec2/__init__.py:243
+#: nova/api/ec2/__init__.py:244
#, python-format
msgid "Keystone failure: %s"
msgstr "Anomalía de keystone: %s"
+#: nova/api/ec2/__init__.py:493
+#, python-format
+msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
+msgstr "Encontrado %(ex_name)s inesperado : %(ex_str)s"
+
+#: nova/api/ec2/__init__.py:520
+#, python-format
+msgid "Environment: %s"
+msgstr "Entorno: %s"
+
+#: nova/api/metadata/handler.py:155
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr "Fallo al generar metadatos para la ip %s"
+
+#: nova/api/metadata/handler.py:212
+#, python-format
+msgid "Failed to get metadata for instance id: %s"
+msgstr "No se han podido obtener metadatos para el id de instancia: %s"
+
+#: nova/api/openstack/common.py:134
+#, python-format
+msgid ""
+"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad "
+"upgrade or db corrupted?"
+msgstr ""
+"el estado es UNKNOWN de vm_state=%(vm_state)s task_state=%(task_state)s. "
+"¿Actualización errónea o base de datos dañada?"
+
+#: nova/api/openstack/wsgi.py:684
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr "Excepción al manejar recurso: %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:68
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr "Compute.api::pause %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:90
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr "Compute.api::unpause %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:112
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr "compute.api::suspend %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:134
+#, python-format
+msgid "compute.api::resume %s"
+msgstr "compute.api::resume %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:160
+#, python-format
+msgid "Error in migrate %s"
+msgstr "Error al migrar %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:179
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr "Compute.api::reset_network %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:198
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr "Compute.api::inject_network_info %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:215
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr "Compute.api::lock %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:234
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr "Compute.api::unlock %s"
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:392
+#, python-format
+msgid "Compute.api::resetState %s"
+msgstr "Compute.api::resetState %s"
+
+#: nova/api/openstack/compute/contrib/multinic.py:85
+#, python-format
+msgid "Unable to find address %r"
+msgstr "No se puede encontrar la dirección %r"
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85
+msgid "Failed to get default networks"
+msgstr "Fallo al obtener las redes predeterminadas"
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
+msgid "Failed to update usages deallocating network."
+msgstr "No se han podido actualizar los usos desasignando la red."
+
+#: nova/compute/api.py:561
+msgid "Failed to set instance name using multi_instance_display_name_template."
+msgstr ""
+"Se ha encontrado un error en la definición del nombre de instancia mediante "
+"multi_instance_display_name_template."
+
+#: nova/compute/api.py:1429
+msgid ""
+"Something wrong happened when trying to delete snapshot from shelved "
+"instance."
+msgstr ""
+"Algo malo ha pasado al intentar eliminar la instantánea de la imagen "
+"almacenada."
+
+#: nova/compute/api.py:3732
+msgid "Failed to update usages deallocating security group"
+msgstr ""
+"No se han podido actualizar los usos desasignando el grupo de seguridad "
+
+#: nova/compute/flavors.py:167
+#, python-format
+msgid "DB error: %s"
+msgstr "Error de base de datos: %s"
+
+#: nova/compute/flavors.py:178
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr "No se ha encontrado el tipo de instancia %s para suprimirse"
+
+#: nova/compute/manager.py:366
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr "Error al intentar limpiar imagen %s"
+
+#: nova/compute/manager.py:755
+msgid "Failed to check if instance shared"
+msgstr "Fallo al verificar si la instancia se encuentra compartida"
+
+#: nova/compute/manager.py:821 nova/compute/manager.py:872
+msgid "Failed to complete a deletion"
+msgstr "Fallo durante la compleción una remoción"
+
+#: nova/compute/manager.py:913
+msgid "Failed to stop instance"
+msgstr "Fallo al detener instancia"
+
+#: nova/compute/manager.py:925
+msgid "Failed to start instance"
+msgstr "Fallo al iniciar instancia"
+
+#: nova/compute/manager.py:950
+msgid "Failed to revert crashed migration"
+msgstr "Se ha encontrado un error en al revertir la migración colgada"
+
+#: nova/compute/manager.py:1364
+msgid "Failed to dealloc network for deleted instance"
+msgstr "No se ha podido desasignar la red para la instancia suprimida"
+
+#: nova/compute/manager.py:1385
+msgid "Failed to dealloc network for failed instance"
+msgstr "Fallo al desasociar red para la instancia fallida"
+
+#: nova/compute/manager.py:1458 nova/compute/manager.py:3527
+msgid "Error trying to reschedule"
+msgstr "Error al intentar volver a programar "
+
+#: nova/compute/manager.py:1567
+#, python-format
+msgid "Instance failed network setup after %(attempts)d attempt(s)"
+msgstr ""
+"La configuración de red de la instancia falló después de %(attempts)d intento"
+"(s)"
+
+#: nova/compute/manager.py:1761
+msgid "Instance failed block device setup"
+msgstr "Ha fallado la configuración de dispositivo de bloque en la instancia"
+
+#: nova/compute/manager.py:1781 nova/compute/manager.py:2123
+#: nova/compute/manager.py:4071
+msgid "Instance failed to spawn"
+msgstr "La instancia no se ha podido generar"
+
+#: nova/compute/manager.py:1964
+msgid "Unexpected build failure, not rescheduling build."
+msgstr "Fallo de compilación inesperado, no se reprogramará la compilación."
+
+#: nova/compute/manager.py:2033 nova/compute/manager.py:2085
+msgid "Failed to allocate network(s)"
+msgstr "Fallo al asociar red(es)"
+
+#: nova/compute/manager.py:2111
+msgid "Failure prepping block device"
+msgstr "Fallo al preparar el dispositivo de bloques"
+
+#: nova/compute/manager.py:2144
+msgid "Failed to deallocate networks"
+msgstr "Fallo al desasociar redes"
+
+#: nova/compute/manager.py:2374 nova/compute/manager.py:3718
+#: nova/compute/manager.py:5822
+msgid "Setting instance vm_state to ERROR"
+msgstr "Estableciendo el vm_state de la instancia a ERROR"
+
+#: nova/compute/manager.py:2586 nova/compute/manager.py:4933
+#, python-format
+msgid "Failed to get compute_info for %s"
+msgstr "Fallo al obtener compute_info para %s"
+
+#: nova/compute/manager.py:3013
+#, python-format
+msgid "set_admin_password failed: %s"
+msgstr "set_admin_password ha fallado: %s"
+
+#: nova/compute/manager.py:3098
+msgid "Error trying to Rescue Instance"
+msgstr "Error al intentar Rescatar Instancia"
+
+#: nova/compute/manager.py:3724
+#, python-format
+msgid "Failed to rollback quota for failed finish_resize: %s"
+msgstr "Fallo al revertir las cuotas para un finish_resize fallido: %s"
+
+#: nova/compute/manager.py:4323
+#, python-format
+msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
+msgstr "Fallo al asociar %(volume_id)s en %(mountpoint)s"
+
+#: nova/compute/manager.py:4362
+#, python-format
+msgid "Failed to detach volume %(volume_id)s from %(mp)s"
+msgstr "No se ha podido desconectar el volumen %(volume_id)s de %(mp)s"
+
+#: nova/compute/manager.py:4441
+#, python-format
+msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
+msgstr ""
+"Fallo para intercambiar volúmen %(old_volume_id)s por %(new_volume_id)s"
+
+#: nova/compute/manager.py:4448
+#, python-format
+msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
+msgstr ""
+"Fallo al conectar hacia al volúmen %(volume_id)s con el volumen en "
+"%(mountpoint)s"
+
+#: nova/compute/manager.py:4735
+#, python-format
+msgid "Pre live migration failed at %s"
+msgstr "Previo a migración en vivo falló en %s"
+
+#: nova/compute/manager.py:5235
+msgid "Periodic task failed to offload instance."
+msgstr "Tarea periódica falló al descargar instancia."
+
+#: nova/compute/manager.py:5275
+#, python-format
+msgid "Failed to generate usage audit for instance on host %s"
+msgstr ""
+"No se ha podido generar auditoría de uso para la instancia en el host %s "
+
+#: nova/compute/manager.py:5465
+msgid ""
+"Periodic sync_power_state task had an error while processing an instance."
+msgstr ""
+"La tarea periódica sync_power_state ha tenido un error al procesar una "
+"instancia."
+
+#: nova/compute/manager.py:5568 nova/compute/manager.py:5577
+#: nova/compute/manager.py:5608 nova/compute/manager.py:5619
+msgid "error during stop() in sync_power_state."
+msgstr "error durante stop() en sync_power_state."
+
+#: nova/network/neutronv2/api.py:234
+#, python-format
+msgid "Neutron error creating port on network %s"
+msgstr "Error de Neutron al crear puerto en la red: %s"
+
+#: nova/network/neutronv2/api.py:418
+#, python-format
+msgid "Failed to update port %s"
+msgstr "Ha habido un fallo al actualizar el puerto %s"
+
+#: nova/network/neutronv2/api.py:425
+#, python-format
+msgid "Failed to delete port %s"
+msgstr "Ha ocurrido un fallo al eliminar el puerto %s"
+
+#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524
+#, python-format
+msgid "Failed to delete neutron port %s"
+msgstr "Fallo al eliminar el puerto de neutron %s"
+
+#: nova/network/neutronv2/api.py:697
+#, python-format
+msgid "Failed to access port %s"
+msgstr "Fallo al acceder al puerto %s"
+
+#: nova/network/neutronv2/api.py:931
+#, python-format
+msgid "Unable to access floating IP %s"
+msgstr "Incapaz de acceder a la Ip flotante %s"
+
+#: nova/network/neutronv2/api.py:1065
+#, python-format
+msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
+msgstr ""
+"Incapaz de acceder a la IP flotante %(fixed_ip)s para el puerto %(port_id)s"
+
+#: nova/network/neutronv2/api.py:1124
+#, python-format
+msgid "Unable to update host of port %s"
+msgstr "Incapaz de actualizar el anfitrión del puerto %s"
+
#: nova/objects/instance_fault.py:87
msgid "Failed to notify cells of instance fault"
msgstr "No se ha podido notificar a las células el error de instancia"
@@ -58,35 +368,35 @@ msgstr "Se está descartando excepción original: %s"
msgid "Unexpected exception occurred %d time(s)... retrying."
msgstr "La excepción inesperada ha ocurrido %d vez(veces)... reintentando."
-#: nova/openstack/common/lockutils.py:120
+#: nova/openstack/common/lockutils.py:119
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr "No se ha podido liberar el bloqueo adquirido `%s`"
-#: nova/openstack/common/loopingcall.py:89
+#: nova/openstack/common/loopingcall.py:95
msgid "in fixed duration looping call"
msgstr "en llamada en bucle de duración fija"
-#: nova/openstack/common/loopingcall.py:136
+#: nova/openstack/common/loopingcall.py:138
msgid "in dynamic looping call"
msgstr "en llamada en bucle dinámica"
-#: nova/openstack/common/periodic_task.py:179
+#: nova/openstack/common/periodic_task.py:202
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr "Error durante %(full_task_name)s: %(e)s"
-#: nova/openstack/common/policy.py:511
+#: nova/openstack/common/policy.py:507
#, python-format
msgid "Failed to understand rule %s"
msgstr "Ha ocurrido un error al interpretar la regla %s"
-#: nova/openstack/common/policy.py:521
+#: nova/openstack/common/policy.py:517
#, python-format
msgid "No handler for matches of kind %s"
msgstr "No hay manejador para coincidencias de clase %s"
-#: nova/openstack/common/policy.py:791
+#: nova/openstack/common/policy.py:787
#, python-format
msgid "Failed to understand rule %r"
msgstr "Ha ocurrido un error al interpretar la regla %r"
@@ -116,54 +426,71 @@ msgstr "Excepción de base de datos recortada."
msgid "Failed to migrate to version %s on engine %s"
msgstr ""

-#: nova/virt/libvirt/driver.py:625
+#: nova/pci/pci_stats.py:119
+msgid ""
+"Failed to allocate PCI devices for instance. Unassigning devices back to "
+"pools. This should not happen, since the scheduler should have accurate "
+"information, and allocation during claims is controlled via a hold on the "
+"compute node semaphore"
+msgstr ""
+
+#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109
+#, python-format
+msgid "PCI device %s not found"
+msgstr ""
+
+#: nova/virt/disk/api.py:388
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"
+msgstr ""
+"Se ha encontrado un error en el montaje del sistema de archivos de "
+"contenedor '%(image)s' en '%(target)s': : %(errors)s"
+
+#: nova/virt/libvirt/driver.py:639
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
"Nova necesita libvirt versión %(major)i.%(minor)i.%(micro)i o superior."
-#: nova/virt/libvirt/driver.py:749
+#: nova/virt/libvirt/driver.py:764
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr "Ha fallado la conexión a libvirt: %s"
-#: nova/virt/libvirt/driver.py:873
+#: nova/virt/libvirt/driver.py:927
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr "Error de libvirt durante destrucción. Código=%(errcode)s Error=%(e)s"
-#: nova/virt/libvirt/driver.py:889
-msgid "During wait destroy, instance disappeared."
-msgstr "Durante la destrucción de espera, la instancia ha desaparecido."
-
-#: nova/virt/libvirt/driver.py:951
+#: nova/virt/libvirt/driver.py:1005
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
"Error de libvirt durante borrado de definición. Código=%(errcode)s Error="
"%(e)s"
-#: nova/virt/libvirt/driver.py:977
+#: nova/virt/libvirt/driver.py:1033
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
"Error de libvirt durante eliminación de filtro. Código=%(errcode)s Error="
"%(e)s"
-#: nova/virt/libvirt/driver.py:1389
+#: nova/virt/libvirt/driver.py:1444
msgid "attaching network adapter failed."
msgstr "se ha encontrado un error en la conexión del adaptador de red."
-#: nova/virt/libvirt/driver.py:1414
+#: nova/virt/libvirt/driver.py:1471
msgid "detaching network adapter failed."
msgstr "se ha encontrado un error en la desconexión del adaptador de red."
-#: nova/virt/libvirt/driver.py:1663
+#: nova/virt/libvirt/driver.py:1726
msgid "Failed to send updated snapshot status to volume service."
msgstr ""
"Fallo al enviar estado de instantánea actualizada al servicio de volumen."
-#: nova/virt/libvirt/driver.py:1749
+#: nova/virt/libvirt/driver.py:1834
msgid ""
"Unable to create quiesced VM snapshot, attempting again with quiescing "
"disabled."
@@ -171,70 +498,68 @@ msgstr ""
"Incapaz de crear instantánea de VM inmovilizada, intentando nuevamente con "
"la inmovilidad deshabilitada"
-#: nova/virt/libvirt/driver.py:1755
+#: nova/virt/libvirt/driver.py:1840
msgid "Unable to create VM snapshot, failing volume_snapshot operation."
msgstr ""
"Incapaz de crear instantánea de VM, operación de volume_snapshot fallida."
-#: nova/virt/libvirt/driver.py:1804
+#: nova/virt/libvirt/driver.py:1889
msgid ""
"Error occurred during volume_snapshot_create, sending error status to Cinder."
msgstr ""
"Error ocurrido durante volume_snapshot_create, enviando estado de error a "
"Cinder."
-#: nova/virt/libvirt/driver.py:1951
+#: nova/virt/libvirt/driver.py:2111
msgid ""
"Error occurred during volume_snapshot_delete, sending error status to Cinder."
msgstr ""
"Ha ocurrido un error durante volume_snapshot_delete, envinado estado de "
"error a Cinder."
-#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421
+#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr "Error en '%(path)s' al comprobar E/S directa: '%(ex)s'"
-#: nova/virt/libvirt/driver.py:2542
+#: nova/virt/libvirt/driver.py:2705
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr "Error al inyectar datos en imagen %(img_id)s (%(e)s)"
-#: nova/virt/libvirt/driver.py:2693
+#: nova/virt/libvirt/driver.py:2873
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr "La creación de unidad de configuración ha fallado con el error: %s"
-#: nova/virt/libvirt/driver.py:2786
+#: nova/virt/libvirt/driver.py:2966
#, python-format
msgid "Attaching PCI devices %(dev)s to %(dom)s failed."
msgstr "La asociación de dispositivos PCI %(dev)s a %(dom)s ha fallado."
-#: nova/virt/libvirt/driver.py:3553
+#: nova/virt/libvirt/driver.py:3783
#, python-format
-msgid "An error occurred while trying to define a domain with xml: %s"
-msgstr "Un error ha ocurrido al tratar de definir un dominio con xml: %s"
+msgid "Error defining a domain with XML: %s"
+msgstr ""

-#: nova/virt/libvirt/driver.py:3562
+#: nova/virt/libvirt/driver.py:3787
#, python-format
-msgid "An error occurred while trying to launch a defined domain with xml: %s"
+msgid "Error launching a defined domain with XML: %s"
msgstr ""
-"Un error ha ocurrido al intentar lanzar un dominio definido con xml: %s"
-#: nova/virt/libvirt/driver.py:3571
+#: nova/virt/libvirt/driver.py:3792
#, python-format
-msgid "An error occurred while enabling hairpin mode on domain with xml: %s"
+msgid "Error enabling hairpin mode with XML: %s"
msgstr ""
-"Un error ha ocurrido al habilitar el modo pasador en el dominio con xml: %s"
-#: nova/virt/libvirt/driver.py:3589
+#: nova/virt/libvirt/driver.py:3806
#, python-format
msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s"
msgstr ""
"Neutron ha reportado una falla en el evento %(event)s para la instancia "
"%(uuid)s"
-#: nova/virt/libvirt/driver.py:3904
+#: nova/virt/libvirt/driver.py:4115
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to take "
@@ -243,22 +568,22 @@ msgstr ""
"El nombre del anfitrión ha cambiado de %(old)s a %(new)s. Se requiere un "
"reinicio para hacer efecto."
-#: nova/virt/libvirt/driver.py:4481
+#: nova/virt/libvirt/driver.py:4794
#, python-format
msgid "Live Migration failure: %s"
msgstr "Fallo en migración en vivo: %s"
-#: nova/virt/libvirt/driver.py:5231
+#: nova/virt/libvirt/driver.py:5596
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr "No se ha podido limpiar el directorio %(target)s: %(e)s"
-#: nova/virt/libvirt/imagebackend.py:202
+#: nova/virt/libvirt/imagebackend.py:200
#, python-format
msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s"
msgstr "Incapaz de preallocate_images=%(imgs)s en la ruta: %(path)s"
-#: nova/virt/libvirt/imagebackend.py:230
+#: nova/virt/libvirt/imagebackend.py:227
#, python-format
msgid ""
"%(base)s virtual size %(base_size)s larger than flavor root disk size "
@@ -267,40 +592,39 @@ msgstr ""
"El tamaño virtual %(base_size)s de %(base)s es más grande que el tamaño del "
"disco raíz del sabor %(size)s"
-#: nova/virt/libvirt/imagebackend.py:501
-#, python-format
-msgid "error opening rbd image %s"
-msgstr "Error al abrir imagen rbd %s"
-
-#: nova/virt/libvirt/imagecache.py:130
+#: nova/virt/libvirt/imagecache.py:129
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr "Error al leer imagen en archivo %(filename)s: %(error)s"
-#: nova/virt/libvirt/imagecache.py:391
+#: nova/virt/libvirt/imagecache.py:390
#, python-format
msgid "image %(id)s at (%(base_file)s): image verification failed"
msgstr "imagen %(id)s en (%(base_file)s): ha fallado la verificación de imagen"
-#: nova/virt/libvirt/imagecache.py:448
+#: nova/virt/libvirt/imagecache.py:447
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr "No se ha podido eliminar %(base_file)s, el error era %(error)s"
-#: nova/virt/libvirt/lvm.py:201
+#: nova/virt/libvirt/lvm.py:200
#, python-format
msgid "ignoring unrecognized volume_clear='%s' value"
msgstr "Ignorando valor no reconocido volume_clear='%s'"
-#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572
-#: nova/virt/libvirt/vif.py:596
+#: nova/virt/libvirt/rbd_utils.py:62
+#, python-format
+msgid "error opening rbd image %s"
+msgstr "Error al abrir imagen rbd %s"
+
+#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474
+#: nova/virt/libvirt/vif.py:496
msgid "Failed while plugging vif"
msgstr "Fallo al conectar vif"
-#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676
-#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717
-#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762
-#: nova/virt/libvirt/vif.py:784
+#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560
+#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598
+#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639
msgid "Failed while unplugging vif"
msgstr "No se ha podido desconectar vif"
@@ -309,12 +633,28 @@ msgstr "No se ha podido desconectar vif"
msgid "Unknown content in connection_info/access_mode: %s"
msgstr "Contenido desconocido en connection_info/access_mode: %s"
-#: nova/virt/libvirt/volume.py:666
+#: nova/virt/libvirt/volume.py:669
#, python-format
msgid "Couldn't unmount the NFS share %s"
msgstr "No se puede desmontar el recurso compartido NFS %s"
-#: nova/virt/libvirt/volume.py:815
+#: nova/virt/libvirt/volume.py:818
#, python-format
msgid "Couldn't unmount the GlusterFS share %s"
msgstr "No se puede desmontar el recurso compartido GlusterFS %s"
+
+#: nova/virt/vmwareapi/vmops.py:508
+#, python-format
+msgid ""
+"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1551
+#, python-format
+msgid "Attaching network adapter failed. Exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1591
+#, python-format
+msgid "Detaching network adapter failed. Exception: %s"
+msgstr ""
diff --git a/nova/locale/es/LC_MESSAGES/nova-log-info.po b/nova/locale/es/LC_MESSAGES/nova-log-info.po
index 6d7b4a5c34..75b47b8a43 100644
--- a/nova/locale/es/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/es/LC_MESSAGES/nova-log-info.po
@@ -7,8 +7,8 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
-"PO-Revision-Date: 2014-06-30 05:01+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
+"PO-Revision-Date: 2014-08-15 05:00+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/"
"es/)\n"
@@ -19,27 +19,79 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "Se ha devuelto %(url)s con HTTP %(status)d"
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr "Error emitido: %s"
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr "Excepción de HTTP emitida: %s"
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr "Suprimiendo red con el id %s"
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr "poniendo vm en estado original: '%s'"
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+"Durante sync_power_state la instancia ha dejado una tarea pendiente "
+"(%(task)s). Omitir."
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr "Eventlet oculto escuchando en %(port)s para el proceso %(pid)d"
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr "Candado creado ruta: %s"
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr ""

-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr "Omitiendo la tarea periódica %(task)s porque el intervalo es negativo"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "Omitiendo la tarea periódica %(task)s porque está inhabilitada"
@@ -101,91 +153,105 @@ msgstr "Eliminando registro duplicado con id: %(id)s de la tabla: %(table)s"
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""

-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr "La instancia se ha destruido satisfactoriamente. "
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr "La instancia puede volver a iniciarse."
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr "Se va a volver a destruir la instancia."
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr "Empezando proceso de instantánea en directo"
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr "Empezando proceso de instantánea frío"
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr "Se ha extraído instantánea, empezando subida de imagen"
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr "Subida de imagen de instantánea se ha completado"
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr ""
"La instancia ha rearrancado satisfactoriamente de forma no permanente. "
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr "La instancia ha concluido satisfactoriamente."
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
"Es posible que la instancia se haya rearrancado durante el arranque no "
"permanente, por consiguiente volver ahora."
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr "La instancia ha rearrancado satisfactoriamente."
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr "Instancia generada satisfactoriamente. "
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr "data: %(data)r, fpath: %(fpath)r"
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr "Se ha devuelto registro de consola truncado, se han ignorado %d bytes "
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr "Creando imagen"
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr "Utilizando unidad de configuración"
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr "Creando unidad de configuración en %(path)s"
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr "Configurando la zona horaria para la instancia windows a horario local"
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr "libvirt no puede encontrar un dominio con id: %s"
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
@@ -195,7 +261,7 @@ msgstr ""
"desasociado. Instancia=%(instance_name)s Disco=%(disk)s Código=%(errcode)s "
"Error=%(e)s"
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
@@ -204,48 +270,48 @@ msgstr ""
"No se ha podido encontrar el dominio en libvirt para la instancia %s. No se "
"pueden obtener estadísticas de bloque para el dispositivo"
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr "La instancia se está ejecutando satisfactoriamente."
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr "Eliminado los archivos de instancia %s"
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr "La remoción de %s ha fallado"
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr "La remoción de %s se ha completado"
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr "Se ha llamado a setup_basic_filtering en nwfilter"
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr "Asegurando filtros estáticos"
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr "Se ha intentado eliminar filtro de instancia que no está filtrada"
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr "Grabando información almacenada en %s"
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
@@ -253,27 +319,27 @@ msgstr ""
"imagen %(id)s en (%(base_file)s): se ha saltado la verificación de imagen, "
"no hay ningún hash almacenado"
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr "%(id)s (%(base_file)s): generando suma de comprobación"
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr "El archivo de base es demasiado nuevo para eliminarse: %s"
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr "Eliminando archivo de base: %s "
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr "imagen %(id)s en (%(base_file)s): comprobando"
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
@@ -282,26 +348,26 @@ msgstr ""
"imagen %(id)s en (%(base_file)s): en uso: en este nodo %(local)d local, "
"%(remote)d en otros nodos que comparten este almacenamiento de instancia"
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr "Archivos de base activos: %s"
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr "Archivos de base corruptos: %s "
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr "Archivos de base eliminables: %s"
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr "La herramienta findmnt no está instalada"
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/es/LC_MESSAGES/nova-log-warning.po b/nova/locale/es/LC_MESSAGES/nova-log-warning.po
index 64fb423b59..02a8ed3bde 100644
--- a/nova/locale/es/LC_MESSAGES/nova-log-warning.po
+++ b/nova/locale/es/LC_MESSAGES/nova-log-warning.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: 2014-06-24 16:11+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Spanish (http://www.transifex.com/projects/p/nova/language/"
@@ -19,10 +19,141 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
-#: nova/compute/manager.py:1998
+#: nova/api/auth.py:73
+msgid "ratelimit_v3 is removed from v3 api."
+msgstr ""
+
+#: nova/api/auth.py:160
+msgid "Sourcing roles from deprecated X-Role HTTP header"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:169
+#, python-format
+msgid ""
+"Access key %(access_key)s has had %(failures)d failed authentications and "
+"will be locked out for %(lock_mins)d minutes."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1290
+#: nova/api/openstack/compute/contrib/floating_ips.py:254
+#, python-format
+msgid "multiple fixed_ips exist, using the first: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:119
+msgid ""
+"X-Instance-ID present in request headers. The 'service_metadata_proxy' "
+"option must be enabled to process this header."
+msgstr ""
+
+#: nova/api/metadata/handler.py:189
+#, python-format
+msgid ""
+"X-Instance-ID-Signature: %(signature)s does not match the expected value: "
+"%(expected_signature)s for id: %(instance_id)s. Request From: "
+"%(remote_address)s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:215
+#, python-format
+msgid ""
+"Tenant_id %(tenant_id)s does not match tenant_id of instance %(instance_id)s."
+msgstr ""
+
+#: nova/api/metadata/vendordata_json.py:47
+msgid "file does not exist"
+msgstr ""
+
+#: nova/api/metadata/vendordata_json.py:49
+msgid "Unexpected IOError when reading"
+msgstr ""
+
+#: nova/api/metadata/vendordata_json.py:53
+msgid "failed to load json"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:235 nova/api/openstack/__init__.py:409
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:282
+#: nova/api/openstack/compute/plugins/v3/servers.py:104
+#, python-format
+msgid "Not loading %s because it is in the blacklist"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:287
+#: nova/api/openstack/compute/plugins/v3/servers.py:109
+#, python-format
+msgid "Not loading %s because it is not in the whitelist"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:307
+#, python-format
+msgid "Extensions in both blacklist and whitelist: %s"
+msgstr ""
+
+#: nova/api/openstack/common.py:456
+msgid "Rejecting snapshot request, snapshots currently disabled"
+msgstr ""
+
+#: nova/api/openstack/extensions.py:279
+#, python-format
+msgid "Failed to load extension %(ext_factory)s: %(exc)s"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:82
+msgid ""
+"XML support has been deprecated and may be removed as early as the Juno "
+"release."
+msgstr ""
+
+#: nova/api/openstack/compute/views/servers.py:197
+msgid "Instance has had its instance_type removed from the DB"
+msgstr ""
+
+#: nova/compute/manager.py:2023
msgid "No more network or fixed IP to be allocated"
msgstr ""
+#: nova/compute/manager.py:2263
+#, python-format
+msgid "Ignoring EndpointNotFound: %s"
+msgstr ""
+
+#: nova/compute/manager.py:2281
+#, python-format
+msgid "Failed to delete volume: %(volume_id)s due to %(exc)s"
+msgstr ""
+
+#: nova/compute/utils.py:204
+#, python-format
+msgid "Can't access image %(image_id)s: %(error)s"
+msgstr ""
+
+#: nova/compute/utils.py:328
+#, python-format
+msgid ""
+"No host name specified for the notification of HostAPI.%s and it will be "
+"ignored"
+msgstr ""
+
+#: nova/compute/utils.py:456
+#, python-format
+msgid ""
+"Value of 0 or None specified for %s. This behaviour will change in meaning "
+"in the K release, to mean 'call at the default rate' rather than 'do not "
+"call'. To keep the 'do not call' behaviour, use a negative value."
+msgstr ""
+
+#: nova/compute/resources/__init__.py:31
+#, python-format
+msgid "Compute resource plugin %s was not loaded"
+msgstr ""
+
#: nova/consoleauth/manager.py:84
#, python-format
msgid "Token: %(token)s failed to save into memcached."
@@ -33,20 +164,52 @@ msgstr ""
msgid "Instance: %(instance_uuid)s failed to save into memcached"
msgstr ""
-#: nova/openstack/common/loopingcall.py:82
+#: nova/network/neutronv2/api.py:218
+#, python-format
+msgid "Neutron error: Port quota exceeded in tenant: %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:223
+#, python-format
+msgid "Neutron error: No more fixed IPs in network: %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:227
+#, python-format
+msgid ""
+"Neutron error: MAC address %(mac)s is already in use on network %(network)s."
+msgstr ""
+
+#: nova/network/neutronv2/api.py:302
+msgid "No network configured!"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:497
+#, python-format
+msgid "Port %s does not exist"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1160
+#, python-format
+msgid ""
+"Network %(id)s not matched with the tenants network! The ports tenant "
+"%(tenant_id)s will be used."
+msgstr ""
+
+#: nova/openstack/common/loopingcall.py:87
#, python-format
-msgid "task run outlasted interval by %s sec"
-msgstr "la ejecución de tarea ha durado %s seg. más que el intervalo"
+msgid "task %(func_name)s run outlasted interval by %(delay).2f sec"
+msgstr ""
-#: nova/openstack/common/network_utils.py:146
+#: nova/openstack/common/network_utils.py:145
msgid "tcp_keepidle not available on your system"
msgstr ""
-#: nova/openstack/common/network_utils.py:153
+#: nova/openstack/common/network_utils.py:152
msgid "tcp_keepintvl not available on your system"
msgstr ""
-#: nova/openstack/common/network_utils.py:160
+#: nova/openstack/common/network_utils.py:159
msgid "tcp_keepknt not available on your system"
msgstr ""
@@ -75,7 +238,7 @@ msgstr ""
msgid "SQL connection failed. %s attempts left."
msgstr "La conexión SQL ha fallado. Quedan %s intentos."
-#: nova/openstack/common/db/sqlalchemy/utils.py:97
+#: nova/openstack/common/db/sqlalchemy/utils.py:96
msgid "Id not in sort_keys; is sort_keys unique?"
msgstr "Id no está en sort_keys; ¿es sort_keys exclusivo?"
@@ -84,7 +247,7 @@ msgid "VCPUs not set; assuming CPU collection broken"
msgstr ""
"VCPU no establecidas; suponiendo que la colección de CPU se ha interrumpido"
-#: nova/scheduler/filters/core_filter.py:92
+#: nova/scheduler/filters/core_filter.py:102
#, python-format
msgid "Could not decode cpu_allocation_ratio: '%s'"
msgstr "No se puede decodificar cpu_allocation_ratio: '%s'"
@@ -94,14 +257,43 @@ msgstr "No se puede decodificar cpu_allocation_ratio: '%s'"
msgid "Could not decode ram_allocation_ratio: '%s'"
msgstr "No se puede decodificar ram_allocation_ratio: '%s'"
-#: nova/virt/libvirt/driver.py:368
+#: nova/storage/linuxscsi.py:100
+#, python-format
+msgid "Multipath call failed exit (%(code)s)"
+msgstr ""
+
+#: nova/storage/linuxscsi.py:121
+#, python-format
+msgid "Couldn't find multipath device %s"
+msgstr ""
+
+#: nova/storage/linuxscsi.py:130
+#, python-format
+msgid "Skip faulty line \"%(dev_line)s\" of multipath device %(mdev)s"
+msgstr ""
+
+#: nova/virt/disk/api.py:366
+#, python-format
+msgid "Ignoring error injecting data into image %(image)s (%(e)s)"
+msgstr ""
+
+#: nova/virt/disk/api.py:456
+#, python-format
+msgid "Ignoring error injecting %(inject)s into image (%(e)s)"
+msgstr ""
+
+#: nova/virt/disk/vfs/api.py:44
+msgid "Unable to import guestfs, falling back to VFSLocalFS"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:376
#, python-format
msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s."
msgstr ""
"Modalidad de caché %(cache_mode)s no válida especificada para el tipo de "
"disco %(disk_type)s."
-#: nova/virt/libvirt/driver.py:606
+#: nova/virt/libvirt/driver.py:614
#, python-format
msgid ""
"The libvirt driver is not tested on %(type)s/%(arch)s by the OpenStack "
@@ -112,77 +304,81 @@ msgstr ""
"projecto de OpenStack por lo cual su calidad no puede ser asegurada. Para "
"mas información, ver: https://wiki.openstack.org/wiki/HypervisorSupportMatrix"
-#: nova/virt/libvirt/driver.py:656
+#: nova/virt/libvirt/driver.py:671
#, python-format
msgid "URI %(uri)s does not support events: %(error)s"
msgstr "URI %(uri)s no soporta eventos: %(error)s"
-#: nova/virt/libvirt/driver.py:672
+#: nova/virt/libvirt/driver.py:687
#, python-format
msgid "URI %(uri)s does not support connection events: %(error)s"
msgstr "URI %(uri)s no soporta eventos de conexión: %(error)s"
-#: nova/virt/libvirt/driver.py:865
+#: nova/virt/libvirt/driver.py:919
msgid "Cannot destroy instance, operation time out"
msgstr ""
"No se puede destruir intsancia, tiempo de espera agotado para la operación"
-#: nova/virt/libvirt/driver.py:971
+#: nova/virt/libvirt/driver.py:943
+msgid "During wait destroy, instance disappeared."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1027
msgid "Instance may be still running, destroy it again."
msgstr "Puede que la instancia aún se esté ejecutando, vuelva a destruirla."
-#: nova/virt/libvirt/driver.py:1026
+#: nova/virt/libvirt/driver.py:1080
#, python-format
msgid "Ignoring Volume Error on vol %(vol_id)s during delete %(exc)s"
msgstr ""
"Ignorando Error de volumen en volumen %(vol_id)s durante la remocion %(exc)s"
-#: nova/virt/libvirt/driver.py:1076
+#: nova/virt/libvirt/driver.py:1130
#, python-format
msgid "Volume %(disk)s possibly unsafe to remove, please clean up manually"
msgstr ""
"El volumen %(disk)s es posiblemente inseguro para remover, por favor "
"límpialo manualmente"
-#: nova/virt/libvirt/driver.py:1357 nova/virt/libvirt/driver.py:1365
+#: nova/virt/libvirt/driver.py:1414 nova/virt/libvirt/driver.py:1422
msgid "During detach_volume, instance disappeared."
msgstr "Durante detach_volume, la instancia ha desaparecido."
-#: nova/virt/libvirt/driver.py:1410
+#: nova/virt/libvirt/driver.py:1467
msgid "During detach_interface, instance disappeared."
msgstr "Durante detach_interface, la instancia ha desaparecido."
-#: nova/virt/libvirt/driver.py:1976
+#: nova/virt/libvirt/driver.py:2136
msgid "Failed to soft reboot instance. Trying hard reboot."
msgstr ""
"Fallo al reiniciar la instancia de manera suave. Intentando reinicio duro."
-#: nova/virt/libvirt/driver.py:2537
+#: nova/virt/libvirt/driver.py:2693
#, python-format
msgid "Image %s not found on disk storage. Continue without injecting data"
msgstr ""
"La imagen %s no se ha encontrado en el almacenamiento de disco. Continuando "
"sin inyectar datos."
-#: nova/virt/libvirt/driver.py:2700
+#: nova/virt/libvirt/driver.py:2880
msgid "File injection into a boot from volume instance is not supported"
msgstr ""
"La inyección de archivo al arranque desde la instancia del volumen no está "
"soportado."
-#: nova/virt/libvirt/driver.py:2775
+#: nova/virt/libvirt/driver.py:2955
msgid "Instance disappeared while detaching a PCI device from it."
msgstr ""
"La instancia ha desaparecido mientras se removía el dispositivo PCI de ella."
-#: nova/virt/libvirt/driver.py:2830
+#: nova/virt/libvirt/driver.py:3010
#, python-format
msgid "Cannot update service status on host: %s,since it is not registered."
msgstr ""
"No se puede actualizar el estado del servicio en el anfitrión: %s, ya que el "
"mismo no está registrado."
-#: nova/virt/libvirt/driver.py:2833
+#: nova/virt/libvirt/driver.py:3013
#, python-format
msgid ""
"Cannot update service status on host: %s,due to an unexpected exception."
@@ -190,19 +386,24 @@ msgstr ""
"No se puede atualizar el estado del servicio en el anfitrión: %s, debido a "
"una excepción inesperada."
-#: nova/virt/libvirt/driver.py:2861
+#: nova/virt/libvirt/driver.py:3041
#, python-format
msgid "URI %(uri)s does not support full set of host capabilities: %(error)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3672
+#: nova/virt/libvirt/driver.py:3888
#, python-format
msgid "Timeout waiting for vif plugging callback for instance %(uuid)s"
msgstr ""
"Tiempo excedido para la llamada inversa de la conexión vif para la instancia "
"%(uuid)s"
-#: nova/virt/libvirt/driver.py:3750
+#: nova/virt/libvirt/driver.py:3909
+#, python-format
+msgid "couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:3966
msgid ""
"Cannot get the number of cpu, because this function is not implemented for "
"this platform. "
@@ -210,24 +411,28 @@ msgstr ""
"No se puede obtener el número de CPU porque esta función no está "
"implementada para esta plataforma. "
-#: nova/virt/libvirt/driver.py:3813
+#: nova/virt/libvirt/driver.py:4028
+#, python-format
+msgid ""
+"couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:4059
#, python-format
-msgid "couldn't obtain the vpu count from domain id: %(id)s, exception: %(ex)s"
+msgid "couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s"
msgstr ""
-"no se puede obtener el conteo de vpu del identificador del dominio: %(id)s, "
-"excepción: %(ex)s"
-#: nova/virt/libvirt/driver.py:4050
+#: nova/virt/libvirt/driver.py:4261
#, python-format
msgid "URI %(uri)s does not support listDevices: %(error)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4594
+#: nova/virt/libvirt/driver.py:4916
#, python-format
msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d."
msgstr "plug_vifs() ha fallado %(cnt)d. Intentando hasta %(max_retry)d."
-#: nova/virt/libvirt/driver.py:4727
+#: nova/virt/libvirt/driver.py:5126
#, python-format
msgid ""
"Error from libvirt while getting description of %(instance_name)s: [Error "
@@ -236,7 +441,7 @@ msgstr ""
"Error de libvirt al obtener la descripción de %(instance_name)s: [Código de "
"error %(error_code)s] %(ex)s"
-#: nova/virt/libvirt/driver.py:4805
+#: nova/virt/libvirt/driver.py:5134
#, python-format
msgid ""
"Periodic task is updating the host stat, it is trying to get disk "
@@ -247,7 +452,7 @@ msgstr ""
"intentando obtener el disco %(i_name)s, pero el disco ha sido removido por "
"operaciones concurrentes como la modificación de tamaño."
-#: nova/virt/libvirt/driver.py:4811
+#: nova/virt/libvirt/driver.py:5140
#, python-format
msgid ""
"Periodic task is updating the host stat, it is trying to get disk "
@@ -255,14 +460,14 @@ msgid ""
"on the compute node but is not managed by Nova."
msgstr ""
-#: nova/virt/libvirt/firewall.py:49
+#: nova/virt/libvirt/firewall.py:50
msgid ""
"Libvirt module could not be loaded. NWFilterFirewall will not work correctly."
msgstr ""
"El módulo lLibvirt no se ha podido cargar. NWFilterFirewall no funcionará "
"correctamente."
-#: nova/virt/libvirt/imagecache.py:318
+#: nova/virt/libvirt/imagecache.py:317
#, python-format
msgid ""
"Instance %(instance)s is using a backing file %(backing)s which does not "
@@ -271,7 +476,7 @@ msgstr ""
"La instancia %(instance)s utiliza un archivo de respaldo %(backing)s que no "
"aparece en el servicio de la imagen"
-#: nova/virt/libvirt/imagecache.py:495
+#: nova/virt/libvirt/imagecache.py:494
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): warning -- an absent base file is in use! "
@@ -280,12 +485,12 @@ msgstr ""
"%(id)s (%(base_file)s): aviso -- se está utilizando un archivo base ausente. "
"instancias: %(instance_list)s"
-#: nova/virt/libvirt/imagecache.py:545
+#: nova/virt/libvirt/imagecache.py:544
#, python-format
msgid "Unknown base file: %s"
msgstr "Archivo de base desconocido: %s "
-#: nova/virt/libvirt/lvm.py:68
+#: nova/virt/libvirt/lvm.py:67
#, python-format
msgid ""
"Volume group %(vg)s will not be able to hold sparse volume %(lv)s. Virtual "
@@ -296,28 +501,21 @@ msgstr ""
"tamaño del volumen virtual es %(size)db, pero el espacio libre en el grupo "
"de volúmenes es solo %(free_space)db."
+#: nova/virt/libvirt/rbd_utils.py:268
+#, python-format
+msgid "rbd remove %(volume)s in pool %(pool)s failed"
+msgstr ""
+
#: nova/virt/libvirt/utils.py:69 nova/virt/libvirt/utils.py:75
msgid "systool is not installed"
msgstr "systool no está instalado"
-#: nova/virt/libvirt/utils.py:242
-#, python-format
-msgid "rbd remove %(name)s in pool %(pool)s failed"
-msgstr "la remoción rbd de %(name)s en el conjunto %(pool)s ha fallado"
-
-#: nova/virt/libvirt/vif.py:827
-#, python-format
-msgid ""
-"VIF driver \"%s\" is marked as deprecated and will be removed in the Juno "
-"release."
-msgstr ""
-
#: nova/virt/libvirt/volume.py:132
#, python-format
msgid "Unknown content in connection_info/qos_specs: %s"
msgstr "Contenido desconocido en connection_info/qos_specs: %s"
-#: nova/virt/libvirt/volume.py:294
+#: nova/virt/libvirt/volume.py:297
#, python-format
msgid ""
"ISCSI volume not yet found at: %(disk_dev)s. Will rescan & retry. Try "
@@ -326,12 +524,12 @@ msgstr ""
"El volumen ISCSI aún no se ha encontrado en: %(disk_dev)s. Se volverá a "
"explorar y se reintentará. Número de intentos: %(tries)s"
-#: nova/virt/libvirt/volume.py:361
+#: nova/virt/libvirt/volume.py:364
#, python-format
msgid "Unable to delete volume device %s"
msgstr "Incapaz de eliminar el dispositivo de volumen %s"
-#: nova/virt/libvirt/volume.py:372
+#: nova/virt/libvirt/volume.py:375
#, python-format
msgid ""
"Failed to remove multipath device descriptor %(dev_mapper)s. Exception "
@@ -340,19 +538,19 @@ msgstr ""
"Fallo al remover el descriptor del dispositivo multiruta %(dev_mapper)s. "
"Mensaje de excepción: %(msg)s"
-#: nova/virt/libvirt/volume.py:694 nova/virt/libvirt/volume.py:843
+#: nova/virt/libvirt/volume.py:697 nova/virt/libvirt/volume.py:846
#, python-format
msgid "%s is already mounted"
msgstr "%s ya está montado "
-#: nova/virt/libvirt/volume.py:739
+#: nova/virt/libvirt/volume.py:742
#, python-format
msgid "AoE volume not yet found at: %(aoedevpath)s. Try number: %(tries)s"
msgstr ""
"El volumen AoE aún no se ha encontrado en: %(aoedevpath)s. Número de "
"intentos: %(tries)s"
-#: nova/virt/libvirt/volume.py:931
+#: nova/virt/libvirt/volume.py:934
#, python-format
msgid ""
"Fibre volume not yet found at: %(mount_device)s. Will rescan & retry. Try "
@@ -361,19 +559,46 @@ msgstr ""
"El volumen de fibra aún no se ha encontrado en: %(mount_device)s. Se volverá "
"a explorar y se reintentará. Número de intentos: %(tries)s"
-#: nova/virt/libvirt/volume.py:1033
+#: nova/virt/libvirt/volume.py:995
+#, python-format
+msgid "multipath-tools probably work improperly. devices to remove = %s."
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:1040
msgid "Value required for 'scality_sofs_config'"
msgstr "Valor necesario para 'scality_sofs_config'"
-#: nova/virt/libvirt/volume.py:1044
+#: nova/virt/libvirt/volume.py:1051
#, python-format
msgid "Cannot access 'scality_sofs_config': %s"
msgstr "No se puede acceder a 'scality_sofs_config': %s"
-#: nova/virt/libvirt/volume.py:1050
+#: nova/virt/libvirt/volume.py:1057
msgid "Cannot execute /sbin/mount.sofs"
msgstr "No se puede ejecutar /sbin/mount.sofs"
-#: nova/virt/libvirt/volume.py:1065
+#: nova/virt/libvirt/volume.py:1072
msgid "Cannot mount Scality SOFS, check syslog for errors"
msgstr "No se puede montar Scality SOFS, compruebe syslog por si hay errores"
+
+#: nova/virt/vmwareapi/driver.py:96
+msgid ""
+"The VMware ESX driver is now deprecated and has been removed in the Juno "
+"release. The VC driver will remain and continue to be supported."
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:157
+#, python-format
+msgid "The following clusters could not be found in the vCenter %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:202
+msgid "Instance cannot be found in host, or in an unknownstate."
+msgstr ""
+
+#: nova/volume/cinder.py:249
+msgid ""
+"Cinder V1 API is deprecated as of the Juno release, and Nova is still "
+"configured to use it. Enable the V2 API in Cinder and set "
+"cinder_catalog_info in nova.conf to use it."
+msgstr ""
diff --git a/nova/locale/es/LC_MESSAGES/nova.po b/nova/locale/es/LC_MESSAGES/nova.po
index cf6c5d1957..dcb821f4da 100644
--- a/nova/locale/es/LC_MESSAGES/nova.po
+++ b/nova/locale/es/LC_MESSAGES/nova.po
@@ -12,8 +12,8 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
-"PO-Revision-Date: 2014-06-30 04:40+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
+"PO-Revision-Date: 2014-08-15 22:40+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Spanish "
"(http://www.transifex.com/projects/p/nova/language/es/)\n"
@@ -23,39 +23,43 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
-#: nova/block_device.py:99
+#: nova/block_device.py:102
msgid "Some fields are invalid."
msgstr ""
-#: nova/block_device.py:109
+#: nova/block_device.py:112
msgid "Some required fields are missing"
msgstr ""
-#: nova/block_device.py:125
+#: nova/block_device.py:128
msgid "Boot index is invalid."
msgstr ""
-#: nova/block_device.py:168
+#: nova/block_device.py:171
msgid "Unrecognized legacy format."
msgstr ""
-#: nova/block_device.py:185
+#: nova/block_device.py:188
msgid "Invalid source_type field."
msgstr ""
-#: nova/block_device.py:189
+#: nova/block_device.py:191
+msgid "Invalid device UUID."
+msgstr ""
+
+#: nova/block_device.py:195
msgid "Missing device UUID."
msgstr ""
-#: nova/block_device.py:368
+#: nova/block_device.py:374
msgid "Device name empty or too long."
msgstr ""
-#: nova/block_device.py:372
+#: nova/block_device.py:378
msgid "Device name contains spaces."
msgstr ""
-#: nova/block_device.py:382
+#: nova/block_device.py:388
msgid "Invalid volume_size."
msgstr ""
@@ -363,7 +367,7 @@ msgstr ""
msgid "Group not valid. Reason: %(reason)s"
msgstr "Grupo no válido. Razón: %(reason)s"
-#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:58
+#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:57
msgid "Sort key supplied was not valid."
msgstr "La clave de clasificación proporcionada no es válida. "
@@ -436,53 +440,53 @@ msgstr "Fallo al terminar la instancia: %(reason)s"
msgid "Failed to deploy instance: %(reason)s"
msgstr "Fallo al desplegar instancia: %(reason)s"
-#: nova/exception.py:402
+#: nova/exception.py:402 nova/exception.py:406
#, python-format
msgid "Failed to launch instances: %(reason)s"
msgstr "Fallo al ejecutar instancias: %(reason)s"
-#: nova/exception.py:406
+#: nova/exception.py:410
msgid "Service is unavailable at this time."
msgstr "El servicio no esta disponible en este momento"
-#: nova/exception.py:410
+#: nova/exception.py:414
#, python-format
msgid "Insufficient compute resources: %(reason)s."
msgstr "Recursos de cómputo insuficientes: %(reason)s."
-#: nova/exception.py:414
+#: nova/exception.py:418
#, python-format
msgid "Connection to the hypervisor is broken on host: %(host)s"
msgstr "La conexión al hipervisor está perdida en el anfitrión: %(host)s"
-#: nova/exception.py:418
+#: nova/exception.py:422
#, python-format
msgid "Compute service of %(host)s is unavailable at this time."
msgstr "El servicio Compute de %(host)s no está disponible en este momento."
-#: nova/exception.py:422
+#: nova/exception.py:426
#, python-format
msgid "Compute service of %(host)s is still in use."
msgstr "El servicio Compute de %(host)s todavía se encuentra en uso."
-#: nova/exception.py:426
+#: nova/exception.py:430
#, python-format
msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr ""
"Incapaz de emigrar la instancia %(instance_id)s al actual anfitrion "
"(%(host)s)"
-#: nova/exception.py:431
+#: nova/exception.py:435
msgid "The supplied hypervisor type of is invalid."
msgstr "El tipo de hipervisor proporcionado no es válido. "
-#: nova/exception.py:435
+#: nova/exception.py:439
msgid "The instance requires a newer hypervisor version than has been provided."
msgstr ""
"La instancia necesita una versión de hipervisor más reciente que la "
"proporcionada."
-#: nova/exception.py:440
+#: nova/exception.py:444
#, python-format
msgid ""
"The supplied disk path (%(path)s) already exists, it is expected not to "
@@ -491,32 +495,32 @@ msgstr ""
"La ruta de disco proporcionada (%(path)s) ya existe, se espera una que no"
" exista."
-#: nova/exception.py:445
+#: nova/exception.py:449
#, python-format
msgid "The supplied device path (%(path)s) is invalid."
msgstr "La ruta proporcionada al dispositivo (%(path)s) no es válida."
-#: nova/exception.py:449
+#: nova/exception.py:453
#, python-format
msgid "The supplied device path (%(path)s) is in use."
msgstr "La ruta proporcionada al dispositivo (%(path)s) está en uso."
-#: nova/exception.py:454
+#: nova/exception.py:458
#, python-format
msgid "The supplied device (%(device)s) is busy."
msgstr "El dispositivo proporcionado (%(device)s) está ocupado."
-#: nova/exception.py:458
+#: nova/exception.py:462
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "Información de CPU inválida: %(reason)s"
-#: nova/exception.py:462
+#: nova/exception.py:466
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr "%(address)s no es una direccion IP v4/6 valida"
-#: nova/exception.py:466
+#: nova/exception.py:470
#, python-format
msgid ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
@@ -526,7 +530,7 @@ msgstr ""
"etiqueta VLAN que se espera es %(tag)s, pero la asociada con el grupo de "
"puertos es %(pgroup)s."
-#: nova/exception.py:472
+#: nova/exception.py:476
#, python-format
msgid ""
"vSwitch which contains the port group %(bridge)s is not associated with "
@@ -537,60 +541,60 @@ msgstr ""
"con el adaptador físico deseado. El vSwitch esperado es %(expected)s, "
"pero el asociado es %(actual)s."
-#: nova/exception.py:479
+#: nova/exception.py:483
#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Formato de disco %(disk_format)s no es aceptable"
-#: nova/exception.py:483
+#: nova/exception.py:487
#, python-format
msgid "Disk info file is invalid: %(reason)s"
msgstr "El archivo de información de disco es inválido: %(reason)s"
-#: nova/exception.py:487
+#: nova/exception.py:491
#, python-format
msgid "Failed to read or write disk info file: %(reason)s"
msgstr "Fallo al leer o escribir el archivo de información de disco: %(reason)s"
-#: nova/exception.py:491
+#: nova/exception.py:495
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr "La imagen %(image_id)s es inaceptable: %(reason)s"
-#: nova/exception.py:495
+#: nova/exception.py:499
#, python-format
msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
msgstr "La instancia %(instance_id)s no es aceptable: %(reason)s"
-#: nova/exception.py:499
+#: nova/exception.py:503
#, python-format
msgid "Ec2 id %(ec2_id)s is unacceptable."
msgstr "El id de Ec2 %(ec2_id)s no es aceptable. "
-#: nova/exception.py:503
+#: nova/exception.py:507
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr "Se esperaba un uuid pero se ha recibido %(uuid)s."
-#: nova/exception.py:507
+#: nova/exception.py:511
#, python-format
msgid "Invalid ID received %(id)s."
msgstr "Se ha recibido el ID %(id)s no válido."
-#: nova/exception.py:511
+#: nova/exception.py:515
msgid "Constraint not met."
msgstr "Restricción no cumplida."
-#: nova/exception.py:516
+#: nova/exception.py:520
msgid "Resource could not be found."
msgstr "No se ha podido encontrar el recurso."
-#: nova/exception.py:521
+#: nova/exception.py:525
#, python-format
msgid "No agent-build associated with id %(id)s."
msgstr "No hay ninguna compilación de agente asociada con el id %(id)s."
-#: nova/exception.py:525
+#: nova/exception.py:529
#, python-format
msgid ""
"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
@@ -599,53 +603,53 @@ msgstr ""
"Compilación agente con hipervisor %(hypervisor)s S.O. %(os)s arquitectura"
" %(architecture)s existe."
-#: nova/exception.py:531
+#: nova/exception.py:535
#, python-format
msgid "Volume %(volume_id)s could not be found."
msgstr "No se ha podido encontrar el volumen %(volume_id)s."
-#: nova/exception.py:535
+#: nova/exception.py:539
#, python-format
msgid "No volume Block Device Mapping with id %(volume_id)s."
msgstr "No hay volumen de Block Device Mapping con identificador %(volume_id)s."
-#: nova/exception.py:540
+#: nova/exception.py:544
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "No se ha podido encontrar la instantánea %(snapshot_id)s."
-#: nova/exception.py:544
+#: nova/exception.py:548
#, python-format
msgid "No disk at %(location)s"
msgstr "No hay ningún disco en %(location)s"
-#: nova/exception.py:548
+#: nova/exception.py:552
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr "No se ha podido encontrar un manejador para el volumen %(driver_type)s."
-#: nova/exception.py:552
+#: nova/exception.py:556
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr "href de imagen %(image_href)s no válida."
-#: nova/exception.py:556
+#: nova/exception.py:560
#, python-format
msgid "Requested image %(image)s has automatic disk resize disabled."
msgstr ""
"La imagen solicitada %(image)s tiene desactivada la modificación "
"automática de tamaño de disco."
-#: nova/exception.py:561
+#: nova/exception.py:565
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr "No se ha podido encontrar la imagen %(image_id)s. "
-#: nova/exception.py:565
+#: nova/exception.py:569
msgid "The current driver does not support preserving ephemeral partitions."
msgstr "El dispositivo actual no soporta la preservación de particiones efímeras."
-#: nova/exception.py:571
+#: nova/exception.py:575
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -656,69 +660,69 @@ msgstr ""
"ID de imagen dinámicamente cuando se listan por primera vez. ¿Ha listado "
"los ID de imagen desde que ha añadido esta imagen?"
-#: nova/exception.py:578
+#: nova/exception.py:582
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr "No se ha podido encontrar el proyecto %(project_id)s."
-#: nova/exception.py:582
+#: nova/exception.py:586
msgid "Cannot find SR to read/write VDI."
msgstr "No se puede encontrar SR para leer/grabar VDI."
-#: nova/exception.py:586
+#: nova/exception.py:590
#, python-format
msgid "Network %(network_id)s is duplicated."
msgstr "La red %(network_id)s está duplicada."
-#: nova/exception.py:590
+#: nova/exception.py:594
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr "La red %(network_id)s aún se está utilizando."
-#: nova/exception.py:594
+#: nova/exception.py:598
#, python-format
msgid "%(req)s is required to create a network."
msgstr "Se necesita %(req)s para crear una red."
-#: nova/exception.py:598
+#: nova/exception.py:602
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr "No se ha podido encontrar la red %(network_id)s."
-#: nova/exception.py:602
+#: nova/exception.py:606
#, python-format
msgid "Port id %(port_id)s could not be found."
msgstr "No se ha podido encontrar el ID de puerto %(port_id)s."
-#: nova/exception.py:606
+#: nova/exception.py:610
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr "No se ha podido encontrar la red para el puente %(bridge)s"
-#: nova/exception.py:610
+#: nova/exception.py:614
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr "No se ha podido encontrar la red para el uuid %(uuid)s"
-#: nova/exception.py:614
+#: nova/exception.py:618
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr "No se ha podido encontrar la red con cidr %(cidr)s."
-#: nova/exception.py:618
+#: nova/exception.py:622
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr "No se ha podido encontrar la red para la instancia %(instance_id)s."
-#: nova/exception.py:622
+#: nova/exception.py:626
msgid "No networks defined."
msgstr "No se han definido redes."
-#: nova/exception.py:626
+#: nova/exception.py:630
msgid "No more available networks."
msgstr "No se encuentran más redes disponibles."
-#: nova/exception.py:630
+#: nova/exception.py:634
#, python-format
msgid ""
"Either network uuid %(network_uuid)s is not present or is not assigned to"
@@ -727,7 +731,7 @@ msgstr ""
"Bien sea que el uuid de la red %(network_uuid)s no está presente o no "
"está asignado al proyecto %(project_id)s."
-#: nova/exception.py:635
+#: nova/exception.py:639
msgid ""
"More than one possible network found. Specify network ID(s) to select "
"which one(s) to connect to,"
@@ -735,86 +739,91 @@ msgstr ""
"Se ha encontrado más de una red posible. Especifique el ID de la red para"
" seleccionar a cuál(es) conectarse."
-#: nova/exception.py:640
+#: nova/exception.py:644
#, python-format
msgid "Network %(network_uuid)s requires a subnet in order to boot instances on."
msgstr ""
"La red %(network_uuid)s requiere una subred para poder arrancar "
"instancias."
-#: nova/exception.py:645
+#: nova/exception.py:649
#, python-format
msgid ""
"It is not allowed to create an interface on external network "
"%(network_uuid)s"
msgstr "No está permitido crear una interfaz en una red externa %(network_uuid)s"
-#: nova/exception.py:650
+#: nova/exception.py:654
+#, python-format
+msgid "Physical network is missing for network %(network_uuid)s"
+msgstr ""
+
+#: nova/exception.py:658
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
"No se ha podido encontrar la(s) referencia(s) de almacén de datos que la "
"MV utiliza."
-#: nova/exception.py:654
+#: nova/exception.py:662
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr "El puerto %(port_id)s todavía se está utilizando."
-#: nova/exception.py:658
+#: nova/exception.py:666
#, python-format
msgid "Port %(port_id)s requires a FixedIP in order to be used."
msgstr "El puerto %(port_id)s requiere una FixedIP para poder ser utilizado."
-#: nova/exception.py:662
+#: nova/exception.py:670
#, python-format
msgid "Port %(port_id)s not usable for instance %(instance)s."
msgstr "El puerto %(port_id)s no es utilizable para la instancia %(instance)s."
-#: nova/exception.py:666
+#: nova/exception.py:674
#, python-format
msgid "No free port available for instance %(instance)s."
msgstr "No hay ningún puerto libre disponible para la instancia %(instance)s."
-#: nova/exception.py:670
+#: nova/exception.py:678
#, python-format
msgid "Fixed ip %(address)s already exists."
msgstr "La dirección IP estática %(address)s ya existe."
-#: nova/exception.py:674
+#: nova/exception.py:682
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr "No hay ninguna dirección IP fija asociada con el %(id)s."
-#: nova/exception.py:678
+#: nova/exception.py:686
#, python-format
msgid "Fixed ip not found for address %(address)s."
msgstr "No se ha encontrado una dirección IP fija para la dirección %(address)s."
-#: nova/exception.py:682
+#: nova/exception.py:690
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed ips."
msgstr "La instancia %(instance_uuid)s no tiene ninguna IP fija."
-#: nova/exception.py:686
+#: nova/exception.py:694
#, python-format
msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
msgstr ""
"El host de red %(host)s no tiene ninguna dirección IP fija en la red "
"%(network_id)s."
-#: nova/exception.py:691
+#: nova/exception.py:699
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
msgstr "La instancia %(instance_uuid)s no tiene la dirección IP fija '%(ip)s'."
-#: nova/exception.py:695
+#: nova/exception.py:703
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
msgstr "La dirección IP fija (%(address)s) no existe en la red (%(network_uuid)s)."
-#: nova/exception.py:700
+#: nova/exception.py:708
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
@@ -823,128 +832,128 @@ msgstr ""
"La dirección IP fija %(address)s ya se está utilizando en la instancia "
"%(instance_uuid)s."
-#: nova/exception.py:705
+#: nova/exception.py:713
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
msgstr "Hay más de una instancia asociada con la dirección IP fija '%(address)s'."
-#: nova/exception.py:710
+#: nova/exception.py:718
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr "La dirección IP fija %(address)s no es válida."
-#: nova/exception.py:715
+#: nova/exception.py:723
msgid "Zero fixed ips available."
msgstr "No hay ninguna dirección IP fija disponible."
-#: nova/exception.py:719
+#: nova/exception.py:727
msgid "Zero fixed ips could be found."
msgstr "No se ha podido encontrar ninguna dirección IP fija."
-#: nova/exception.py:723
+#: nova/exception.py:731
#, python-format
msgid "Floating ip %(address)s already exists."
msgstr "Ya existe la dirección IP flotante %(address)s."
-#: nova/exception.py:728
+#: nova/exception.py:736
#, python-format
msgid "Floating ip not found for id %(id)s."
msgstr "No se ha encontrado ninguna dirección IP flotante para el id %(id)s."
-#: nova/exception.py:732
+#: nova/exception.py:740
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr "La entrada de DNS %(name)s ya existe en el dominio %(domain)s."
-#: nova/exception.py:736
+#: nova/exception.py:744
#, python-format
msgid "Floating ip not found for address %(address)s."
msgstr ""
"No se ha encontrado ninguna dirección IP flotante para la dirección "
"%(address)s."
-#: nova/exception.py:740
+#: nova/exception.py:748
#, python-format
msgid "Floating ip not found for host %(host)s."
msgstr "No se ha encontrado ninguna dirección IP flotante para el host %(host)s."
-#: nova/exception.py:744
+#: nova/exception.py:752
#, python-format
msgid "Multiple floating ips are found for address %(address)s."
msgstr "Se han encontrado varias ip flotantes para la dirección %(address)s."
-#: nova/exception.py:748
+#: nova/exception.py:756
msgid "Floating ip pool not found."
msgstr "No se ha encontrado pool de ip flotante."
-#: nova/exception.py:753
+#: nova/exception.py:761
msgid "Zero floating ips available."
msgstr "No hay ninguna dirección IP flotante disponible."
-#: nova/exception.py:759
+#: nova/exception.py:767
#, python-format
msgid "Floating ip %(address)s is associated."
msgstr "La dirección IP flotante %(address)s está asociada."
-#: nova/exception.py:763
+#: nova/exception.py:771
#, python-format
msgid "Floating ip %(address)s is not associated."
msgstr "La dirección IP flotante %(address)s no está asociada."
-#: nova/exception.py:767
+#: nova/exception.py:775
msgid "Zero floating ips exist."
msgstr "No existe ninguna dirección IP flotante."
-#: nova/exception.py:772
+#: nova/exception.py:780
#, python-format
msgid "Interface %(interface)s not found."
msgstr "No se ha encontrado la interfaz %(interface)s."
-#: nova/exception.py:777 nova/api/openstack/compute/contrib/floating_ips.py:97
+#: nova/exception.py:785 nova/api/openstack/compute/contrib/floating_ips.py:98
msgid "Cannot disassociate auto assigned floating ip"
msgstr "No se puede desasociar la IP flotante asignada automáticamente"
-#: nova/exception.py:782
+#: nova/exception.py:790
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr "No se ha encontrado el par de claves %(name)s para el usuario %(user_id)s"
-#: nova/exception.py:786
+#: nova/exception.py:794
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr "No se ha podido encontrar el servicio %(service_id)s."
-#: nova/exception.py:790
+#: nova/exception.py:798
#, python-format
msgid "Service with host %(host)s binary %(binary)s exists."
msgstr "Servicio con host %(host)s binario %(binary)s existe."
-#: nova/exception.py:794
+#: nova/exception.py:802
#, python-format
msgid "Service with host %(host)s topic %(topic)s exists."
msgstr "Servicio con host %(host)s asunto %(topic)s existe."
-#: nova/exception.py:798
+#: nova/exception.py:806
#, python-format
msgid "Host %(host)s could not be found."
msgstr "No se ha podido encontrar el host %(host)s."
-#: nova/exception.py:802
+#: nova/exception.py:810
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr "No se ha podido encontrar el host de Compute %(host)s."
-#: nova/exception.py:806
+#: nova/exception.py:814
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr "No se ha podido encontrar el binario %(binary)s en el host %(host)s."
-#: nova/exception.py:810
+#: nova/exception.py:818
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr "Caducidad de reserva no válida %(expire)s."
-#: nova/exception.py:814
+#: nova/exception.py:822
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
@@ -953,70 +962,75 @@ msgstr ""
"El cambio produciría un uso inferior a 0 para los recursos siguientes: "
"%(unders)s."
-#: nova/exception.py:819
+#: nova/exception.py:827
+#, python-format
+msgid "Wrong quota method %(method)s used on resource %(res)s"
+msgstr ""
+
+#: nova/exception.py:831
msgid "Quota could not be found"
msgstr "No se ha podido encontrar la cuota"
-#: nova/exception.py:823
+#: nova/exception.py:835
#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "Cuota existente para el proyecto %(project_id)s, recurso %(resource)s"
-#: nova/exception.py:828
+#: nova/exception.py:840
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr "Recursos de cuota desconocidos %(unknown)s."
-#: nova/exception.py:832
+#: nova/exception.py:844
#, python-format
msgid "Quota for user %(user_id)s in project %(project_id)s could not be found."
msgstr ""
"No se ha encontrado la cuota para el usuario %(user_id)s en el proyecto "
"%(project_id)s."
-#: nova/exception.py:837
+#: nova/exception.py:849
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr "No se ha encontrado la cuota para el proyecto %(project_id)s."
-#: nova/exception.py:841
+#: nova/exception.py:853
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr "No se ha encontrado la clase de cuota %(class_name)s."
-#: nova/exception.py:845
+#: nova/exception.py:857
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr "No se ha encontrado el uso de cuota para el proyecto %(project_id)s."
-#: nova/exception.py:849
+#: nova/exception.py:861
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr "No se ha encontrado la reserva de cuota %(uuid)s."
-#: nova/exception.py:853
+#: nova/exception.py:865
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr "Cuota superada para recursos: %(overs)s"
-#: nova/exception.py:857
+#: nova/exception.py:869
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr "No se ha encontrado el grupo de seguridad %(security_group_id)s."
-#: nova/exception.py:861
+#: nova/exception.py:873
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr ""
"No se ha encontrado el grupo de seguridad %(security_group_id)s para el "
"proyecto %(project_id)s."
-#: nova/exception.py:866
+#: nova/exception.py:878
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr "No se ha encontrado el grupo de seguridad con la regla %(rule_id)s."
-#: nova/exception.py:871
+#: nova/exception.py:883
#, python-format
msgid ""
"Security group %(security_group_name)s already exists for project "
@@ -1025,7 +1039,7 @@ msgstr ""
"El grupo de seguridad %(security_group_name)s ya existe para el proyecto "
"%(project_id)s"
-#: nova/exception.py:876
+#: nova/exception.py:888
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
@@ -1034,7 +1048,7 @@ msgstr ""
"El grupo de seguridad %(security_group_id)s ya está asociado con la "
"instancia %(instance_id)s"
-#: nova/exception.py:881
+#: nova/exception.py:893
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
@@ -1043,14 +1057,14 @@ msgstr ""
"El grupo de seguridad %(security_group_id)s no está asociado con la "
"instancia %(instance_id)s"
-#: nova/exception.py:886
+#: nova/exception.py:898
#, python-format
msgid "Security group default rule (%rule_id)s not found."
msgstr ""
"La regla predeterminada (%rule_id)s del grupo de seguridad no se ha "
"encontrado."
-#: nova/exception.py:890
+#: nova/exception.py:902
msgid ""
"Network requires port_security_enabled and subnet associated in order to "
"apply security groups."
@@ -1058,33 +1072,33 @@ msgstr ""
"La red requiere port_security_enabled y una subred asociada para aplicar "
"grupos de seguridad."
-#: nova/exception.py:896
+#: nova/exception.py:908
#, python-format
msgid "Rule already exists in group: %(rule)s"
msgstr "La regla ya existe en el grupo: %(rule)s"
-#: nova/exception.py:900
+#: nova/exception.py:912
msgid "No Unique Match Found."
msgstr "No se ha encontrado una sola coincidencia."
-#: nova/exception.py:905
+#: nova/exception.py:917
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr "No se ha podido encontrar la migración %(migration_id)s."
-#: nova/exception.py:909
+#: nova/exception.py:921
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr ""
"No se ha encontrado la migración para la instancia %(instance_id)s con el"
" estado %(status)s."
-#: nova/exception.py:914
+#: nova/exception.py:926
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr "No se ha podido encontrar la agrupación de consolas %(pool_id)s. "
-#: nova/exception.py:918
+#: nova/exception.py:930
#, python-format
msgid ""
"Console pool with host %(host)s, console_type %(console_type)s and "
@@ -1093,7 +1107,7 @@ msgstr ""
"El pool de consolas con host %(host)s, console_type %(console_type)s y "
"compute_host %(compute_host)s ya existe."
-#: nova/exception.py:924
+#: nova/exception.py:936
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
@@ -1102,17 +1116,17 @@ msgstr ""
"No se ha encontrado la agrupación de consolas de tipo %(console_type)s "
"para el host de cálculo %(compute_host)s en el host de proxy %(host)s."
-#: nova/exception.py:930
+#: nova/exception.py:942
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr "No se ha podido encontrar la consola %(console_id)s."
-#: nova/exception.py:934
+#: nova/exception.py:946
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr "No se ha podido encontrar la consola para la instancia %(instance_uuid)s."
-#: nova/exception.py:938
+#: nova/exception.py:950
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
@@ -1121,99 +1135,106 @@ msgstr ""
"No se ha podido encontrar la consola para la instancia %(instance_uuid)s "
"en la agrupación %(pool_id)s."
-#: nova/exception.py:943
+#: nova/exception.py:955
#, python-format
msgid "Invalid console type %(console_type)s"
msgstr "Tipo de consola %(console_type)s no válido "
-#: nova/exception.py:947
+#: nova/exception.py:959
#, python-format
msgid "Unavailable console type %(console_type)s."
msgstr "El tipo de consola %(console_type)s no está disponible."
-#: nova/exception.py:951
+#: nova/exception.py:963
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr "El puerto de rangos de consola %(min_port)d-%(max_port)d se ha agotado."
-#: nova/exception.py:956
+#: nova/exception.py:968
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr "No se ha podido encontrar el tipo %(flavor_id)s."
-#: nova/exception.py:960
+#: nova/exception.py:972
#, python-format
msgid "Flavor with name %(flavor_name)s could not be found."
msgstr "No se puede encontrar el sabor con nombre %(flavor_name)s."
-#: nova/exception.py:964
+#: nova/exception.py:976
#, python-format
msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination."
msgstr ""
"No se ha encontrado el acceso de sabor para la combinación %(flavor_id)s "
"/ %(project_id)s. "
-#: nova/exception.py:969
+#: nova/exception.py:981
+#, python-format
+msgid ""
+"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
+"retries."
+msgstr ""
+
+#: nova/exception.py:986
#, python-format
msgid "Cell %(cell_name)s doesn't exist."
msgstr "La célula %(cell_name)s no existe."
-#: nova/exception.py:973
+#: nova/exception.py:990
#, python-format
msgid "Cell with name %(name)s already exists."
msgstr "Una celda con el nombre %(name)s ya existe."
-#: nova/exception.py:977
+#: nova/exception.py:994
#, python-format
msgid "Inconsistency in cell routing: %(reason)s"
msgstr "Incoherencia en direccionamiento de célula: %(reason)s"
-#: nova/exception.py:981
+#: nova/exception.py:998
#, python-format
msgid "Service API method not found: %(detail)s"
msgstr "No se ha encontrado el método de API de servicio: %(detail)s"
-#: nova/exception.py:985
+#: nova/exception.py:1002
msgid "Timeout waiting for response from cell"
msgstr "Se ha excedido el tiempo de espera de respuesta de la célula"
-#: nova/exception.py:989
+#: nova/exception.py:1006
#, python-format
msgid "Cell message has reached maximum hop count: %(hop_count)s"
msgstr ""
"El mensaje de célula ha alcanzado la cuenta de saltos máxima: "
"%(hop_count)s"
-#: nova/exception.py:993
+#: nova/exception.py:1010
msgid "No cells available matching scheduling criteria."
msgstr ""
"No hay células disponibles que coincidan con los criterios de "
"planificación."
-#: nova/exception.py:997
+#: nova/exception.py:1014
msgid "Cannot update cells configuration file."
msgstr "No se puede actualizar el archivo de configuración de la celda."
-#: nova/exception.py:1001
+#: nova/exception.py:1018
#, python-format
msgid "Cell is not known for instance %(instance_uuid)s"
msgstr "No se conoce la célula en la instancia %(instance_uuid)s"
-#: nova/exception.py:1005
+#: nova/exception.py:1022
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr ""
"No se ha podido encontrar el filtro de host de planificador "
"%(filter_name)s."
-#: nova/exception.py:1009
+#: nova/exception.py:1026
#, python-format
msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s."
msgstr ""
"el sabor %(flavor_id)s no tiene especificaciones extras con clave "
"%(extra_specs_key)s"
-#: nova/exception.py:1014
+#: nova/exception.py:1031
#, python-format
msgid ""
"Metric %(name)s could not be found on the compute host node "
@@ -1222,67 +1243,67 @@ msgstr ""
"La métrica %(name)s no se puede encontrar en el nodo de cómputo anfitrión"
" %(host)s:%(node)s."
-#: nova/exception.py:1019
+#: nova/exception.py:1036
#, python-format
msgid "File %(file_path)s could not be found."
msgstr "No se ha podido encontrar el archivo %(file_path)s."
-#: nova/exception.py:1023
+#: nova/exception.py:1040
msgid "Zero files could be found."
msgstr "No se ha podido encontrar ningún archivo."
-#: nova/exception.py:1027
+#: nova/exception.py:1044
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
"No se ha encontrado ningún conmutador virtual asociado con el adaptador "
"de red %(adapter)s."
-#: nova/exception.py:1032
+#: nova/exception.py:1049
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr "No se ha podido encontrar el adaptador de red %(adapter)s."
-#: nova/exception.py:1036
+#: nova/exception.py:1053
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr "No se ha podido encontrar la clase %(class_name)s: %(exception)s"
-#: nova/exception.py:1040
+#: nova/exception.py:1057
msgid "Action not allowed."
msgstr "Acción no permitida. "
-#: nova/exception.py:1044
+#: nova/exception.py:1061
msgid "Rotation is not allowed for snapshots"
msgstr "No se permite la rotación para instantáneas"
-#: nova/exception.py:1048
+#: nova/exception.py:1065
msgid "Rotation param is required for backup image_type"
msgstr ""
"El parámetro de rotación es necesario para el tipo de imagen de copia de "
"seguridad "
-#: nova/exception.py:1053 nova/tests/compute/test_keypairs.py:144
+#: nova/exception.py:1070 nova/tests/compute/test_keypairs.py:146
#, python-format
msgid "Key pair '%(key_name)s' already exists."
msgstr "El par de claves '%(key_name)s' ya existe."
-#: nova/exception.py:1057
+#: nova/exception.py:1074
#, python-format
msgid "Instance %(name)s already exists."
msgstr "La instancia %(name)s ya existe."
-#: nova/exception.py:1061
+#: nova/exception.py:1078
#, python-format
msgid "Flavor with name %(name)s already exists."
msgstr "El sabor con nombre %(name)s ya existe."
-#: nova/exception.py:1065
+#: nova/exception.py:1082
#, python-format
msgid "Flavor with ID %(flavor_id)s already exists."
msgstr "El sabor con ID %(flavor_id)s ya existe."
-#: nova/exception.py:1069
+#: nova/exception.py:1086
#, python-format
msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
@@ -1291,86 +1312,86 @@ msgstr ""
"Versión de acceso ya existe para la combinación de la versión "
"%(flavor_id)s y el proyecto %(project_id)s."
-#: nova/exception.py:1074
+#: nova/exception.py:1091
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr "%(path)s no está en un almacenamiento compartido: %(reason)s"
-#: nova/exception.py:1078
+#: nova/exception.py:1095
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr "%(path)s no está en un almacenamiento local: %(reason)s"
-#: nova/exception.py:1082
+#: nova/exception.py:1099
#, python-format
msgid "Storage error: %(reason)s"
msgstr ""
-#: nova/exception.py:1086
+#: nova/exception.py:1103
#, python-format
msgid "Migration error: %(reason)s"
msgstr "Error en migración: %(reason)s"
-#: nova/exception.py:1090
+#: nova/exception.py:1107
#, python-format
msgid "Migration pre-check error: %(reason)s"
msgstr "Error de pre-verificación de migraión: %(reason)s"
-#: nova/exception.py:1094
+#: nova/exception.py:1111
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr "Cuerpo de mensaje con formato incorrecto: %(reason)s"
-#: nova/exception.py:1100
+#: nova/exception.py:1117
#, python-format
msgid "Could not find config at %(path)s"
msgstr "No se ha podido encontrar configuración en %(path)s"
-#: nova/exception.py:1104
+#: nova/exception.py:1121
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr "No se ha podido cargar aplicación de pegar '%(name)s' desde %(path)s "
-#: nova/exception.py:1108
+#: nova/exception.py:1125
msgid "When resizing, instances must change flavor!"
msgstr "Al redimensionarse, las instancias deben cambiar de sabor."
-#: nova/exception.py:1112
+#: nova/exception.py:1129
#, python-format
msgid "Resize error: %(reason)s"
msgstr "Error de redimensionamiento: %(reason)s"
-#: nova/exception.py:1116
+#: nova/exception.py:1133
#, python-format
msgid "Server disk was unable to be resized because: %(reason)s"
msgstr "El disco del servidor fue incapaz de re-escalarse debido a: %(reason)s"
-#: nova/exception.py:1120
+#: nova/exception.py:1137
msgid "Flavor's memory is too small for requested image."
msgstr "La memoria del sabor es demasiado pequeña para la imagen solicitada."
-#: nova/exception.py:1124
+#: nova/exception.py:1141
msgid "Flavor's disk is too small for requested image."
msgstr "El disco del sabor es demasiado pequeño para la imagen solicitada."
-#: nova/exception.py:1128
+#: nova/exception.py:1145
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
"No hay suficiente memoria libre en el nodo de cálculo para iniciar "
"%(uuid)s."
-#: nova/exception.py:1132
+#: nova/exception.py:1149
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr "No se ha encontrado ningún host válido. %(reason)s"
-#: nova/exception.py:1137
+#: nova/exception.py:1154
#, python-format
msgid "Quota exceeded: code=%(code)s"
msgstr "Cuota excedida: código=%(code)s"
-#: nova/exception.py:1144
+#: nova/exception.py:1161
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
@@ -1379,44 +1400,44 @@ msgstr ""
"Se ha superado la cuota para %(overs)s: se ha solicitado %(req)s, pero ya"
" se utiliza %(used)d de %(allowed)d %(resource)s."
-#: nova/exception.py:1149
+#: nova/exception.py:1166
msgid "Maximum number of floating ips exceeded"
msgstr "Se ha superado el número máximo de IP flotantes"
-#: nova/exception.py:1153
+#: nova/exception.py:1170
msgid "Maximum number of fixed ips exceeded"
msgstr "Se ha superado el número máximo de IP fijas."
-#: nova/exception.py:1157
+#: nova/exception.py:1174
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "El número máximo de elementos de metadatos supera %(allowed)d"
-#: nova/exception.py:1161
+#: nova/exception.py:1178
msgid "Personality file limit exceeded"
msgstr "Se ha superado el límite de archivo de personalidad"
-#: nova/exception.py:1165
+#: nova/exception.py:1182
msgid "Personality file path too long"
msgstr "Vía de acceso de archivo de personalidad demasiado larga"
-#: nova/exception.py:1169
+#: nova/exception.py:1186
msgid "Personality file content too long"
msgstr "Contenido del archivo de personalidad demasiado largo"
-#: nova/exception.py:1173 nova/tests/compute/test_keypairs.py:155
+#: nova/exception.py:1190 nova/tests/compute/test_keypairs.py:157
msgid "Maximum number of key pairs exceeded"
msgstr "Se ha superado el número máximo de pares de claves"
-#: nova/exception.py:1178
+#: nova/exception.py:1195
msgid "Maximum number of security groups or rules exceeded"
msgstr "Se ha superado el número máximo de grupos o reglas de seguridad"
-#: nova/exception.py:1182
+#: nova/exception.py:1199
msgid "Maximum number of ports exceeded"
msgstr "El número máximo de puertos ha sido excedido."
-#: nova/exception.py:1186
+#: nova/exception.py:1203
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
@@ -1425,144 +1446,140 @@ msgstr ""
"Agregado %(aggregate_id)s: la acción '%(action)s' ha producido un error: "
"%(reason)s."
-#: nova/exception.py:1191
+#: nova/exception.py:1208
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr "No se ha podido encontrar el agregado %(aggregate_id)s."
-#: nova/exception.py:1195
+#: nova/exception.py:1212
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "El agregado %(aggregate_name)s ya existe."
-#: nova/exception.py:1199
+#: nova/exception.py:1216
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr "El agregado %(aggregate_id)s no tiene ningún host %(host)s."
-#: nova/exception.py:1203
+#: nova/exception.py:1220
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
"El agregado %(aggregate_id)s no tiene metadatos con la clave "
"%(metadata_key)s."
-#: nova/exception.py:1208
+#: nova/exception.py:1225
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "El agregado %(aggregate_id)s ya tiene el host %(host)s."
-#: nova/exception.py:1212
+#: nova/exception.py:1229
msgid "Unable to create flavor"
msgstr "Incapaz de crear sabor"
-#: nova/exception.py:1216
+#: nova/exception.py:1233
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
"No se ha podido establecer la contraseña de administrador en %(instance)s"
" debido a %(reason)s"
-#: nova/exception.py:1222
+#: nova/exception.py:1239
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr "Se ha detectado una vlan existente con el ID %(vlan)d"
-#: nova/exception.py:1226
+#: nova/exception.py:1243
msgid "There was a conflict when trying to complete your request."
msgstr "Hubo un conflicto tratándo de completar su solicitud."
-#: nova/exception.py:1232
+#: nova/exception.py:1249
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr "No se ha podido encontrar la instancia %(instance_id)s."
-#: nova/exception.py:1236
+#: nova/exception.py:1253
#, python-format
msgid "Info cache for instance %(instance_uuid)s could not be found."
msgstr ""
"No se ha podido encontrar la memoria caché de información para la "
"instancia %(instance_uuid)s."
-#: nova/exception.py:1241
+#: nova/exception.py:1258
#, python-format
msgid "Node %(node_id)s could not be found."
msgstr "No se ha podido encontrar el nodo %(node_id)s."
-#: nova/exception.py:1245
+#: nova/exception.py:1262
#, python-format
msgid "Node with UUID %(node_uuid)s could not be found."
msgstr "No se ha podido encontrar el nodo con el UUID %(node_uuid)s."
-#: nova/exception.py:1249
+#: nova/exception.py:1266
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr "No se ha podido encontrar el marcador %(marker)s."
-#: nova/exception.py:1254
+#: nova/exception.py:1271
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr "ID no válido: %(val)s (se espera \"i-...\")."
-#: nova/exception.py:1258
+#: nova/exception.py:1275
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr "No se ha podido captar la imagen %(image_id)s"
-#: nova/exception.py:1262
+#: nova/exception.py:1279
#, python-format
msgid "Could not upload image %(image_id)s"
msgstr "No se ha podido cargar la imagen %(image_id)s"
-#: nova/exception.py:1266
+#: nova/exception.py:1283
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr "La tarea %(task_name)s ya se está ejecutando en el host %(host)s"
-#: nova/exception.py:1270
+#: nova/exception.py:1287
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr "La tarea %(task_name)s no se está ejecutando en el host %(host)s"
-#: nova/exception.py:1274
+#: nova/exception.py:1291
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr "La instancia %(instance_uuid)s está bloqueada"
-#: nova/exception.py:1278
+#: nova/exception.py:1295
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "Valor inválido para la opción de configuración de controlador: %(option)s"
-#: nova/exception.py:1282
+#: nova/exception.py:1299
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
"No se ha podido montar la unidad de configuración vfat. %(operation)s ha "
"fallado. Error: %(error)s"
-#: nova/exception.py:1287
+#: nova/exception.py:1304
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
"Formato de unidad de configuración desconocido %(format)s. Seleccione uno"
" de iso9660 o vfat."
-#: nova/exception.py:1292
+#: nova/exception.py:1309
#, python-format
-msgid "Failed to attach network adapter device to %(instance)s"
+msgid "Failed to attach network adapter device to %(instance_uuid)s"
msgstr ""
-"Se ha encontrado un error en la conexión del dispositivo de adaptador de "
-"red a %(instance)s."
-#: nova/exception.py:1296
+#: nova/exception.py:1314
#, python-format
-msgid "Failed to detach network adapter device from %(instance)s"
+msgid "Failed to detach network adapter device from %(instance_uuid)s"
msgstr ""
-"Se ha encontrado un error en la desconexión del dispositivo de adaptador "
-"de red a %(instance)s."
-#: nova/exception.py:1300
+#: nova/exception.py:1319
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
@@ -1572,11 +1589,11 @@ msgstr ""
"más de %(maxsize)s bytes una vez se ha codificado base64. Sus datos "
"tienen %(length)d bytes."
-#: nova/exception.py:1306
+#: nova/exception.py:1325
msgid "User data needs to be valid base 64."
msgstr "Los datos de usuario deben ser de base 64 válidos."
-#: nova/exception.py:1310
+#: nova/exception.py:1329
#, python-format
msgid ""
"Unexpected task state: expecting %(expected)s but the actual state is "
@@ -1585,7 +1602,7 @@ msgstr ""
"Estado de tarea inesperado: se esperaba %(expected)s pero el estado es "
"%(actual)s"
-#: nova/exception.py:1319
+#: nova/exception.py:1338
#, python-format
msgid ""
"Action for request_id %(request_id)s on instance %(instance_uuid)s not "
@@ -1594,12 +1611,12 @@ msgstr ""
"La acción para request_id %(request_id)s en la instancia "
"%(instance_uuid)s no se ha encontrado."
-#: nova/exception.py:1324
+#: nova/exception.py:1343
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "No se ha encontrado el suceso %(event)s para el id de acción %(action_id)s"
-#: nova/exception.py:1328
+#: nova/exception.py:1347
#, python-format
msgid ""
"Unexpected VM state: expecting %(expected)s but the actual state is "
@@ -1608,21 +1625,21 @@ msgstr ""
"Estado de VM inesperado: se esperaba %(expected)s pero el estado actual "
"es %(actual)s"
-#: nova/exception.py:1333
+#: nova/exception.py:1352
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr "No se ha podido encontrar el archivo CA para %(project)s "
-#: nova/exception.py:1337
+#: nova/exception.py:1356
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr "No se ha podido encontrar el archivo CRL para %(project)s"
-#: nova/exception.py:1341
+#: nova/exception.py:1360
msgid "Instance recreate is not supported."
msgstr "La recreación de la instancia no está soportada."
-#: nova/exception.py:1345
+#: nova/exception.py:1364
#, python-format
msgid ""
"The service from servicegroup driver %(driver)s is temporarily "
@@ -1631,21 +1648,21 @@ msgstr ""
"El servicio del controlador servicegroup %(driver)s está temporalmente no"
" disponible."
-#: nova/exception.py:1350
+#: nova/exception.py:1369
#, python-format
msgid "%(binary)s attempted direct database access which is not allowed by policy"
msgstr ""
"%(binary)s ha intentado un acceso de bases de datos directo que no está "
"permitido por la política."
-#: nova/exception.py:1355
+#: nova/exception.py:1374
#, python-format
msgid "Virtualization type '%(virt)s' is not supported by this compute driver"
msgstr ""
"El tipo de virtualización '%(virt)s' no está soportado por este "
"controlador de cálculo"
-#: nova/exception.py:1360
+#: nova/exception.py:1379
#, python-format
msgid ""
"Requested hardware '%(model)s' is not supported by the '%(virt)s' virt "
@@ -1654,123 +1671,123 @@ msgstr ""
"El hardware solicitado '%(model)s' no está soportado por el controlador "
"de virtualización '%(virt)s'"
-#: nova/exception.py:1365
+#: nova/exception.py:1384
#, python-format
msgid "Invalid Base 64 data for file %(path)s"
msgstr "Datos Base-64 inválidos para el archivo %(path)s"
-#: nova/exception.py:1369
+#: nova/exception.py:1388
#, python-format
msgid "Build of instance %(instance_uuid)s aborted: %(reason)s"
msgstr "Construcción de instancia %(instance_uuid)s abortada: %(reason)s"
-#: nova/exception.py:1373
+#: nova/exception.py:1392
#, python-format
msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s"
msgstr "Construcción de instancia %(instance_uuid)s reprogramada: %(reason)s"
-#: nova/exception.py:1378
+#: nova/exception.py:1397
#, python-format
msgid "Shadow table with name %(name)s already exists."
msgstr "Una Tabla Shadow con nombre %(name)s ya existe."
-#: nova/exception.py:1383
+#: nova/exception.py:1402
#, python-format
msgid "Instance rollback performed due to: %s"
msgstr "Reversión de instancia ejecutada debido a: %s"
-#: nova/exception.py:1389
+#: nova/exception.py:1408
#, python-format
msgid "Unsupported object type %(objtype)s"
msgstr "Tipo de objeto no soportado %(objtype)s"
-#: nova/exception.py:1393
+#: nova/exception.py:1412
#, python-format
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr "No se puede ejecutar %(method)s en un objecto huérfano %(objtype)s"
-#: nova/exception.py:1397
+#: nova/exception.py:1416
#, python-format
msgid "Version %(objver)s of %(objname)s is not supported"
msgstr "Versión %(objver)s de %(objname)s no está soportada"
-#: nova/exception.py:1401
+#: nova/exception.py:1420
#, python-format
msgid "Cannot modify readonly field %(field)s"
msgstr ""
-#: nova/exception.py:1405
+#: nova/exception.py:1424
#, python-format
msgid "Object action %(action)s failed because: %(reason)s"
msgstr "La acción objeto %(action)s falló debido a: %(reason)s"
-#: nova/exception.py:1409
+#: nova/exception.py:1428
#, python-format
msgid "Field %(field)s of %(objname)s is not an instance of Field"
msgstr "El campo %(field)s de %(objname)s no es una instancia de campo."
-#: nova/exception.py:1413
+#: nova/exception.py:1432
#, python-format
msgid "Core API extensions are missing: %(missing_apis)s"
msgstr "Faltan las extensiones Core API : %(missing_apis)s"
-#: nova/exception.py:1417
+#: nova/exception.py:1436
#, python-format
msgid "Error during following call to agent: %(method)s"
msgstr "Error durante la siguiente llamada al agente: %(method)s"
-#: nova/exception.py:1421
+#: nova/exception.py:1440
#, python-format
msgid "Unable to contact guest agent. The following call timed out: %(method)s"
msgstr ""
"Unposible contactar al agente invitado. La siguiente llamada agotó su "
"tiempo de espera: %(method)s"
-#: nova/exception.py:1426
+#: nova/exception.py:1445
#, python-format
msgid "Agent does not support the call: %(method)s"
msgstr "El agente no soporta la llamada %(method)s"
-#: nova/exception.py:1430
+#: nova/exception.py:1449
#, python-format
msgid "Instance group %(group_uuid)s could not be found."
msgstr "No se ha podido encontrar el grupo de instancias %(group_uuid)s."
-#: nova/exception.py:1434
+#: nova/exception.py:1453
#, python-format
msgid "Instance group %(group_uuid)s already exists."
msgstr "El grupo de instancias %(group_uuid)s ya existe."
-#: nova/exception.py:1438
+#: nova/exception.py:1457
#, python-format
msgid "Instance group %(group_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
"El grupo de instancias %(group_uuid)s no tiene metadatos con clave "
"%(metadata_key)s"
-#: nova/exception.py:1443
+#: nova/exception.py:1462
#, python-format
msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s."
msgstr ""
"El grupo de instancias %(group_uuid)s no tiene miembro con identificador "
"%(instance_id)s."
-#: nova/exception.py:1448
+#: nova/exception.py:1467
#, python-format
msgid "Instance group %(group_uuid)s has no policy %(policy)s."
msgstr "El grupo de instancias %(group_uuid)s no tiene política %(policy)s"
-#: nova/exception.py:1452
+#: nova/exception.py:1471
#, python-format
msgid "Number of retries to plugin (%(num_retries)d) exceeded."
msgstr "Se ha excedido el número de reintentos para el plugin (%(num_retries)d)."
-#: nova/exception.py:1456
+#: nova/exception.py:1475
#, python-format
msgid "There was an error with the download module %(module)s. %(reason)s"
msgstr "Hubo un error con el módulo de descarga %(module)s. %(reason)s"
-#: nova/exception.py:1461
+#: nova/exception.py:1480
#, python-format
msgid ""
"The metadata for this location will not work with this module %(module)s."
@@ -1779,37 +1796,50 @@ msgstr ""
"Los metadatos para esta ubicación no funcionarán con este módulo "
"%(module)s. %(reason)s."
-#: nova/exception.py:1466
+#: nova/exception.py:1485
#, python-format
msgid "The method %(method_name)s is not implemented."
msgstr "El método %(method_name)s no está implementado."
-#: nova/exception.py:1470
+#: nova/exception.py:1489
#, python-format
msgid "The module %(module)s is misconfigured: %(reason)s."
msgstr "El módulo %(module)s está mal configurado: %(reason)s"
-#: nova/exception.py:1474
+#: nova/exception.py:1493
#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Error al crear monitor de recursos: %(monitor)s"
-#: nova/exception.py:1478
+#: nova/exception.py:1497
#, python-format
msgid "The PCI address %(address)s has an incorrect format."
msgstr "La dirección PCI %(address)s tiene un formato incorrecto."
-#: nova/exception.py:1482
+#: nova/exception.py:1501
+#, python-format
+msgid ""
+"Invalid PCI Whitelist: The PCI address %(address)s has an invalid "
+"%(field)s."
+msgstr ""
+
+#: nova/exception.py:1506
+msgid ""
+"Invalid PCI Whitelist: The PCI whitelist can specify devname or address, "
+"but not both"
+msgstr ""
+
+#: nova/exception.py:1512
#, python-format
msgid "PCI device %(id)s not found"
msgstr "Dispositivo PCI %(id)s no encontrado"
-#: nova/exception.py:1486
+#: nova/exception.py:1516
#, python-format
msgid "PCI Device %(node_id)s:%(address)s not found."
msgstr "Dispositivo PCI %(node_id)s:%(address)s no encontrado."
-#: nova/exception.py:1490
+#: nova/exception.py:1520
#, python-format
msgid ""
"PCI device %(compute_node_id)s:%(address)s is %(status)s instead of "
@@ -1818,7 +1848,7 @@ msgstr ""
"el dispositivo PCI %(compute_node_id)s:%(address)s está %(status)s en "
"lugar de %(hopestatus)s"
-#: nova/exception.py:1496
+#: nova/exception.py:1526
#, python-format
msgid ""
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead "
@@ -1827,12 +1857,12 @@ msgstr ""
"El dueño del dispositivo PCI %(compute_node_id)s:%(address)s es %(owner)s"
" en lugar de %(hopeowner)s"
-#: nova/exception.py:1502
+#: nova/exception.py:1532
#, python-format
msgid "PCI device request (%requests)s failed"
msgstr "Solicitud de dispositivo PCI (%request)s fallida"
-#: nova/exception.py:1507
+#: nova/exception.py:1537
#, python-format
msgid ""
"Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty "
@@ -1841,32 +1871,32 @@ msgstr ""
"Intento de consumir dispositivo PCI %(compute_node_id)s:%(address)s de "
"pool vacío"
-#: nova/exception.py:1513
+#: nova/exception.py:1543
#, python-format
msgid "Invalid PCI alias definition: %(reason)s"
msgstr "Definición de alias PCI inválido: %(reason)s"
-#: nova/exception.py:1517
+#: nova/exception.py:1547
#, python-format
msgid "PCI alias %(alias)s is not defined"
msgstr "Alias PCI %(alias)s no definido"
-#: nova/exception.py:1522
+#: nova/exception.py:1552
#, python-format
msgid "Not enough parameters: %(reason)s"
msgstr "No hay suficientes parámetros: %(reason)s"
-#: nova/exception.py:1527
+#: nova/exception.py:1557
#, python-format
msgid "Invalid PCI devices Whitelist config %(reason)s"
msgstr "Configuración de lista permisiva de dispositivos PCI inválida %(reason)s"
-#: nova/exception.py:1531
+#: nova/exception.py:1561
#, python-format
msgid "Cannot change %(node_id)s to %(new_node_id)s"
msgstr "No se puede cambiar %(node_id)s hacia %(new_node_id)s"
-#: nova/exception.py:1541
+#: nova/exception.py:1571
#, python-format
msgid ""
"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
@@ -1875,39 +1905,39 @@ msgstr ""
"Fallo al preparar el dispositivo PCI %(id)s para la instancia "
"%(instance_uuid)s: %(reason)s"
-#: nova/exception.py:1546
+#: nova/exception.py:1576
#, python-format
msgid "Failed to detach PCI device %(dev)s: %(reason)s"
msgstr "Fallo al desasociar el dispositivo PCI %(dev)s: %(reason)s"
-#: nova/exception.py:1550
+#: nova/exception.py:1580
#, python-format
msgid "%(type)s hypervisor does not support PCI devices"
msgstr "El hipervisor %(type)s no soporta dispositivos PCI"
-#: nova/exception.py:1554
+#: nova/exception.py:1584
#, python-format
msgid "Key manager error: %(reason)s"
msgstr "error de administrador de claves: %(reason)s"
-#: nova/exception.py:1558
+#: nova/exception.py:1588
#, python-format
msgid "Failed to remove volume(s): (%(reason)s)"
msgstr "Fallo al remover el(los) volumen(es): (%(reason)s)"
-#: nova/exception.py:1562
+#: nova/exception.py:1592
#, python-format
msgid "Provided video model (%(model)s) is not supported."
msgstr "Modelo de vídeo proporcionado (%(model)s) no está sopotado."
-#: nova/exception.py:1566
+#: nova/exception.py:1596
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr ""
"La ruta del dispositivo RNG proporcionada: (%(path)s) no está presente en"
" el anfitrión."
-#: nova/exception.py:1571
+#: nova/exception.py:1601
#, python-format
msgid ""
"The requested amount of video memory %(req_vram)d is higher than the "
@@ -1916,24 +1946,94 @@ msgstr ""
"La cantidad solicitada de memoria de vídeo %(req_vram)d es mayor que la "
"máxima permitida por el sabor %(max_vram)d."
-#: nova/exception.py:1576
+#: nova/exception.py:1606
#, python-format
msgid "Provided watchdog action (%(action)s) is not supported."
msgstr "La acción watchdog proporcionada (%(action)s) no está soportada."
-#: nova/exception.py:1580
+#: nova/exception.py:1610
msgid ""
-"Block migration of instances with config drives is not supported in "
-"libvirt."
+"Live migration of instances with config drives is not supported in "
+"libvirt unless libvirt instance path and drive data is shared across "
+"compute nodes."
msgstr ""
-"La migración de bloque de instancias con discos configurados no está "
-"soportada en libvirt."
-#: nova/exception.py:1585
+#: nova/exception.py:1616
+#, python-format
+msgid ""
+"Host %(server)s is running an old version of Nova, live migrations "
+"involving that version may cause data loss. Upgrade Nova on %(server)s "
+"and try again."
+msgstr ""
+
+#: nova/exception.py:1622
#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "Error durante la extracción de la instancia %(instance_id)s: %(reason)s"
+#: nova/exception.py:1626
+#, python-format
+msgid ""
+"Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted "
+"%(maxsockets)d:%(maxcores)d:%(maxthreads)d"
+msgstr ""
+
+#: nova/exception.py:1631
+#, python-format
+msgid ""
+"Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted "
+"%(maxsockets)d:%(maxcores)d:%(maxthreads)d"
+msgstr ""
+
+#: nova/exception.py:1636
+#, python-format
+msgid ""
+"Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to"
+" satisfy for vcpus count %(vcpus)d"
+msgstr ""
+
+#: nova/exception.py:1641
+#, python-format
+msgid "Architecture name '%(arch)s' is not recognised"
+msgstr ""
+
+#: nova/exception.py:1645
+msgid "CPU and memory allocation must be provided for all NUMA nodes"
+msgstr ""
+
+#: nova/exception.py:1650
+#, python-format
+msgid ""
+"Image property '%(name)s' is not permitted to override NUMA configuration"
+" set against the flavor"
+msgstr ""
+
+#: nova/exception.py:1655
+msgid ""
+"Asymmetric NUMA topologies require explicit assignment of CPUs and memory"
+" to nodes in image or flavor"
+msgstr ""
+
+#: nova/exception.py:1660
+#, python-format
+msgid "CPU number %(cpunum)d is larger than max %(cpumax)d"
+msgstr ""
+
+#: nova/exception.py:1664
+#, python-format
+msgid "CPU number %(cpunum)d is assigned to two nodes"
+msgstr ""
+
+#: nova/exception.py:1668
+#, python-format
+msgid "CPU number %(cpuset)s is not assigned to any node"
+msgstr ""
+
+#: nova/exception.py:1672
+#, python-format
+msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB"
+msgstr ""
+
#: nova/filters.py:84
#, python-format
msgid "Filter %s returned 0 hosts"
@@ -1947,124 +2047,128 @@ msgstr "No se ha podido enviar notificación de actualización de estado"
msgid "Failed to get nw_info"
msgstr "No se ha podido obtener nw_info"
-#: nova/quota.py:1326
+#: nova/quota.py:1332
#, python-format
msgid "Failed to commit reservations %s"
msgstr "Ha fallado la entrega de reservas %s|"
-#: nova/quota.py:1349
+#: nova/quota.py:1355
#, python-format
msgid "Failed to roll back reservations %s"
msgstr "Fallo al revertir las reservas %s"
-#: nova/service.py:160
+#: nova/service.py:161
#, python-format
msgid "Starting %(topic)s node (version %(version)s)"
msgstr "Iniciando el nodo %(topic)s (versión %(version)s)"
-#: nova/service.py:285
+#: nova/service.py:286
msgid "Service killed that has no database entry"
msgstr "Se detuvo un servicio sin entrada en la base de datos"
-#: nova/service.py:297
+#: nova/service.py:298
msgid "Service error occurred during cleanup_host"
msgstr "Ha ocurrido un error de servicio durante cleanup_host"
-#: nova/service.py:314
+#: nova/service.py:315
#, python-format
msgid "Temporary directory is invalid: %s"
msgstr "El directorio temporal no es válido: %s"
-#: nova/service.py:339
+#: nova/service.py:340
#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr "El valor %(worker_name)s de %(workers)s es inválido, debe ser mayor que 0."
-#: nova/service.py:424
+#: nova/service.py:433
msgid "serve() can only be called once"
msgstr "serve() sólo se puede llamar una vez "
-#: nova/utils.py:148
+#: nova/utils.py:147
#, python-format
msgid "Expected to receive %(exp)s bytes, but actually %(act)s"
msgstr "Se esperaba recibir %(exp)s bytes, se han recibido %(act)s"
-#: nova/utils.py:354
+#: nova/utils.py:353
#, python-format
msgid "Couldn't get IPv4 : %(ex)s"
msgstr "No se ha podido obtener IPv4: %(ex)s"
-#: nova/utils.py:370
+#: nova/utils.py:369
#, python-format
msgid "IPv4 address is not found.: %s"
msgstr "Dirección IPv4 no encontrada: %s"
-#: nova/utils.py:373
+#: nova/utils.py:372
#, python-format
msgid "Couldn't get IPv4 of %(interface)s : %(ex)s"
msgstr "No se puede obtener la IPv4 de %(interface)s : %(ex)s"
-#: nova/utils.py:388
+#: nova/utils.py:387
#, python-format
msgid "Link Local address is not found.:%s"
msgstr "No se encuentra la dirección del enlace local.:%s"
-#: nova/utils.py:391
+#: nova/utils.py:390
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s"
-#: nova/utils.py:412
+#: nova/utils.py:411
#, python-format
msgid "Invalid backend: %s"
msgstr "backend inválido: %s"
-#: nova/utils.py:457
+#: nova/utils.py:454
#, python-format
msgid "Expected object of type: %s"
msgstr "Se esperaba un objeto de tipo: %s"
-#: nova/utils.py:485
+#: nova/utils.py:482
#, python-format
msgid "Invalid server_string: %s"
msgstr "Serie de servidor no válida: %s"
-#: nova/utils.py:776 nova/virt/configdrive.py:177
+#: nova/utils.py:773
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr "No se ha podido eliminar directorio temporal: %s"
+#: nova/utils.py:964
+msgid "The input is not a string or unicode"
+msgstr ""
+
#: nova/utils.py:966
#, python-format
msgid "%s is not a string or unicode"
msgstr "%s no es una serie o unicode"
-#: nova/utils.py:970
+#: nova/utils.py:973
#, python-format
msgid "%(name)s has a minimum character requirement of %(min_length)s."
msgstr "%(name)s requiere de, al menos, %(min_length)s caracteres."
-#: nova/utils.py:975
+#: nova/utils.py:978
#, python-format
msgid "%(name)s has more than %(max_length)s characters."
msgstr "%(name)s tiene más de %(max_length)s caracteres."
-#: nova/utils.py:985
+#: nova/utils.py:988
#, python-format
msgid "%(value_name)s must be an integer"
msgstr "%(value_name)s debe ser un entero"
-#: nova/utils.py:991
+#: nova/utils.py:994
#, python-format
msgid "%(value_name)s must be >= %(min_value)d"
msgstr "%(value_name)s debe ser >= %(min_value)d"
-#: nova/utils.py:997
+#: nova/utils.py:1000
#, python-format
msgid "%(value_name)s must be <= %(max_value)d"
msgstr "%(value_name)s debe ser <= %(max_value)d"
-#: nova/utils.py:1031
+#: nova/utils.py:1034
#, python-format
msgid "Hypervisor version %s is invalid."
msgstr ""
@@ -2074,32 +2178,32 @@ msgstr ""
msgid "Failed to load %(cfgfile)s: %(ex)s"
msgstr "Fallo al cargar %(cfgfile)s: %(ex)s"
-#: nova/wsgi.py:132
+#: nova/wsgi.py:133
#, python-format
msgid "Could not bind to %(host)s:%(port)s"
msgstr "No se puede asociar a %(host)s:%(port)s"
-#: nova/wsgi.py:137
+#: nova/wsgi.py:138
#, python-format
msgid "%(name)s listening on %(host)s:%(port)s"
msgstr "%(name)s está escuchando en %(host)s:%(port)s"
-#: nova/wsgi.py:152 nova/openstack/common/sslutils.py:50
+#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:47
#, python-format
msgid "Unable to find cert_file : %s"
msgstr "No se puede encontrar cert_file: %s"
-#: nova/wsgi.py:156 nova/openstack/common/sslutils.py:53
+#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:50
#, python-format
msgid "Unable to find ca_file : %s"
msgstr "No se puede encontrar ca_file: %s"
-#: nova/wsgi.py:160 nova/openstack/common/sslutils.py:56
+#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:53
#, python-format
msgid "Unable to find key_file : %s"
msgstr "No se puede encontrar key_file: %s"
-#: nova/wsgi.py:164 nova/openstack/common/sslutils.py:59
+#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:56
msgid ""
"When running server in SSL mode, you must specify both a cert_file and "
"key_file option value in your configuration file"
@@ -2107,249 +2211,211 @@ msgstr ""
"Al ejecutar el servidor en modalidad SSL, debe especificar un valor para "
"las opciones cert_file y key_file en el archivo de configuración"
-#: nova/wsgi.py:195
+#: nova/wsgi.py:202
#, python-format
msgid "Failed to start %(name)s on %(host)s:%(port)s with SSL support"
msgstr "No se ha podido iniciar %(name)s en %(host)s:%(port)s con soporte SSL"
-#: nova/wsgi.py:223
+#: nova/wsgi.py:238
msgid "Stopping WSGI server."
msgstr "Deteniendo el servidor WSGI. "
-#: nova/wsgi.py:242
+#: nova/wsgi.py:258
msgid "WSGI server has stopped."
msgstr "El servidor WSGI se ha detenido."
-#: nova/wsgi.py:311
+#: nova/wsgi.py:327
msgid "You must implement __call__"
msgstr "Debe implementar __call__"
-#: nova/api/auth.py:72
-msgid "ratelimit_v3 is removed from v3 api."
-msgstr "ratelimit_v3 se ha removido de la api v3."
-
-#: nova/api/auth.py:135
+#: nova/api/auth.py:136
msgid "Invalid service catalog json."
msgstr "JSON de catálogo de servicios no válido."
-#: nova/api/auth.py:159
-msgid "Sourcing roles from deprecated X-Role HTTP header"
-msgstr "Proporcionando roles de cabecera HTTP de rol X en desuso"
-
#: nova/api/sizelimit.py:53 nova/api/sizelimit.py:62 nova/api/sizelimit.py:76
#: nova/api/metadata/password.py:62
msgid "Request is too large."
msgstr "La solicitud es demasiado larga."
-#: nova/api/ec2/__init__.py:88
+#: nova/api/ec2/__init__.py:89
#, python-format
msgid "FaultWrapper: %s"
msgstr "FaultWrapper: %s"
-#: nova/api/ec2/__init__.py:159
+#: nova/api/ec2/__init__.py:160
msgid "Too many failed authentications."
msgstr "Demasiados intentos de autenticacion fallidos."
-#: nova/api/ec2/__init__.py:168
-#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and"
-" will be locked out for %(lock_mins)d minutes."
-msgstr ""
-"La clave de acceso %(access_key)s ha tenido %(failures)d autenticaciones "
-"anómalas y estará bloqueada durante %(lock_mins)d minutos."
-
-#: nova/api/ec2/__init__.py:187
+#: nova/api/ec2/__init__.py:188
msgid "Signature not provided"
msgstr "Firma no proporcionada"
-#: nova/api/ec2/__init__.py:192
+#: nova/api/ec2/__init__.py:193
msgid "Access key not provided"
msgstr "Clave de acceso no proporcionada"
-#: nova/api/ec2/__init__.py:228 nova/api/ec2/__init__.py:244
+#: nova/api/ec2/__init__.py:229 nova/api/ec2/__init__.py:245
msgid "Failure communicating with keystone"
msgstr "Anomalía al comunicarse con keystone"
-#: nova/api/ec2/__init__.py:304
+#: nova/api/ec2/__init__.py:305
msgid "Timestamp failed validation."
msgstr "Ha fallado la validación de indicación de fecha y hora."
-#: nova/api/ec2/__init__.py:402
+#: nova/api/ec2/__init__.py:403
#, python-format
msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
msgstr ""
"Solicitud no autorizada para el controlador=%(controller)s y la "
"acción=%(action)s"
-#: nova/api/ec2/__init__.py:492
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr "Encontrado %(ex_name)s inesperado : %(ex_str)s"
-
-#: nova/api/ec2/__init__.py:495
-#, python-format
-msgid "%(ex_name)s raised: %(ex_str)s"
-msgstr "%(ex_name)s encontrado: %(ex_str)s"
-
-#: nova/api/ec2/__init__.py:519
-#, python-format
-msgid "Environment: %s"
-msgstr "Entorno: %s"
-
-#: nova/api/ec2/__init__.py:521
+#: nova/api/ec2/__init__.py:522
msgid "Unknown error occurred."
msgstr "Ha ocurrido un error desconocido."
-#: nova/api/ec2/cloud.py:395
+#: nova/api/ec2/cloud.py:391
#, python-format
msgid "Create snapshot of volume %s"
msgstr "Crear instantánea del volumen %s"
-#: nova/api/ec2/cloud.py:420
+#: nova/api/ec2/cloud.py:418
#, python-format
msgid "Could not find key pair(s): %s"
msgstr "No se ha podido encontrar par(es) de claves: %s "
-#: nova/api/ec2/cloud.py:436
+#: nova/api/ec2/cloud.py:434
#, python-format
msgid "Create key pair %s"
msgstr "Creando par de claves %s"
-#: nova/api/ec2/cloud.py:448
+#: nova/api/ec2/cloud.py:446
#, python-format
msgid "Import key %s"
msgstr "Importar la clave %s"
-#: nova/api/ec2/cloud.py:461
+#: nova/api/ec2/cloud.py:459
#, python-format
msgid "Delete key pair %s"
msgstr "Borrar para de claves %s"
-#: nova/api/ec2/cloud.py:603 nova/api/ec2/cloud.py:733
+#: nova/api/ec2/cloud.py:601 nova/api/ec2/cloud.py:731
msgid "need group_name or group_id"
msgstr "se necesita group_name o group_id"
-#: nova/api/ec2/cloud.py:608
+#: nova/api/ec2/cloud.py:606
msgid "can't build a valid rule"
msgstr "No se ha podido crear una regla válida"
-#: nova/api/ec2/cloud.py:616
+#: nova/api/ec2/cloud.py:614
#, python-format
msgid "Invalid IP protocol %(protocol)s"
msgstr "Protocolo IP no válido %(protocol)s"
-#: nova/api/ec2/cloud.py:650 nova/api/ec2/cloud.py:686
+#: nova/api/ec2/cloud.py:648 nova/api/ec2/cloud.py:684
msgid "No rule for the specified parameters."
msgstr "No hay regla para los parámetros especificados."
-#: nova/api/ec2/cloud.py:764
+#: nova/api/ec2/cloud.py:762
#, python-format
msgid "Get console output for instance %s"
msgstr "Obtener salida de la consola para la instancia %s"
-#: nova/api/ec2/cloud.py:836
+#: nova/api/ec2/cloud.py:834
#, python-format
msgid "Create volume from snapshot %s"
msgstr "Crear volumen desde la instantánea %s"
-#: nova/api/ec2/cloud.py:840 nova/api/openstack/compute/contrib/volumes.py:243
+#: nova/api/ec2/cloud.py:838 nova/api/openstack/compute/contrib/volumes.py:243
#, python-format
msgid "Create volume of %s GB"
msgstr "Crear volumen de %s GB"
-#: nova/api/ec2/cloud.py:880
+#: nova/api/ec2/cloud.py:878
#, python-format
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr ""
"Conectar el volumen %(volume_id)s a la instancia %(instance_id)s en "
"%(device)s"
-#: nova/api/ec2/cloud.py:910 nova/api/openstack/compute/contrib/volumes.py:506
+#: nova/api/ec2/cloud.py:908 nova/api/openstack/compute/contrib/volumes.py:506
#, python-format
msgid "Detach volume %s"
msgstr "Desasociar volumen %s"
-#: nova/api/ec2/cloud.py:1242
+#: nova/api/ec2/cloud.py:1262
msgid "Allocate address"
msgstr "Asignar dirección"
-#: nova/api/ec2/cloud.py:1247
+#: nova/api/ec2/cloud.py:1267
#, python-format
msgid "Release address %s"
msgstr "Liberar dirección %s"
-#: nova/api/ec2/cloud.py:1252
+#: nova/api/ec2/cloud.py:1272
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr "Asociar dirección %(public_ip)s a instancia %(instance_id)s"
-#: nova/api/ec2/cloud.py:1262
+#: nova/api/ec2/cloud.py:1282
msgid "Unable to associate IP Address, no fixed_ips."
msgstr "No se puede asociar la dirección IP, sin fixed_ips."
-#: nova/api/ec2/cloud.py:1270
-#: nova/api/openstack/compute/contrib/floating_ips.py:249
-#, python-format
-msgid "multiple fixed_ips exist, using the first: %s"
-msgstr "existen múltiples fixed_ips, utilizando la primera: %s"
-
-#: nova/api/ec2/cloud.py:1283
+#: nova/api/ec2/cloud.py:1303
#, python-format
msgid "Disassociate address %s"
msgstr "Desasociar dirección %s"
-#: nova/api/ec2/cloud.py:1300 nova/api/openstack/compute/servers.py:918
+#: nova/api/ec2/cloud.py:1320 nova/api/openstack/compute/servers.py:920
#: nova/api/openstack/compute/plugins/v3/multiple_create.py:64
msgid "min_count must be <= max_count"
msgstr "min_count debe ser <= max_count "
-#: nova/api/ec2/cloud.py:1332
+#: nova/api/ec2/cloud.py:1352
msgid "Image must be available"
msgstr "La imagen debe estar disponible "
-#: nova/api/ec2/cloud.py:1429
+#: nova/api/ec2/cloud.py:1452
#, python-format
msgid "Reboot instance %r"
msgstr "Reiniciar instancia %r"
-#: nova/api/ec2/cloud.py:1542
+#: nova/api/ec2/cloud.py:1567
#, python-format
msgid "De-registering image %s"
msgstr "Des-registrando la imagen %s"
-#: nova/api/ec2/cloud.py:1558
+#: nova/api/ec2/cloud.py:1583
msgid "imageLocation is required"
msgstr "Se necesita imageLocation"
-#: nova/api/ec2/cloud.py:1578
+#: nova/api/ec2/cloud.py:1603
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr "Imagen registrada %(image_location)s con el id %(image_id)s"
-#: nova/api/ec2/cloud.py:1639
+#: nova/api/ec2/cloud.py:1664
msgid "user or group not specified"
msgstr "usuario o grupo no especificado"
-#: nova/api/ec2/cloud.py:1642
+#: nova/api/ec2/cloud.py:1667
msgid "only group \"all\" is supported"
msgstr "sólo el grupo \"all\" está soportado"
-#: nova/api/ec2/cloud.py:1645
+#: nova/api/ec2/cloud.py:1670
msgid "operation_type must be add or remove"
msgstr "operation_type debe ser añadir o eliminar"
-#: nova/api/ec2/cloud.py:1647
+#: nova/api/ec2/cloud.py:1672
#, python-format
msgid "Updating image %s publicity"
msgstr "Actualizando imagen %s públicamente"
-#: nova/api/ec2/cloud.py:1660
+#: nova/api/ec2/cloud.py:1685
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr "No está permitido modificar los atributos para la imagen %s"
-#: nova/api/ec2/cloud.py:1686
+#: nova/api/ec2/cloud.py:1715
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
@@ -2358,313 +2424,205 @@ msgstr ""
"Valor no válido '%(ec2_instance_id)s' para el ID de instancia. La "
"instancia no tiene ningún volumen conectado en la raíz (%(root)s)."
-#: nova/api/ec2/cloud.py:1717
+#: nova/api/ec2/cloud.py:1748
#, python-format
-msgid "Couldn't stop instance within %d sec"
-msgstr "No se puede detener una instancia en menos de %d segundos"
+msgid ""
+"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
+"%(vm_state)s, current task_state: %(task_state)s"
+msgstr ""
-#: nova/api/ec2/cloud.py:1736
+#: nova/api/ec2/cloud.py:1772
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr "imagen de %(instance)s en %(now)s"
-#: nova/api/ec2/cloud.py:1761 nova/api/ec2/cloud.py:1811
+#: nova/api/ec2/cloud.py:1797 nova/api/ec2/cloud.py:1847
msgid "resource_id and tag are required"
msgstr "resource_id y tag son necesarios"
-#: nova/api/ec2/cloud.py:1765 nova/api/ec2/cloud.py:1815
+#: nova/api/ec2/cloud.py:1801 nova/api/ec2/cloud.py:1851
msgid "Expecting a list of resources"
msgstr "Esperando una lista de recursos"
-#: nova/api/ec2/cloud.py:1770 nova/api/ec2/cloud.py:1820
-#: nova/api/ec2/cloud.py:1878
+#: nova/api/ec2/cloud.py:1806 nova/api/ec2/cloud.py:1856
+#: nova/api/ec2/cloud.py:1914
msgid "Only instances implemented"
msgstr "Sólo están implementadas instancias"
-#: nova/api/ec2/cloud.py:1774 nova/api/ec2/cloud.py:1824
+#: nova/api/ec2/cloud.py:1810 nova/api/ec2/cloud.py:1860
msgid "Expecting a list of tagSets"
msgstr "Esperando una lista de tagSets"
-#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1833
+#: nova/api/ec2/cloud.py:1816 nova/api/ec2/cloud.py:1869
msgid "Expecting tagSet to be key/value pairs"
msgstr "Esperando que tagSet sea un par clave/valor"
-#: nova/api/ec2/cloud.py:1787
+#: nova/api/ec2/cloud.py:1823
msgid "Expecting both key and value to be set"
msgstr "Esperando establecimiento tanto de clave como valor"
-#: nova/api/ec2/cloud.py:1838
+#: nova/api/ec2/cloud.py:1874
msgid "Expecting key to be set"
msgstr "Esperando el establecimiento de la clave"
-#: nova/api/ec2/cloud.py:1912
+#: nova/api/ec2/cloud.py:1948
msgid "Invalid CIDR"
msgstr "CIDR no válido"
-#: nova/api/ec2/ec2utils.py:254
+#: nova/api/ec2/ec2utils.py:255
#, python-format
msgid "Unacceptable attach status:%s for ec2 API."
msgstr ""
-#: nova/api/ec2/ec2utils.py:277
+#: nova/api/ec2/ec2utils.py:278
msgid "Request must include either Timestamp or Expires, but cannot contain both"
msgstr ""
"La solicitud debe incluir Timestamp o Expires, pero no puede contener "
"ambos"
-#: nova/api/ec2/ec2utils.py:295
+#: nova/api/ec2/ec2utils.py:296
msgid "Timestamp is invalid."
msgstr "La indicación de fecha y hora no es válida."
-#: nova/api/metadata/handler.py:111
-msgid ""
-"X-Instance-ID present in request headers. The "
-"'service_neutron_metadata_proxy' option must be enabled to process this "
-"header."
-msgstr ""
-"X-Instance-ID presente en encabezados de soicitud. La opción "
-"'service_neutron_metadata_proy' debe ser habilitada para procesar este "
-"encabezado."
-
-#: nova/api/metadata/handler.py:140 nova/api/metadata/handler.py:147
+#: nova/api/metadata/handler.py:148
#, python-format
msgid "Failed to get metadata for ip: %s"
msgstr "Fallo al generar metadatos para la ip %s"
-#: nova/api/metadata/handler.py:142 nova/api/metadata/handler.py:198
+#: nova/api/metadata/handler.py:150 nova/api/metadata/handler.py:207
msgid "An unknown error has occurred. Please try your request again."
msgstr "Ha sucedido un error desconocido. Por favor repite el intento de nuevo."
-#: nova/api/metadata/handler.py:160
+#: nova/api/metadata/handler.py:169
msgid "X-Instance-ID header is missing from request."
msgstr "Falta la cabecera de ID de instancia X en la solicitud."
-#: nova/api/metadata/handler.py:162
+#: nova/api/metadata/handler.py:171
msgid "X-Tenant-ID header is missing from request."
msgstr "el encabezado X-Tenant-ID falta en la solicitud."
-#: nova/api/metadata/handler.py:164
+#: nova/api/metadata/handler.py:173
msgid "Multiple X-Instance-ID headers found within request."
msgstr "Se han encontrado varias cabeceas de ID de instancia X en la solicitud."
-#: nova/api/metadata/handler.py:166
+#: nova/api/metadata/handler.py:175
msgid "Multiple X-Tenant-ID headers found within request."
msgstr "Se han encontrado múltiples encabezados X-Tenant-ID en la solicitud."
-#: nova/api/metadata/handler.py:180
-#, python-format
-msgid ""
-"X-Instance-ID-Signature: %(signature)s does not match the expected value:"
-" %(expected_signature)s for id: %(instance_id)s. Request From: "
-"%(remote_address)s"
-msgstr ""
-"Firma_ID_instancia_X: %(signature)s no coincide con el valor esperado: "
-"%(expected_signature)s para el ID: %(instance_id)s. Solicitud desde: "
-"%(remote_address)s "
-
-#: nova/api/metadata/handler.py:189
+#: nova/api/metadata/handler.py:198
msgid "Invalid proxy request signature."
msgstr "Firma de solicitud de proxy no válida."
-#: nova/api/metadata/handler.py:196 nova/api/metadata/handler.py:203
+#: nova/api/metadata/handler.py:205
#, python-format
msgid "Failed to get metadata for instance id: %s"
msgstr "No se han podido obtener metadatos para el id de instancia: %s"
-#: nova/api/metadata/handler.py:207
-#, python-format
-msgid ""
-"Tenant_id %(tenant_id)s does not match tenant_id of instance "
-"%(instance_id)s."
-msgstr ""
-"Tenant_id %(tenant_id)s no coincide con tenant_id de la instancia "
-"%(instance_id)s."
-
-#: nova/api/metadata/vendordata_json.py:47
-msgid "file does not exist"
-msgstr "No existe el fichero"
-
-#: nova/api/metadata/vendordata_json.py:49
-msgid "Unexpected IOError when reading"
-msgstr "Error de E/S no esperado al leer"
-
-#: nova/api/metadata/vendordata_json.py:52
-msgid "failed to load json"
-msgstr "Ha fallado la carga de json"
-
-#: nova/api/openstack/__init__.py:89
+#: nova/api/openstack/__init__.py:92
#, python-format
msgid "Caught error: %s"
msgstr "Capturado error: %s"
-#: nova/api/openstack/__init__.py:98
-#, python-format
-msgid "%(url)s returned with HTTP %(status)d"
-msgstr "Se ha devuelto %(url)s con HTTP %(status)d"
-
-#: nova/api/openstack/__init__.py:190
+#: nova/api/openstack/__init__.py:189
msgid "Must specify an ExtensionManager class"
msgstr "Debe especificar una clase ExtensionManager"
-#: nova/api/openstack/__init__.py:236 nova/api/openstack/__init__.py:410
-#, python-format
-msgid ""
-"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
-"resource"
-msgstr ""
-"Ampliación %(ext_name)s: no se puede ampliar el recurso %(collection)s: "
-"no existe dicho recurso."
-
-#: nova/api/openstack/__init__.py:283
-#: nova/api/openstack/compute/plugins/v3/servers.py:99
-#, python-format
-msgid "Not loading %s because it is in the blacklist"
-msgstr "No se ha cargado %s porque está en la lista negra"
-
-#: nova/api/openstack/__init__.py:288
-#: nova/api/openstack/compute/plugins/v3/servers.py:104
-#, python-format
-msgid "Not loading %s because it is not in the whitelist"
-msgstr "No se ha cargado %s porque no está en la lista blanca"
-
-#: nova/api/openstack/__init__.py:295
-msgid "V3 API has been disabled by configuration"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:308
-#, python-format
-msgid "Extensions in both blacklist and whitelist: %s"
-msgstr "Extensiones en lista restrictiva y lista permisiva: %s"
-
-#: nova/api/openstack/__init__.py:332
-#, python-format
-msgid "Missing core API extensions: %s"
-msgstr "Extensiones core API omitidas: %s"
-
-#: nova/api/openstack/common.py:132
-#, python-format
-msgid ""
-"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. "
-"Bad upgrade or db corrupted?"
-msgstr ""
-"el estado es UNKNOWN de vm_state=%(vm_state)s task_state=%(task_state)s. "
-"¿Actualización errónea o base de datos dañada?"
-
-#: nova/api/openstack/common.py:182
+#: nova/api/openstack/common.py:185
#, python-format
msgid "%s param must be an integer"
msgstr "El parámetro %s debe ser un entero"
-#: nova/api/openstack/common.py:185
+#: nova/api/openstack/common.py:188
#, python-format
msgid "%s param must be positive"
msgstr "El parámetro %s debe ser positivo"
-#: nova/api/openstack/common.py:210
+#: nova/api/openstack/common.py:213
msgid "offset param must be an integer"
msgstr "el parámetro de desplazamiento debe ser un entero"
-#: nova/api/openstack/common.py:216
+#: nova/api/openstack/common.py:219
msgid "limit param must be an integer"
msgstr "el parámetro de límite debe ser un entero"
-#: nova/api/openstack/common.py:220
+#: nova/api/openstack/common.py:223
msgid "limit param must be positive"
msgstr "el parámetro de límite debe ser positivo"
-#: nova/api/openstack/common.py:224
+#: nova/api/openstack/common.py:227
msgid "offset param must be positive"
msgstr "el parámetro de desplazamiento debe ser positivo"
-#: nova/api/openstack/common.py:259 nova/api/openstack/compute/flavors.py:146
-#: nova/api/openstack/compute/servers.py:603
-#: nova/api/openstack/compute/plugins/v3/flavors.py:110
-#: nova/api/openstack/compute/plugins/v3/servers.py:280
-#, python-format
-msgid "marker [%s] not found"
-msgstr "no se ha encontrado el marcador [%s]"
-
-#: nova/api/openstack/common.py:299
+#: nova/api/openstack/common.py:280
#, python-format
msgid "href %s does not contain version"
msgstr "href %s no contiene la versión"
-#: nova/api/openstack/common.py:314
+#: nova/api/openstack/common.py:293
msgid "Image metadata limit exceeded"
msgstr "Se ha superado el límite de metadatos de imágenes"
-#: nova/api/openstack/common.py:322
+#: nova/api/openstack/common.py:301
msgid "Image metadata key cannot be blank"
msgstr "La clave de metadatos de imagen no puede estar en blanco"
-#: nova/api/openstack/common.py:325
+#: nova/api/openstack/common.py:304
msgid "Image metadata key too long"
msgstr "La clave de metadatos de imagen es demasiado larga"
-#: nova/api/openstack/common.py:328
+#: nova/api/openstack/common.py:307
msgid "Invalid image metadata"
msgstr "Metadatos de imagen no válidos "
-#: nova/api/openstack/common.py:391
+#: nova/api/openstack/common.py:370
#, python-format
msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
msgstr "No se puede '%(action)s' mientras la instancia está en %(attr)s %(state)s"
-#: nova/api/openstack/common.py:394
+#: nova/api/openstack/common.py:373
#, python-format
msgid "Cannot '%s' an instance which has never been active"
msgstr "No se puede '%s' una instancia que nunca ha estado activa"
-#: nova/api/openstack/common.py:397
+#: nova/api/openstack/common.py:376
#, python-format
msgid "Instance is in an invalid state for '%s'"
msgstr "La instancia se encuentra en un estado inválido para '%s'"
-#: nova/api/openstack/common.py:477
-msgid "Rejecting snapshot request, snapshots currently disabled"
-msgstr ""
-"Rechazando solicitud de instantánea, instantáneas inhabilitadas "
-"actualmente"
-
-#: nova/api/openstack/common.py:479
+#: nova/api/openstack/common.py:458
msgid "Instance snapshots are not permitted at this time."
msgstr "Las instantáneas de instancia no están permitidas en este momento."
-#: nova/api/openstack/common.py:600
+#: nova/api/openstack/common.py:579
msgid "Cells is not enabled."
msgstr "Las celdas no están habilitadas."
-#: nova/api/openstack/extensions.py:197
+#: nova/api/openstack/extensions.py:198
#, python-format
msgid "Loaded extension: %s"
msgstr "Ampliación cargada: %s"
-#: nova/api/openstack/extensions.py:243
+#: nova/api/openstack/extensions.py:244
#: nova/api/openstack/compute/plugins/__init__.py:51
#, python-format
msgid "Exception loading extension: %s"
msgstr "Excepción al cargar ampliación: %s"
-#: nova/api/openstack/extensions.py:278
-#, python-format
-msgid "Failed to load extension %(ext_factory)s: %(exc)s"
-msgstr "No se ha podido cargar la ampliación %(ext_factory)s: %(exc)s"
-
-#: nova/api/openstack/extensions.py:349
+#: nova/api/openstack/extensions.py:350
#, python-format
msgid "Failed to load extension %(classpath)s: %(exc)s"
msgstr "No se ha podido cargar la ampliación %(classpath)s: %(exc)s"
-#: nova/api/openstack/extensions.py:372
+#: nova/api/openstack/extensions.py:373
#, python-format
msgid "Failed to load extension %(ext_name)s:%(exc)s"
msgstr "Fallo al cargar extensión %(ext_name)s:%(exc)s"
-#: nova/api/openstack/extensions.py:494
+#: nova/api/openstack/extensions.py:495
msgid "Unexpected exception in API method"
msgstr "Excepción inesperada en método API."
-#: nova/api/openstack/extensions.py:495
+#: nova/api/openstack/extensions.py:496
#, python-format
msgid ""
"Unexpected API Error. Please report this at "
@@ -2677,56 +2635,41 @@ msgstr ""
"posible.\n"
"%s"
-#: nova/api/openstack/wsgi.py:228 nova/api/openstack/wsgi.py:633
+#: nova/api/openstack/wsgi.py:230 nova/api/openstack/wsgi.py:635
msgid "cannot understand JSON"
msgstr "no se puede entender JSON"
-#: nova/api/openstack/wsgi.py:638
+#: nova/api/openstack/wsgi.py:640
msgid "too many body keys"
msgstr "demasiadas claves de cuerpo"
-#: nova/api/openstack/wsgi.py:682
-#, python-format
-msgid "Exception handling resource: %s"
-msgstr "Excepción al manejar recurso: %s"
-
-#: nova/api/openstack/wsgi.py:686
-#, python-format
-msgid "Fault thrown: %s"
-msgstr "Error emitido: %s"
-
-#: nova/api/openstack/wsgi.py:689
-#, python-format
-msgid "HTTP exception thrown: %s"
-msgstr "Excepción de HTTP emitida: %s"
-
-#: nova/api/openstack/wsgi.py:919
+#: nova/api/openstack/wsgi.py:921
#, python-format
msgid "There is no such action: %s"
msgstr "No existe esta acción: %s"
-#: nova/api/openstack/wsgi.py:922 nova/api/openstack/wsgi.py:949
+#: nova/api/openstack/wsgi.py:924 nova/api/openstack/wsgi.py:951
#: nova/api/openstack/compute/server_metadata.py:57
#: nova/api/openstack/compute/server_metadata.py:75
#: nova/api/openstack/compute/server_metadata.py:100
#: nova/api/openstack/compute/server_metadata.py:126
-#: nova/api/openstack/compute/contrib/evacuate.py:45
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:58
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:73
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:95
+#: nova/api/openstack/compute/contrib/evacuate.py:47
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:60
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:75
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:97
msgid "Malformed request body"
msgstr "Cuerpo de solicitud formado incorrectamente"
-#: nova/api/openstack/wsgi.py:926
+#: nova/api/openstack/wsgi.py:928
#, python-format
msgid "Action: '%(action)s', body: %(body)s"
msgstr "Acción: '%(action)s', cuerpo: %(body)s"
-#: nova/api/openstack/wsgi.py:946
+#: nova/api/openstack/wsgi.py:948
msgid "Unsupported Content-Type"
msgstr "Tipo de contenido no soportado"
-#: nova/api/openstack/wsgi.py:958
+#: nova/api/openstack/wsgi.py:960
#, python-format
msgid ""
"Malformed request URL: URL's project_id '%(project_id)s' doesn't match "
@@ -2759,7 +2702,7 @@ msgid "Initializing extension manager."
msgstr "Inicializando gestor de ampliación."
#: nova/api/openstack/compute/flavors.py:107
-#: nova/api/openstack/compute/plugins/v3/flavors.py:70
+#: nova/api/openstack/compute/plugins/v3/flavors.py:72
#, python-format
msgid "Invalid is_public filter [%s]"
msgstr "Filtro is_public no válido [%s]"
@@ -2774,51 +2717,58 @@ msgstr "Filtro minRam no válido [%s]"
msgid "Invalid minDisk filter [%s]"
msgstr "Filtro minDisk no válido [%s]"
-#: nova/api/openstack/compute/image_metadata.py:35
-#: nova/api/openstack/compute/images.py:141
-#: nova/api/openstack/compute/images.py:157
+#: nova/api/openstack/compute/flavors.py:146
+#: nova/api/openstack/compute/servers.py:606
+#: nova/api/openstack/compute/plugins/v3/flavors.py:112
+#: nova/api/openstack/compute/plugins/v3/servers.py:303
+#, python-format
+msgid "marker [%s] not found"
+msgstr "no se ha encontrado el marcador [%s]"
+
+#: nova/api/openstack/compute/image_metadata.py:37
+#: nova/api/openstack/compute/images.py:135
+#: nova/api/openstack/compute/images.py:151
msgid "Image not found."
msgstr "Imagen no encontrada."
-#: nova/api/openstack/compute/image_metadata.py:78
+#: nova/api/openstack/compute/image_metadata.py:81
msgid "Incorrect request body format"
msgstr "Formato de cuerpo de solicitud incorrecto"
-#: nova/api/openstack/compute/image_metadata.py:82
+#: nova/api/openstack/compute/image_metadata.py:85
#: nova/api/openstack/compute/server_metadata.py:79
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:108
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:85
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:77
+#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:72
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:79
msgid "Request body and URI mismatch"
msgstr "Discrepancia de URI y cuerpo de solicitud"
-#: nova/api/openstack/compute/image_metadata.py:85
+#: nova/api/openstack/compute/image_metadata.py:88
#: nova/api/openstack/compute/server_metadata.py:83
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:111
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:88
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:81
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:83
msgid "Request body contains too many items"
msgstr "El cuerpo de solicitud contiene demasiados elementos"
-#: nova/api/openstack/compute/image_metadata.py:117
+#: nova/api/openstack/compute/image_metadata.py:122
msgid "Invalid metadata key"
msgstr "Clave de metadatos no válida"
-#: nova/api/openstack/compute/images.py:162
+#: nova/api/openstack/compute/images.py:156
msgid "You are not allowed to delete the image."
msgstr "No le está permitido suprimir la imagen."
#: nova/api/openstack/compute/ips.py:67
-#: nova/api/openstack/compute/plugins/v3/ips.py:39
+#: nova/api/openstack/compute/plugins/v3/ips.py:41
msgid "Instance does not exist"
msgstr "La instancia no existe "
-#: nova/api/openstack/compute/ips.py:90
-#: nova/api/openstack/compute/plugins/v3/ips.py:60
+#: nova/api/openstack/compute/ips.py:84
+#: nova/api/openstack/compute/plugins/v3/ips.py:56
msgid "Instance is not a member of specified network"
msgstr "La instancia no es miembro de la red especificada"
-#: nova/api/openstack/compute/limits.py:161
+#: nova/api/openstack/compute/limits.py:162
#, python-format
msgid ""
"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
@@ -2827,141 +2777,126 @@ msgstr ""
"Solo se pueden realizar %(value)s solicitud(es) de %(verb)s para %(uri)s "
"cada %(unit_string)s."
-#: nova/api/openstack/compute/limits.py:287
+#: nova/api/openstack/compute/limits.py:288
msgid "This request was rate-limited."
msgstr "Esta solicitud estaba limitada por tipo."
#: nova/api/openstack/compute/server_metadata.py:37
#: nova/api/openstack/compute/server_metadata.py:122
#: nova/api/openstack/compute/server_metadata.py:177
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:41
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:43
msgid "Server does not exist"
msgstr "El servidor no existe"
#: nova/api/openstack/compute/server_metadata.py:157
#: nova/api/openstack/compute/server_metadata.py:168
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:144
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:156
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:146
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:158
msgid "Metadata item was not found"
-msgstr "No se ha encontrado el elemento metadatos"
-
-#: nova/api/openstack/compute/servers.py:81
-msgid ""
-"XML support has been deprecated and may be removed as early as the Juno "
-"release."
-msgstr ""
-"El soporte XML está obsoleto y podrá ser removito tan pronto como la "
-"liberación de Juno."
+msgstr "No se ha encontrado el elemento metadatos"
-#: nova/api/openstack/compute/servers.py:551
-#: nova/api/openstack/compute/contrib/cells.py:423
-#: nova/api/openstack/compute/plugins/v3/cells.py:331
+#: nova/api/openstack/compute/servers.py:554
+#: nova/api/openstack/compute/contrib/cells.py:427
msgid "Invalid changes-since value"
msgstr "Valor de changes-since no válido"
-#: nova/api/openstack/compute/servers.py:570
-#: nova/api/openstack/compute/plugins/v3/servers.py:234
+#: nova/api/openstack/compute/servers.py:573
+#: nova/api/openstack/compute/plugins/v3/servers.py:257
msgid "Only administrators may list deleted instances"
msgstr "Sólo los administradores pueden listar instancias suprimidas "
-#: nova/api/openstack/compute/servers.py:606
-#: nova/api/openstack/compute/plugins/v3/servers.py:283
-#, python-format
-msgid "Flavor '%s' could not be found "
-msgstr "El sabor '%s' no se ha podido encontrar "
-
-#: nova/api/openstack/compute/servers.py:625
-#: nova/api/openstack/compute/servers.py:772
-#: nova/api/openstack/compute/servers.py:1079
-#: nova/api/openstack/compute/servers.py:1199
-#: nova/api/openstack/compute/servers.py:1384
-#: nova/api/openstack/compute/plugins/v3/servers.py:615
-#: nova/api/openstack/compute/plugins/v3/servers.py:727
-#: nova/api/openstack/compute/plugins/v3/servers.py:846
+#: nova/api/openstack/compute/servers.py:627
+#: nova/api/openstack/compute/servers.py:774
+#: nova/api/openstack/compute/servers.py:1078
+#: nova/api/openstack/compute/servers.py:1203
+#: nova/api/openstack/compute/servers.py:1388
+#: nova/api/openstack/compute/plugins/v3/servers.py:650
+#: nova/api/openstack/compute/plugins/v3/servers.py:768
+#: nova/api/openstack/compute/plugins/v3/servers.py:889
msgid "Instance could not be found"
msgstr "No se ha podido encontrar la instancia"
-#: nova/api/openstack/compute/servers.py:656
+#: nova/api/openstack/compute/servers.py:658
#, python-format
msgid "Bad personality format: missing %s"
msgstr "Formato de personalidad incorrecto : faltan %s "
-#: nova/api/openstack/compute/servers.py:659
+#: nova/api/openstack/compute/servers.py:661
msgid "Bad personality format"
msgstr "Formato de personalidad incorrecto "
-#: nova/api/openstack/compute/servers.py:662
+#: nova/api/openstack/compute/servers.py:664
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr "No se puede decodificar el contenido de personalidad para %s"
-#: nova/api/openstack/compute/servers.py:677
+#: nova/api/openstack/compute/servers.py:679
msgid "Unknown argument : port"
msgstr "Argumento desconocido: puerto"
-#: nova/api/openstack/compute/servers.py:680
-#: nova/api/openstack/compute/plugins/v3/servers.py:338
+#: nova/api/openstack/compute/servers.py:682
+#: nova/api/openstack/compute/plugins/v3/servers.py:361
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr ""
"Formato de puerto incorrecto: uuid de puerto no tiene el formato correcto"
" (%s) "
-#: nova/api/openstack/compute/servers.py:690
-#: nova/api/openstack/compute/plugins/v3/servers.py:354
+#: nova/api/openstack/compute/servers.py:692
+#: nova/api/openstack/compute/plugins/v3/servers.py:377
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr ""
"Formato incorrecto de redes: el uuid de red no está en el formato "
"correcto (%s) "
-#: nova/api/openstack/compute/servers.py:701
-#: nova/api/openstack/compute/plugins/v3/servers.py:327
+#: nova/api/openstack/compute/servers.py:703
+#: nova/api/openstack/compute/plugins/v3/servers.py:350
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr "Dirección IP fija no válida (%s) "
-#: nova/api/openstack/compute/servers.py:714
-#: nova/api/openstack/compute/plugins/v3/servers.py:369
+#: nova/api/openstack/compute/servers.py:716
+#: nova/api/openstack/compute/plugins/v3/servers.py:392
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr "Las redes duplicadas (%s) no están permitidas"
-#: nova/api/openstack/compute/servers.py:720
-#: nova/api/openstack/compute/plugins/v3/servers.py:375
+#: nova/api/openstack/compute/servers.py:722
+#: nova/api/openstack/compute/plugins/v3/servers.py:398
#, python-format
msgid "Bad network format: missing %s"
msgstr "Formato de red erróneo: falta %s"
-#: nova/api/openstack/compute/servers.py:723
-#: nova/api/openstack/compute/servers.py:824
-#: nova/api/openstack/compute/plugins/v3/servers.py:378
+#: nova/api/openstack/compute/servers.py:725
+#: nova/api/openstack/compute/servers.py:826
+#: nova/api/openstack/compute/plugins/v3/servers.py:401
msgid "Bad networks format"
msgstr "Formato de redes erróneo"
-#: nova/api/openstack/compute/servers.py:749
+#: nova/api/openstack/compute/servers.py:751
msgid "Userdata content cannot be decoded"
msgstr "No se puede decodificar el contenido de datos de usuario"
-#: nova/api/openstack/compute/servers.py:754
+#: nova/api/openstack/compute/servers.py:756
msgid "accessIPv4 is not proper IPv4 format"
msgstr "accessIPv4 no está en formato IPv4 correcto"
-#: nova/api/openstack/compute/servers.py:759
+#: nova/api/openstack/compute/servers.py:761
msgid "accessIPv6 is not proper IPv6 format"
msgstr "accessIPv6 no está en formato IPv6 correcto"
-#: nova/api/openstack/compute/servers.py:788
-#: nova/api/openstack/compute/plugins/v3/servers.py:419
+#: nova/api/openstack/compute/servers.py:790
+#: nova/api/openstack/compute/plugins/v3/servers.py:443
msgid "Server name is not defined"
msgstr "El nombre de servidor no está definido "
-#: nova/api/openstack/compute/servers.py:840
-#: nova/api/openstack/compute/servers.py:968
+#: nova/api/openstack/compute/servers.py:842
+#: nova/api/openstack/compute/servers.py:970
msgid "Invalid flavorRef provided."
msgstr "Se ha proporcionado flavorRef no válido. "
-#: nova/api/openstack/compute/servers.py:880
+#: nova/api/openstack/compute/servers.py:882
msgid ""
"Using different block_device_mapping syntaxes is not allowed in the same "
"request."
@@ -2969,236 +2904,181 @@ msgstr ""
"El uso de sintáxis diferentes de block_device_mapping en la misma "
"petición no está permitido."
-#: nova/api/openstack/compute/servers.py:965
-#: nova/api/openstack/compute/plugins/v3/servers.py:495
+#: nova/api/openstack/compute/servers.py:967
+#: nova/api/openstack/compute/plugins/v3/servers.py:519
msgid "Can not find requested image"
msgstr "No se puede encontrar la imagen solicitada "
-#: nova/api/openstack/compute/servers.py:971
-#: nova/api/openstack/compute/plugins/v3/servers.py:501
+#: nova/api/openstack/compute/servers.py:973
+#: nova/api/openstack/compute/plugins/v3/servers.py:525
msgid "Invalid key_name provided."
msgstr "Se ha proporcionado un nombre de clave no válido."
-#: nova/api/openstack/compute/servers.py:974
-#: nova/api/openstack/compute/plugins/v3/servers.py:504
+#: nova/api/openstack/compute/servers.py:976
+#: nova/api/openstack/compute/plugins/v3/servers.py:528
msgid "Invalid config_drive provided."
msgstr "La config_drive proporcionada es inválida."
-#: nova/api/openstack/compute/servers.py:1064
+#: nova/api/openstack/compute/servers.py:1063
msgid "HostId cannot be updated."
msgstr "El ID de host no se puede actualizar. "
-#: nova/api/openstack/compute/servers.py:1068
+#: nova/api/openstack/compute/servers.py:1067
msgid "Personality cannot be updated."
msgstr "No se puede actualizar la personalidad."
-#: nova/api/openstack/compute/servers.py:1094
-#: nova/api/openstack/compute/servers.py:1113
-#: nova/api/openstack/compute/plugins/v3/servers.py:626
-#: nova/api/openstack/compute/plugins/v3/servers.py:642
+#: nova/api/openstack/compute/servers.py:1093
+#: nova/api/openstack/compute/servers.py:1112
+#: nova/api/openstack/compute/plugins/v3/servers.py:662
+#: nova/api/openstack/compute/plugins/v3/servers.py:679
msgid "Instance has not been resized."
msgstr "La instancia no se ha redimensionado."
-#: nova/api/openstack/compute/servers.py:1116
-#: nova/api/openstack/compute/plugins/v3/servers.py:645
+#: nova/api/openstack/compute/servers.py:1115
+#: nova/api/openstack/compute/plugins/v3/servers.py:682
msgid "Flavor used by the instance could not be found."
msgstr "No se ha podido encontrar el sabor utilizado por la instancia."
-#: nova/api/openstack/compute/servers.py:1132
-#: nova/api/openstack/compute/plugins/v3/servers.py:659
+#: nova/api/openstack/compute/servers.py:1131
+#: nova/api/openstack/compute/plugins/v3/servers.py:697
msgid "Argument 'type' for reboot must be a string"
msgstr "El argumento 'type' para reinicio debe ser una cadena"
-#: nova/api/openstack/compute/servers.py:1138
-#: nova/api/openstack/compute/plugins/v3/servers.py:665
+#: nova/api/openstack/compute/servers.py:1137
+#: nova/api/openstack/compute/plugins/v3/servers.py:703
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr "El argumento 'type' para el rearranque no es HARD o SOFT"
-#: nova/api/openstack/compute/servers.py:1142
-#: nova/api/openstack/compute/plugins/v3/servers.py:669
+#: nova/api/openstack/compute/servers.py:1141
+#: nova/api/openstack/compute/plugins/v3/servers.py:707
msgid "Missing argument 'type' for reboot"
msgstr "Falta el argumento 'type' para el rearranque"
-#: nova/api/openstack/compute/servers.py:1169
-#: nova/api/openstack/compute/plugins/v3/servers.py:697
+#: nova/api/openstack/compute/servers.py:1168
+#: nova/api/openstack/compute/plugins/v3/servers.py:735
msgid "Unable to locate requested flavor."
msgstr "No se puede ubicar el tipo solicitado."
-#: nova/api/openstack/compute/servers.py:1172
-#: nova/api/openstack/compute/plugins/v3/servers.py:700
+#: nova/api/openstack/compute/servers.py:1171
+#: nova/api/openstack/compute/plugins/v3/servers.py:738
msgid "Resize requires a flavor change."
msgstr "Redimensionar necesita un cambio de modelo. "
-#: nova/api/openstack/compute/servers.py:1180
-#: nova/api/openstack/compute/plugins/v3/servers.py:708
+#: nova/api/openstack/compute/servers.py:1181
+#: nova/api/openstack/compute/plugins/v3/servers.py:748
msgid "You are not authorized to access the image the instance was started with."
msgstr ""
"No está autorizado a acceder a la imagen con la que se ha lanzado la "
"instancia."
-#: nova/api/openstack/compute/servers.py:1184
-#: nova/api/openstack/compute/plugins/v3/servers.py:712
+#: nova/api/openstack/compute/servers.py:1185
+#: nova/api/openstack/compute/plugins/v3/servers.py:752
msgid "Image that the instance was started with could not be found."
msgstr "No se ha podido encontrar la imagen con la que se lanzó la instancia."
-#: nova/api/openstack/compute/servers.py:1188
-#: nova/api/openstack/compute/plugins/v3/servers.py:716
+#: nova/api/openstack/compute/servers.py:1189
+#: nova/api/openstack/compute/plugins/v3/servers.py:756
msgid "Invalid instance image."
msgstr "Imagen de instancia no válida."
-#: nova/api/openstack/compute/servers.py:1211
+#: nova/api/openstack/compute/servers.py:1215
msgid "Missing imageRef attribute"
msgstr "Falta el atributo imageRef"
-#: nova/api/openstack/compute/servers.py:1216
-#: nova/api/openstack/compute/servers.py:1224
+#: nova/api/openstack/compute/servers.py:1220
+#: nova/api/openstack/compute/servers.py:1228
msgid "Invalid imageRef provided."
msgstr "Se ha proporcionado una referencia de imagen no válida."
-#: nova/api/openstack/compute/servers.py:1254
+#: nova/api/openstack/compute/servers.py:1258
msgid "Missing flavorRef attribute"
msgstr "Falta el atributo flavorRef"
-#: nova/api/openstack/compute/servers.py:1267
+#: nova/api/openstack/compute/servers.py:1271
msgid "No adminPass was specified"
msgstr "No se ha especificado adminPass"
-#: nova/api/openstack/compute/servers.py:1275
+#: nova/api/openstack/compute/servers.py:1279
#: nova/api/openstack/compute/plugins/v3/admin_password.py:56
msgid "Unable to set password on instance"
msgstr "No se puede establecer contraseña en la instancia"
-#: nova/api/openstack/compute/servers.py:1284
+#: nova/api/openstack/compute/servers.py:1288
msgid "Unable to parse metadata key/value pairs."
msgstr "No se han podido analizar pares de clave/valor de metadatos."
-#: nova/api/openstack/compute/servers.py:1297
+#: nova/api/openstack/compute/servers.py:1301
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
"La solicitud de redimensionamiento tiene un atributo 'flavorRef' no "
"válido."
-#: nova/api/openstack/compute/servers.py:1300
+#: nova/api/openstack/compute/servers.py:1304
msgid "Resize requests require 'flavorRef' attribute."
msgstr "Las solicitudes de redimensionamiento necesitan el atributo 'flavorRef'. "
-#: nova/api/openstack/compute/servers.py:1320
+#: nova/api/openstack/compute/servers.py:1324
msgid "Could not parse imageRef from request."
msgstr "No se ha podido analizar imageRef de la solicitud. "
-#: nova/api/openstack/compute/servers.py:1390
-#: nova/api/openstack/compute/plugins/v3/servers.py:852
+#: nova/api/openstack/compute/servers.py:1394
+#: nova/api/openstack/compute/plugins/v3/servers.py:895
msgid "Cannot find image for rebuild"
msgstr "No se puede encontrar la imagen para reconstrucción "
-#: nova/api/openstack/compute/servers.py:1423
+#: nova/api/openstack/compute/servers.py:1428
msgid "createImage entity requires name attribute"
msgstr "La entidad createImage necesita el atributo de nombre"
-#: nova/api/openstack/compute/servers.py:1432
-#: nova/api/openstack/compute/contrib/admin_actions.py:286
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:85
-#: nova/api/openstack/compute/plugins/v3/servers.py:892
+#: nova/api/openstack/compute/servers.py:1437
+#: nova/api/openstack/compute/contrib/admin_actions.py:283
+#: nova/api/openstack/compute/plugins/v3/servers.py:936
msgid "Invalid metadata"
msgstr "Metadatos no válidos"
-#: nova/api/openstack/compute/servers.py:1490
+#: nova/api/openstack/compute/servers.py:1495
msgid "Invalid adminPass"
msgstr "adminPass no válido "
-#: nova/api/openstack/compute/contrib/admin_actions.py:63
-#: nova/api/openstack/compute/contrib/admin_actions.py:88
-#: nova/api/openstack/compute/contrib/admin_actions.py:113
-#: nova/api/openstack/compute/contrib/admin_actions.py:135
-#: nova/api/openstack/compute/contrib/admin_actions.py:176
-#: nova/api/openstack/compute/contrib/admin_actions.py:195
-#: nova/api/openstack/compute/contrib/admin_actions.py:214
-#: nova/api/openstack/compute/contrib/admin_actions.py:233
-#: nova/api/openstack/compute/contrib/admin_actions.py:391
-#: nova/api/openstack/compute/contrib/multinic.py:43
+#: nova/api/openstack/compute/contrib/admin_actions.py:64
+#: nova/api/openstack/compute/contrib/admin_actions.py:86
+#: nova/api/openstack/compute/contrib/admin_actions.py:108
+#: nova/api/openstack/compute/contrib/admin_actions.py:130
+#: nova/api/openstack/compute/contrib/admin_actions.py:173
+#: nova/api/openstack/compute/contrib/admin_actions.py:192
+#: nova/api/openstack/compute/contrib/admin_actions.py:211
+#: nova/api/openstack/compute/contrib/admin_actions.py:230
+#: nova/api/openstack/compute/contrib/admin_actions.py:388
+#: nova/api/openstack/compute/contrib/multinic.py:44
#: nova/api/openstack/compute/contrib/rescue.py:45
#: nova/api/openstack/compute/contrib/shelve.py:43
msgid "Server not found"
msgstr "Servidor no encontrado"
-#: nova/api/openstack/compute/contrib/admin_actions.py:66
-msgid "Virt driver does not implement pause function."
-msgstr "El controlador Virt no implementa la función de pausa."
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:70
-#, python-format
-msgid "Compute.api::pause %s"
-msgstr "Compute.api::pause %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:91
-msgid "Virt driver does not implement unpause function."
-msgstr "El controlador Virt no implementa una función de unpause."
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:95
-#, python-format
-msgid "Compute.api::unpause %s"
-msgstr "Compute.api::unpause %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:117
-#, python-format
-msgid "compute.api::suspend %s"
-msgstr "compute.api::suspend %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:139
-#, python-format
-msgid "compute.api::resume %s"
-msgstr "compute.api::resume %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:163
-#, python-format
-msgid "Error in migrate %s"
-msgstr "Error al migrar %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:182
-#, python-format
-msgid "Compute.api::reset_network %s"
-msgstr "Compute.api::reset_network %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:201
-#, python-format
-msgid "Compute.api::inject_network_info %s"
-msgstr "Compute.api::inject_network_info %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:218
-#, python-format
-msgid "Compute.api::lock %s"
-msgstr "Compute.api::lock %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:237
-#, python-format
-msgid "Compute.api::unlock %s"
-msgstr "Compute.api::unlock %s"
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:263
+#: nova/api/openstack/compute/contrib/admin_actions.py:260
#, python-format
msgid "createBackup entity requires %s attribute"
msgstr "La entidad createBackup necesita el atributo %s"
-#: nova/api/openstack/compute/contrib/admin_actions.py:267
+#: nova/api/openstack/compute/contrib/admin_actions.py:264
msgid "Malformed createBackup entity"
msgstr "Entidad createBackup formada incorrectamente"
-#: nova/api/openstack/compute/contrib/admin_actions.py:273
+#: nova/api/openstack/compute/contrib/admin_actions.py:270
msgid "createBackup attribute 'rotation' must be an integer"
msgstr "La 'rotación' del atributo createBackup debe ser un entero"
-#: nova/api/openstack/compute/contrib/admin_actions.py:276
+#: nova/api/openstack/compute/contrib/admin_actions.py:273
msgid "createBackup attribute 'rotation' must be greater than or equal to zero"
msgstr "El atributo de createBackup 'rotation' debe ser mayor que o igual a cero"
-#: nova/api/openstack/compute/contrib/admin_actions.py:292
-#: nova/api/openstack/compute/contrib/console_output.py:45
+#: nova/api/openstack/compute/contrib/admin_actions.py:289
+#: nova/api/openstack/compute/contrib/console_output.py:46
#: nova/api/openstack/compute/contrib/server_start_stop.py:40
msgid "Instance not found"
msgstr "No se ha encontrado la instancia "
-#: nova/api/openstack/compute/contrib/admin_actions.py:323
-#: nova/api/openstack/compute/plugins/v3/migrate_server.py:80
+#: nova/api/openstack/compute/contrib/admin_actions.py:320
msgid ""
"host, block_migration and disk_over_commit must be specified for live "
"migration."
@@ -3206,74 +3086,69 @@ msgstr ""
"host, block_migration y disk_over_commit deben especificarse para "
"migración en vivo."
-#: nova/api/openstack/compute/contrib/admin_actions.py:360
+#: nova/api/openstack/compute/contrib/admin_actions.py:357
#, python-format
msgid "Live migration of instance %s to another host failed"
msgstr "Ha fallado la migración en vivo de la instancia %s a otro host"
-#: nova/api/openstack/compute/contrib/admin_actions.py:363
+#: nova/api/openstack/compute/contrib/admin_actions.py:360
#, python-format
msgid "Live migration of instance %(id)s to host %(host)s failed"
msgstr "La migración en directo de la instancia %(id)s al host %(host)s ha fallado"
-#: nova/api/openstack/compute/contrib/admin_actions.py:381
-#: nova/api/openstack/compute/plugins/v3/admin_actions.py:83
+#: nova/api/openstack/compute/contrib/admin_actions.py:378
#, python-format
msgid "Desired state must be specified. Valid states are: %s"
msgstr "Se debe especificar el estado deseado. Los estados válidos son: %s"
-#: nova/api/openstack/compute/contrib/admin_actions.py:395
-#, python-format
-msgid "Compute.api::resetState %s"
-msgstr "Compute.api::resetState %s"
-
-#: nova/api/openstack/compute/contrib/aggregates.py:99
+#: nova/api/openstack/compute/contrib/agents.py:100
+#: nova/api/openstack/compute/contrib/agents.py:118
+#: nova/api/openstack/compute/contrib/agents.py:156
+#: nova/api/openstack/compute/contrib/cloudpipe_update.py:55
#, python-format
-msgid "Cannot show aggregate: %s"
-msgstr "No se puede mostrar el agregado: %s"
+msgid "Invalid request body: %s"
+msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:137
-#, python-format
-msgid "Cannot update aggregate: %s"
-msgstr "No se puede actualizar el agregado: %s"
+#: nova/api/openstack/compute/contrib/aggregates.py:39
+msgid "Only host parameter can be specified"
+msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:151
-#, python-format
-msgid "Cannot delete aggregate: %s"
-msgstr "No se puede eliminar el agregado: %s"
+#: nova/api/openstack/compute/contrib/aggregates.py:42
+msgid "Host parameter must be specified"
+msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:162
+#: nova/api/openstack/compute/contrib/aggregates.py:168
#, python-format
msgid "Aggregates does not have %s action"
msgstr "Los agregados no tienen la acción %s "
-#: nova/api/openstack/compute/contrib/aggregates.py:166
+#: nova/api/openstack/compute/contrib/aggregates.py:172
#: nova/api/openstack/compute/contrib/flavormanage.py:55
#: nova/api/openstack/compute/contrib/keypairs.py:86
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:167
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:169
msgid "Invalid request body"
msgstr "Cuerpo de solicitud no válido"
-#: nova/api/openstack/compute/contrib/aggregates.py:176
-#: nova/api/openstack/compute/contrib/aggregates.py:181
+#: nova/api/openstack/compute/contrib/aggregates.py:182
+#: nova/api/openstack/compute/contrib/aggregates.py:187
#, python-format
msgid "Cannot add host %(host)s in aggregate %(id)s"
msgstr "No se puede añadir el host %(host)s en el agregado %(id)s"
-#: nova/api/openstack/compute/contrib/aggregates.py:195
-#: nova/api/openstack/compute/contrib/aggregates.py:199
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:151
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:155
+#: nova/api/openstack/compute/contrib/aggregates.py:201
+#: nova/api/openstack/compute/contrib/aggregates.py:205
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:153
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:157
#, python-format
msgid "Cannot remove host %(host)s in aggregate %(id)s"
msgstr "No se puede eliminar el host %(host)s en el agregado %(id)s"
-#: nova/api/openstack/compute/contrib/aggregates.py:218
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:175
+#: nova/api/openstack/compute/contrib/aggregates.py:224
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:177
msgid "The value of metadata must be a dict"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:230
+#: nova/api/openstack/compute/contrib/aggregates.py:237
#, python-format
msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
msgstr "No se pueden establecer metadatos %(metadata)s en el agregado %(id)s"
@@ -3289,28 +3164,28 @@ msgstr "Se ha creado instantánea asistida del volúmen %s"
msgid "Delete snapshot with id: %s"
msgstr "Suprimir instantánea con el ID: %s"
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:104
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:103
msgid "Attach interface"
msgstr "Conectar interfaz"
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:119
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:154
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:177
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:166
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:116
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:145
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:166
+#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:174
+#: nova/network/security_group/neutron_driver.py:510
+#: nova/network/security_group/neutron_driver.py:514
+#: nova/network/security_group/neutron_driver.py:518
+#: nova/network/security_group/neutron_driver.py:522
+#: nova/network/security_group/neutron_driver.py:526
msgid "Network driver does not support this function."
msgstr "El controlador de red no soporta esta función."
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:123
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:120
msgid "Failed to attach interface"
msgstr "Se ha encontrado un error al conectar la interfaz."
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:130
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:128
-msgid "Attachments update is not supported"
-msgstr "La actualización de dispositivos conectados no está soportada"
-
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:142
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:139
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:136
+#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:144
#, python-format
msgid "Detach interface %s"
msgstr "Desconectar interfaz %s"
@@ -3324,40 +3199,33 @@ msgstr "Se debe especificar la dirección en la forma xx:xx:xx:xx:xx:xx:xx"
msgid "Must specify id or address"
msgstr "Debe especificar id o dirección"
-#: nova/api/openstack/compute/contrib/cells.py:252
+#: nova/api/openstack/compute/contrib/cells.py:250
#, python-format
msgid "Cell %(id)s not found."
msgstr "No se ha encontrado Cell %(id)s."
-#: nova/api/openstack/compute/contrib/cells.py:285
-#: nova/api/openstack/compute/plugins/v3/cells.py:192
+#: nova/api/openstack/compute/contrib/cells.py:286
msgid "Cell name cannot be empty"
msgstr "El nombre de célula no puede estar vacío"
#: nova/api/openstack/compute/contrib/cells.py:289
-#: nova/api/openstack/compute/plugins/v3/cells.py:196
msgid "Cell name cannot contain '!' or '.'"
msgstr "El nombre de célula no puede contener '!' o '.'"
-#: nova/api/openstack/compute/contrib/cells.py:296
-#: nova/api/openstack/compute/plugins/v3/cells.py:203
+#: nova/api/openstack/compute/contrib/cells.py:295
msgid "Cell type must be 'parent' or 'child'"
msgstr "El tipo de célula debe ser 'padre' o 'hijo'"
-#: nova/api/openstack/compute/contrib/cells.py:352
-#: nova/api/openstack/compute/contrib/cells.py:376
-#: nova/api/openstack/compute/plugins/v3/cells.py:259
-#: nova/api/openstack/compute/plugins/v3/cells.py:282
+#: nova/api/openstack/compute/contrib/cells.py:353
+#: nova/api/openstack/compute/contrib/cells.py:378
msgid "No cell information in request"
msgstr "No hay información de célula en la solicitud"
#: nova/api/openstack/compute/contrib/cells.py:357
-#: nova/api/openstack/compute/plugins/v3/cells.py:264
msgid "No cell name in request"
msgstr "No hay ningún nombre de célula en la solicitud"
-#: nova/api/openstack/compute/contrib/cells.py:411
-#: nova/api/openstack/compute/plugins/v3/cells.py:319
+#: nova/api/openstack/compute/contrib/cells.py:415
msgid "Only 'updated_since', 'project_id' and 'deleted' are understood."
msgstr "Solamente 'updated_since', 'project_id' y 'deleted' son entendidos."
@@ -3389,21 +3257,21 @@ msgstr "Token no encontrado"
msgid "The requested console type details are not accessible"
msgstr "Los detalles del tipo de consola solicitada no son accesibles"
-#: nova/api/openstack/compute/contrib/console_output.py:51
+#: nova/api/openstack/compute/contrib/console_output.py:52
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr ""
"os-getConsoleOutput formada incorrectamente u omitida en el cuerpo de "
"solicitud"
-#: nova/api/openstack/compute/contrib/console_output.py:62
+#: nova/api/openstack/compute/contrib/console_output.py:63
msgid "Length in request body must be an integer value"
msgstr "La longitud del cuerpo de solicitud debe ser un valor entero "
-#: nova/api/openstack/compute/contrib/console_output.py:70
+#: nova/api/openstack/compute/contrib/console_output.py:71
msgid "Unable to get console"
msgstr "No se puede obtener consola "
-#: nova/api/openstack/compute/contrib/console_output.py:75
+#: nova/api/openstack/compute/contrib/console_output.py:76
#: nova/api/openstack/compute/plugins/v3/console_output.py:60
msgid "Unable to get console log, functionality not implemented"
msgstr ""
@@ -3415,17 +3283,17 @@ msgid "Instance not yet ready"
msgstr "La instancia aún no está preparada"
#: nova/api/openstack/compute/contrib/consoles.py:52
-#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:62
+#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:60
msgid "Unable to get vnc console, functionality not implemented"
msgstr "Incapaz de obtener consola vnc, funcionalidad no implementada"
#: nova/api/openstack/compute/contrib/consoles.py:76
-#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:93
+#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:89
msgid "Unable to get spice console, functionality not implemented"
msgstr "Incapaz de obtener la consola spice, funcionalidad no implementada"
#: nova/api/openstack/compute/contrib/consoles.py:101
-#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:127
+#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:121
msgid "Unable to get rdp console, functionality not implemented"
msgstr "Incapaz de obtener consola rdp, funcionalidad no implementada"
@@ -3434,23 +3302,27 @@ msgstr "Incapaz de obtener consola rdp, funcionalidad no implementada"
msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr "%s debe ser 'MANUAL' o 'AUTO'."
-#: nova/api/openstack/compute/contrib/evacuate.py:53
-msgid "host and onSharedStorage must be specified."
-msgstr "Se deben especificar host y onSharedStorage"
+#: nova/api/openstack/compute/contrib/evacuate.py:54
+msgid "host must be specified."
+msgstr ""
#: nova/api/openstack/compute/contrib/evacuate.py:61
+msgid "onSharedStorage must be specified."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/evacuate.py:69
#: nova/api/openstack/compute/plugins/v3/evacuate.py:67
msgid "admin password can't be changed on existing disk"
msgstr "No se puede cambiar la contraseña de administrador en el disco existente"
-#: nova/api/openstack/compute/contrib/evacuate.py:71
-#: nova/api/openstack/compute/plugins/v3/evacuate.py:77
+#: nova/api/openstack/compute/contrib/evacuate.py:80
+#: nova/api/openstack/compute/plugins/v3/evacuate.py:78
#, python-format
msgid "Compute host %s not found."
msgstr "No se ha encontrado Compute host %s."
-#: nova/api/openstack/compute/contrib/evacuate.py:77
-#: nova/api/openstack/compute/plugins/v3/evacuate.py:83
+#: nova/api/openstack/compute/contrib/evacuate.py:86
+#: nova/api/openstack/compute/plugins/v3/evacuate.py:84
msgid "The target host can't be the same one."
msgstr ""
@@ -3477,8 +3349,12 @@ msgstr "La lista de acceso no está disponible para sabores públicos. "
msgid "No request body"
msgstr "Ningún cuerpo de solicitud "
+#: nova/api/openstack/compute/contrib/flavor_access.py:170
+#: nova/api/openstack/compute/contrib/flavor_access.py:194
+msgid "Missing tenant parameter"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:56
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:42
msgid "No Request Body"
msgstr "Ningún cuerpo de solicitud"
@@ -3488,8 +3364,8 @@ msgstr "Se han proporcionado extra_specs incorrectas"
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:134
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:150
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:113
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:132
+#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:96
+#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:115
#, python-format
msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s."
msgstr ""
@@ -3498,79 +3374,88 @@ msgstr ""
msgid "DNS entries not found."
msgstr "No se han encontrado entradas DNS."
-#: nova/api/openstack/compute/contrib/floating_ips.py:129
-#: nova/api/openstack/compute/contrib/floating_ips.py:177
+#: nova/api/openstack/compute/contrib/floating_ips.py:130
+#: nova/api/openstack/compute/contrib/floating_ips.py:186
#, python-format
msgid "Floating ip not found for id %s"
msgstr "No se ha encontrado la IP flotante para el id %s."
-#: nova/api/openstack/compute/contrib/floating_ips.py:162
+#: nova/api/openstack/compute/contrib/floating_ips.py:163
#, python-format
msgid "No more floating ips in pool %s."
msgstr "No hay más IP flotantes en la agrupación %s."
-#: nova/api/openstack/compute/contrib/floating_ips.py:164
+#: nova/api/openstack/compute/contrib/floating_ips.py:165
msgid "No more floating ips available."
msgstr "No hay más IP flotantes disponibles."
-#: nova/api/openstack/compute/contrib/floating_ips.py:218
-#: nova/api/openstack/compute/contrib/floating_ips.py:283
-#: nova/api/openstack/compute/contrib/security_groups.py:481
+#: nova/api/openstack/compute/contrib/floating_ips.py:169
+#, python-format
+msgid "IP allocation over quota in pool %s."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:171
+msgid "IP allocation over quota."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:223
+#: nova/api/openstack/compute/contrib/floating_ips.py:288
+#: nova/api/openstack/compute/contrib/security_groups.py:488
msgid "Missing parameter dict"
msgstr "Falta el parámetro dict "
-#: nova/api/openstack/compute/contrib/floating_ips.py:221
-#: nova/api/openstack/compute/contrib/floating_ips.py:286
+#: nova/api/openstack/compute/contrib/floating_ips.py:226
+#: nova/api/openstack/compute/contrib/floating_ips.py:291
msgid "Address not specified"
msgstr "Dirección no especificada "
-#: nova/api/openstack/compute/contrib/floating_ips.py:227
+#: nova/api/openstack/compute/contrib/floating_ips.py:232
msgid "No nw_info cache associated with instance"
msgstr "No hay memoria caché nw_info asociada con la instancia "
-#: nova/api/openstack/compute/contrib/floating_ips.py:232
+#: nova/api/openstack/compute/contrib/floating_ips.py:237
msgid "No fixed ips associated to instance"
msgstr "No hay IP fijas asociadas a la instancia "
-#: nova/api/openstack/compute/contrib/floating_ips.py:243
+#: nova/api/openstack/compute/contrib/floating_ips.py:248
msgid "Specified fixed address not assigned to instance"
msgstr "Dirección fija especificada no asignada a la instancia"
-#: nova/api/openstack/compute/contrib/floating_ips.py:257
+#: nova/api/openstack/compute/contrib/floating_ips.py:262
msgid "floating ip is already associated"
msgstr "La IP flotante ya está asociada"
-#: nova/api/openstack/compute/contrib/floating_ips.py:260
+#: nova/api/openstack/compute/contrib/floating_ips.py:265
msgid "l3driver call to add floating ip failed"
msgstr "La llamada l3driver para añadir IP flotante ha fallado"
-#: nova/api/openstack/compute/contrib/floating_ips.py:263
-#: nova/api/openstack/compute/contrib/floating_ips.py:294
+#: nova/api/openstack/compute/contrib/floating_ips.py:268
+#: nova/api/openstack/compute/contrib/floating_ips.py:299
msgid "floating ip not found"
msgstr "No se ha encontrado IP flotante"
-#: nova/api/openstack/compute/contrib/floating_ips.py:268
+#: nova/api/openstack/compute/contrib/floating_ips.py:273
msgid "Error. Unable to associate floating ip"
msgstr "Error. No se puede asociar IP flotante"
-#: nova/api/openstack/compute/contrib/floating_ips.py:309
+#: nova/api/openstack/compute/contrib/floating_ips.py:314
msgid "Floating ip is not associated"
msgstr "La ip flotante no está asociada "
-#: nova/api/openstack/compute/contrib/floating_ips.py:313
+#: nova/api/openstack/compute/contrib/floating_ips.py:318
#, python-format
msgid "Floating ip %(address)s is not associated with instance %(id)s."
msgstr ""
"La dirección IP flotante %(address)s no está asociada con la instancia "
"%(id)s."
-#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:118
+#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:116
#: nova/api/openstack/compute/contrib/services.py:173
#: nova/api/openstack/compute/plugins/v3/services.py:124
msgid "Unknown action"
msgstr "Acción desconocida"
-#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:146
+#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:144
#: nova/cmd/manage.py:417
#, python-format
msgid "/%s should be specified as single address(es) not in cidr format"
@@ -3582,83 +3467,79 @@ msgstr ""
msgid "fping utility is not found."
msgstr "No se encuentra el programa de utilidad fping."
-#: nova/api/openstack/compute/contrib/hosts.py:183
-#: nova/api/openstack/compute/plugins/v3/hosts.py:128
+#: nova/api/openstack/compute/contrib/hosts.py:185
#, python-format
msgid "Invalid update setting: '%s'"
msgstr "Valor de actualización no válido: '%s' "
-#: nova/api/openstack/compute/contrib/hosts.py:186
-#: nova/api/openstack/compute/plugins/v3/hosts.py:131
+#: nova/api/openstack/compute/contrib/hosts.py:188
#, python-format
msgid "Invalid status: '%s'"
msgstr "Estado no válido: '%s' "
-#: nova/api/openstack/compute/contrib/hosts.py:188
-#: nova/api/openstack/compute/plugins/v3/hosts.py:133
+#: nova/api/openstack/compute/contrib/hosts.py:190
#, python-format
msgid "Invalid mode: '%s'"
msgstr "Modalidad no válida: '%s' "
-#: nova/api/openstack/compute/contrib/hosts.py:190
-#: nova/api/openstack/compute/plugins/v3/hosts.py:135
+#: nova/api/openstack/compute/contrib/hosts.py:192
msgid "'status' or 'maintenance_mode' needed for host update"
msgstr "Se necesita 'status' o 'maintenance_mode' para actualización de host"
-#: nova/api/openstack/compute/contrib/hosts.py:206
-#: nova/api/openstack/compute/plugins/v3/hosts.py:152
+#: nova/api/openstack/compute/contrib/hosts.py:208
+#: nova/api/openstack/compute/plugins/v3/hosts.py:135
#, python-format
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr "Poniendo el host %(host_name)s en modalidad de mantenimiento %(mode)s."
-#: nova/api/openstack/compute/contrib/hosts.py:212
-#: nova/api/openstack/compute/plugins/v3/hosts.py:158
+#: nova/api/openstack/compute/contrib/hosts.py:214
+#: nova/api/openstack/compute/plugins/v3/hosts.py:141
msgid "Virt driver does not implement host maintenance mode."
msgstr "El controlador virt no implementa la modalidad de mantenimiento de host."
-#: nova/api/openstack/compute/contrib/hosts.py:227
-#: nova/api/openstack/compute/plugins/v3/hosts.py:174
+#: nova/api/openstack/compute/contrib/hosts.py:229
+#: nova/api/openstack/compute/plugins/v3/hosts.py:157
#, python-format
msgid "Enabling host %s."
msgstr "Habilitando el host %s."
-#: nova/api/openstack/compute/contrib/hosts.py:229
-#: nova/api/openstack/compute/plugins/v3/hosts.py:176
+#: nova/api/openstack/compute/contrib/hosts.py:231
+#: nova/api/openstack/compute/plugins/v3/hosts.py:159
#, python-format
msgid "Disabling host %s."
msgstr "Inhabilitando el host %s."
-#: nova/api/openstack/compute/contrib/hosts.py:234
-#: nova/api/openstack/compute/plugins/v3/hosts.py:181
+#: nova/api/openstack/compute/contrib/hosts.py:236
+#: nova/api/openstack/compute/plugins/v3/hosts.py:164
msgid "Virt driver does not implement host disabled status."
msgstr "El controlador virt no implementa el estado inhabilitado de host."
-#: nova/api/openstack/compute/contrib/hosts.py:250
-#: nova/api/openstack/compute/plugins/v3/hosts.py:199
+#: nova/api/openstack/compute/contrib/hosts.py:252
+#: nova/api/openstack/compute/plugins/v3/hosts.py:182
msgid "Virt driver does not implement host power management."
msgstr "El controlador virt no implementa la gestión de alimentación de host."
-#: nova/api/openstack/compute/contrib/hosts.py:336
-#: nova/api/openstack/compute/plugins/v3/hosts.py:292
+#: nova/api/openstack/compute/contrib/hosts.py:338
+#: nova/api/openstack/compute/plugins/v3/hosts.py:275
msgid "Describe-resource is admin only functionality"
msgstr "El recurso de descripción es funcionalidad sólo de administrador"
-#: nova/api/openstack/compute/contrib/hypervisors.py:193
-#: nova/api/openstack/compute/contrib/hypervisors.py:205
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:93
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:105
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:140
+#: nova/api/openstack/compute/contrib/hypervisors.py:208
+#: nova/api/openstack/compute/contrib/hypervisors.py:220
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:100
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:112
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:147
#, python-format
msgid "Hypervisor with ID '%s' could not be found."
msgstr "El hipervisor con el ID '%s' no se ha podido encontrar. "
-#: nova/api/openstack/compute/contrib/hypervisors.py:213
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:113
+#: nova/api/openstack/compute/contrib/hypervisors.py:228
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:120
msgid "Virt driver does not implement uptime function."
msgstr "El controlador virt no implementa la función uptime."
-#: nova/api/openstack/compute/contrib/hypervisors.py:229
-#: nova/api/openstack/compute/contrib/hypervisors.py:239
+#: nova/api/openstack/compute/contrib/hypervisors.py:244
+#: nova/api/openstack/compute/contrib/hypervisors.py:254
#, python-format
msgid "No hypervisor matching '%s' could be found."
msgstr "No es ha podido encontrar ningún hipervisor que coincida con '%s'. "
@@ -3673,27 +3554,22 @@ msgstr "Indicación de fecha y hora no válida para la fecha %s"
msgid "Quota exceeded, too many key pairs."
msgstr "Cuota superada, demasiados pares de claves."
-#: nova/api/openstack/compute/contrib/multinic.py:54
+#: nova/api/openstack/compute/contrib/multinic.py:55
msgid "Missing 'networkId' argument for addFixedIp"
msgstr "Falta el argumento 'networkId' para addFixedIp"
-#: nova/api/openstack/compute/contrib/multinic.py:70
+#: nova/api/openstack/compute/contrib/multinic.py:75
msgid "Missing 'address' argument for removeFixedIp"
msgstr "Falta el argumento 'address' para removeFixedIp "
-#: nova/api/openstack/compute/contrib/multinic.py:80
-#, python-format
-msgid "Unable to find address %r"
-msgstr "No se puede encontrar la dirección %r"
-
#: nova/api/openstack/compute/contrib/networks_associate.py:40
#: nova/api/openstack/compute/contrib/networks_associate.py:56
#: nova/api/openstack/compute/contrib/networks_associate.py:74
-#: nova/api/openstack/compute/contrib/os_networks.py:78
-#: nova/api/openstack/compute/contrib/os_networks.py:93
-#: nova/api/openstack/compute/contrib/os_networks.py:106
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:110
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:137
+#: nova/api/openstack/compute/contrib/os_networks.py:79
+#: nova/api/openstack/compute/contrib/os_networks.py:94
+#: nova/api/openstack/compute/contrib/os_networks.py:107
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:112
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:144
msgid "Network not found"
msgstr "No se ha encontrado la red"
@@ -3715,68 +3591,54 @@ msgstr ""
"La asociación de anfitrión no está implementada por la API de red "
"configurada"
-#: nova/api/openstack/compute/contrib/os_networks.py:81
+#: nova/api/openstack/compute/contrib/os_networks.py:82
msgid "Disassociate network is not implemented by the configured Network API"
msgstr "La desasociación de red no está implementada por la API de red configurada"
-#: nova/api/openstack/compute/contrib/os_networks.py:100
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
-#, python-format
-msgid "Deleting network with id %s"
-msgstr "Suprimiendo red con el id %s"
-
-#: nova/api/openstack/compute/contrib/os_networks.py:118
+#: nova/api/openstack/compute/contrib/os_networks.py:119
msgid "Missing network in body"
msgstr "Falta red en el cuerpo"
-#: nova/api/openstack/compute/contrib/os_networks.py:122
+#: nova/api/openstack/compute/contrib/os_networks.py:123
msgid "Network label is required"
msgstr "Se necesita etiqueta de red"
-#: nova/api/openstack/compute/contrib/os_networks.py:126
+#: nova/api/openstack/compute/contrib/os_networks.py:127
msgid "Network cidr or cidr_v6 is required"
msgstr "Se necesita la red cidr o cidr_v6"
-#: nova/api/openstack/compute/contrib/os_networks.py:152
+#: nova/api/openstack/compute/contrib/os_networks.py:153
msgid "VLAN support must be enabled"
msgstr "El soporte de VLAN debe estar habilitado."
-#: nova/api/openstack/compute/contrib/os_networks.py:155
+#: nova/api/openstack/compute/contrib/os_networks.py:156
#, python-format
msgid "Cannot associate network %(network)s with project %(project)s: %(message)s"
msgstr ""
"No se puede asociar la red %(network)s con el proyecto %(project)s: "
"%(message)s"
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:83
-msgid "Failed to get default networks"
-msgstr "Fallo al obtener las redes predeterminadas"
-
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:122
-msgid "Failed to update usages deallocating network."
-msgstr "No se han podido actualizar los usos desasignando la red."
-
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:157
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:168
msgid "No CIDR requested"
msgstr "No se ha solicitado ningún CIDR"
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:163
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:174
msgid "Requested network does not contain enough (2+) usable hosts"
msgstr "La red solicitada no contiene suficientes hosts utilizables (2+) "
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:167
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178
msgid "CIDR is malformed."
msgstr "CIDR está formado incorrectamente."
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:170
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:181
msgid "Address could not be converted."
msgstr "La dirección no se ha podido convertir."
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:189
msgid "Quota exceeded, too many networks."
msgstr "Se ha superado la cuota, demasiadas redes."
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:191
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:202
msgid "Create networks failed"
msgstr "Ha fallado la creación de redes"
@@ -3821,23 +3683,13 @@ msgstr ""
"Valor de cuota %(value)s para %(key)s es menos que lo actualmente "
"utilizado y reservado %(quota_used)s"
-#: nova/api/openstack/compute/contrib/rescue.py:78
-#: nova/api/openstack/compute/plugins/v3/rescue.py:80
-msgid "The rescue operation is not implemented by this cloud."
-msgstr "La operación de rescate no está implementada por esta nube."
-
-#: nova/api/openstack/compute/contrib/rescue.py:98
-#: nova/api/openstack/compute/plugins/v3/rescue.py:104
-msgid "The unrescue operation is not implemented by this cloud."
-msgstr "La operación de abandono no está implementada por esta nube."
-
#: nova/api/openstack/compute/contrib/scheduler_hints.py:37
#: nova/api/openstack/compute/plugins/v3/scheduler_hints.py:39
msgid "Malformed scheduler_hints attribute"
msgstr "Atributo scheduler_hints formado incorrectamente"
#: nova/api/openstack/compute/contrib/security_group_default_rules.py:127
-#: nova/api/openstack/compute/contrib/security_groups.py:386
+#: nova/api/openstack/compute/contrib/security_groups.py:394
msgid "Not enough parameters to build a valid rule."
msgstr "No hay suficientes parámetros para crear una regla válida."
@@ -3849,81 +3701,80 @@ msgstr "Esta regla predeterminada ya existe."
msgid "security group default rule not found"
msgstr "regla predeterminada de grupo de seguridad no encontrada"
-#: nova/api/openstack/compute/contrib/security_groups.py:394
+#: nova/api/openstack/compute/contrib/security_groups.py:402
#, python-format
msgid "Bad prefix for network in cidr %s"
msgstr "Prefijo erróneo para red en cidr %s"
-#: nova/api/openstack/compute/contrib/security_groups.py:484
+#: nova/api/openstack/compute/contrib/security_groups.py:491
msgid "Security group not specified"
msgstr "Grupo de seguridad no especificado"
-#: nova/api/openstack/compute/contrib/security_groups.py:488
+#: nova/api/openstack/compute/contrib/security_groups.py:495
msgid "Security group name cannot be empty"
msgstr "El nombre de grupo de seguridad no puede estar vacío"
-#: nova/api/openstack/compute/contrib/server_external_events.py:92
+#: nova/api/openstack/compute/contrib/server_external_events.py:93
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:65
#, python-format
msgid "event entity requires key %(key)s"
msgstr "La entidad de evento requiere clave %(key)s"
-#: nova/api/openstack/compute/contrib/server_external_events.py:96
+#: nova/api/openstack/compute/contrib/server_external_events.py:97
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:69
#, python-format
msgid "event entity contains unsupported items: %s"
msgstr "La entidad de evento contiene objetos no soportados: %s"
-#: nova/api/openstack/compute/contrib/server_external_events.py:102
+#: nova/api/openstack/compute/contrib/server_external_events.py:103
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:75
#, python-format
msgid "Invalid event status `%s'"
msgstr "Estado de evento inválido: `%s'"
-#: nova/api/openstack/compute/contrib/server_external_events.py:121
-#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94
+#: nova/api/openstack/compute/contrib/server_external_events.py:126
#, python-format
-msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s"
-msgstr "Cear evento %(name)s:%(tag)s para la instancia %(instance_uuid)s"
+msgid "Creating event %(name)s:%(tag)s for instance %(instance_uuid)s"
+msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:130
+#: nova/api/openstack/compute/contrib/server_external_events.py:148
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:103
msgid "No instances found for any event"
msgstr "No se han encontrado instancias en cualquier evento"
-#: nova/api/openstack/compute/contrib/server_groups.py:162
+#: nova/api/openstack/compute/contrib/server_groups.py:163
msgid "Conflicting policies configured!"
msgstr "Políticas conflictivas configuradas!"
-#: nova/api/openstack/compute/contrib/server_groups.py:167
+#: nova/api/openstack/compute/contrib/server_groups.py:168
#, python-format
msgid "Invalid policies: %s"
msgstr "Políticas inválidas: %s"
-#: nova/api/openstack/compute/contrib/server_groups.py:172
+#: nova/api/openstack/compute/contrib/server_groups.py:173
msgid "Duplicate policies configured!"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:177
+#: nova/api/openstack/compute/contrib/server_groups.py:178
msgid "the body is invalid."
msgstr "El cuerpo es inválido."
-#: nova/api/openstack/compute/contrib/server_groups.py:186
+#: nova/api/openstack/compute/contrib/server_groups.py:187
#, python-format
msgid "'%s' is either missing or empty."
msgstr "'%s' no se encuentra o está vacío."
-#: nova/api/openstack/compute/contrib/server_groups.py:192
+#: nova/api/openstack/compute/contrib/server_groups.py:193
#, python-format
msgid "Invalid format for name: '%s'"
msgstr "Formato inválido para el nombre: '%s'"
-#: nova/api/openstack/compute/contrib/server_groups.py:200
+#: nova/api/openstack/compute/contrib/server_groups.py:201
#, python-format
msgid "'%s' is not a list"
msgstr "'%s' no es una lista"
-#: nova/api/openstack/compute/contrib/server_groups.py:204
+#: nova/api/openstack/compute/contrib/server_groups.py:205
#, python-format
msgid "unsupported fields: %s"
msgstr "Campos no soportados: %s"
@@ -4033,16 +3884,16 @@ msgstr "access_ip_v4 no tiene el formato IPv4 apropiado"
msgid "access_ip_v6 is not proper IPv6 format"
msgstr "access_ip_v6 no tiene el formato IPv6 apropiado"
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:170
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:172
msgid "Invalid request format for metadata"
msgstr "Formato de solicitud inválido para metadatos"
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:103
+#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:109
#, python-format
msgid "Attach interface to %s"
msgstr "Asociar interfaz a %s"
-#: nova/api/openstack/compute/plugins/v3/cells.py:187
+#: nova/api/openstack/compute/plugins/v3/cells.py:189
#, python-format
msgid "Cell %s doesn't exist."
msgstr "No existe Cell %s."
@@ -4051,23 +3902,6 @@ msgstr "No existe Cell %s."
msgid "token not provided"
msgstr "token no proporcionado"
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:62
-#, python-format
-msgid "create_backup entity requires %s attribute"
-msgstr "La entidad create_backup necesita el atributo %s"
-
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:66
-msgid "Malformed create_backup entity"
-msgstr "Entidad create_backup mal formada"
-
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:72
-msgid "create_backup attribute 'rotation' must be an integer"
-msgstr "El atributo 'rotation' de create_backup debe ser un entero"
-
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:75
-msgid "create_backup attribute 'rotation' must be greater than or equal to zero"
-msgstr "El atributo 'rotation' en create_backup debe ser mayor qué o igual a cero"
-
#: nova/api/openstack/compute/plugins/v3/extended_volumes.py:98
msgid "The volume was either invalid or not attached to the instance."
msgstr "El volumen es inválido o no está asociado a la instancia."
@@ -4085,49 +3919,55 @@ msgstr ""
"El volumen %(volume_id)s no se encuentra asociado a la instancia "
"%(server_id)s"
-#: nova/api/openstack/compute/plugins/v3/flavors.py:94
+#: nova/api/openstack/compute/plugins/v3/flavors.py:96
#, python-format
msgid "Invalid min_ram filter [%s]"
msgstr "Filtro min_ram [%s] no válido"
-#: nova/api/openstack/compute/plugins/v3/flavors.py:101
+#: nova/api/openstack/compute/plugins/v3/flavors.py:103
#, python-format
msgid "Invalid min_disk filter [%s]"
msgstr "Filtro min_disk inválido [%s]"
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:66
-msgid "No or bad extra_specs provided"
-msgstr "extra_specs erróneas o no proporcionadas"
-
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:73
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:95
-msgid "Concurrent transaction has been committed, try again"
-msgstr "La transacción concurrente ha sido entregada, intente nuevamente."
-
-#: nova/api/openstack/compute/plugins/v3/hosts.py:120
-msgid "The request body invalid"
-msgstr "El contenido de la solicitud es inválido."
-
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:125
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:132
msgid "Need parameter 'query' to specify which hypervisor to filter on"
msgstr ""
"Se necesita el parámetro 'query' para especificar en qué hipervisor se "
"aplicará el filtro"
+#: nova/api/openstack/compute/plugins/v3/pause_server.py:59
+#: nova/api/openstack/compute/plugins/v3/pause_server.py:81
+msgid "Virt driver does not implement pause function."
+msgstr "El controlador Virt no implementa la función de pausa."
+
#: nova/api/openstack/compute/plugins/v3/server_actions.py:76
#, python-format
msgid "Action %s not found"
msgstr "Acción %s no encontrada"
-#: nova/api/openstack/compute/plugins/v3/servers.py:212
+#: nova/api/openstack/compute/plugins/v3/server_diagnostics.py:46
+msgid "Unable to get diagnostics, functionality not implemented"
+msgstr ""
+
+#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94
+#, python-format
+msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s"
+msgstr "Cear evento %(name)s:%(tag)s para la instancia %(instance_uuid)s"
+
+#: nova/api/openstack/compute/plugins/v3/servers.py:235
msgid "Invalid changes_since value"
msgstr "Valor changes_since inválido"
-#: nova/api/openstack/compute/plugins/v3/servers.py:335
+#: nova/api/openstack/compute/plugins/v3/servers.py:306
+#, python-format
+msgid "Flavor '%s' could not be found "
+msgstr "El sabor '%s' no se ha podido encontrar "
+
+#: nova/api/openstack/compute/plugins/v3/servers.py:358
msgid "Unknown argument: port"
msgstr "Argumento desconocido: puerto"
-#: nova/api/openstack/compute/plugins/v3/servers.py:343
+#: nova/api/openstack/compute/plugins/v3/servers.py:366
#, python-format
msgid ""
"Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port "
@@ -4137,51 +3977,50 @@ msgstr ""
"puerto '%(port)s': el puerto ya cuenta con una dirección IP física "
"asignada."
-#: nova/api/openstack/compute/plugins/v3/servers.py:412
-#: nova/api/openstack/compute/plugins/v3/servers.py:585
-msgid "The request body is invalid"
-msgstr "El cuerpo solicitado es inválido"
-
-#: nova/api/openstack/compute/plugins/v3/servers.py:470
-#: nova/api/openstack/compute/plugins/v3/servers.py:498
+#: nova/api/openstack/compute/plugins/v3/servers.py:494
+#: nova/api/openstack/compute/plugins/v3/servers.py:522
msgid "Invalid flavor_ref provided."
msgstr "Se ha proporcionado un flavor_ref inválido."
-#: nova/api/openstack/compute/plugins/v3/servers.py:596
+#: nova/api/openstack/compute/plugins/v3/servers.py:620
+msgid "The request body is invalid"
+msgstr "El cuerpo solicitado es inválido"
+
+#: nova/api/openstack/compute/plugins/v3/servers.py:631
msgid "host_id cannot be updated."
msgstr "No se puede actualizar host_id."
-#: nova/api/openstack/compute/plugins/v3/servers.py:741
+#: nova/api/openstack/compute/plugins/v3/servers.py:782
msgid "Invalid image_ref provided."
msgstr "La image_ref proporcionada es inválida."
-#: nova/api/openstack/compute/plugins/v3/servers.py:760
+#: nova/api/openstack/compute/plugins/v3/servers.py:801
msgid "Missing image_ref attribute"
msgstr "Atributo image_ref ausente"
-#: nova/api/openstack/compute/plugins/v3/servers.py:767
+#: nova/api/openstack/compute/plugins/v3/servers.py:808
msgid "Missing flavor_ref attribute"
msgstr "Atributo flavor_ref ausente."
-#: nova/api/openstack/compute/plugins/v3/servers.py:780
+#: nova/api/openstack/compute/plugins/v3/servers.py:822
msgid "Resize request has invalid 'flavor_ref' attribute."
msgstr ""
"La solicitud de modifiación de tamaño tiene el atributo 'flavor_ref' "
"inválido."
-#: nova/api/openstack/compute/plugins/v3/servers.py:783
+#: nova/api/openstack/compute/plugins/v3/servers.py:825
msgid "Resize requests require 'flavor_ref' attribute."
msgstr "La solicitud de modificación de tamaño requiere el atributo 'flavor_ref'."
-#: nova/api/openstack/compute/plugins/v3/servers.py:799
+#: nova/api/openstack/compute/plugins/v3/servers.py:842
msgid "Could not parse image_ref from request."
msgstr "No se puede validar image_ref en la solicitud."
-#: nova/api/openstack/compute/plugins/v3/servers.py:883
+#: nova/api/openstack/compute/plugins/v3/servers.py:927
msgid "create_image entity requires name attribute"
msgstr "La entidad create_image requiere el atributo nombre."
-#: nova/api/openstack/compute/plugins/v3/servers.py:945
+#: nova/api/openstack/compute/plugins/v3/servers.py:989
msgid "Invalid admin_password"
msgstr "admin_password inválido"
@@ -4189,18 +4028,14 @@ msgstr "admin_password inválido"
msgid "Disabled reason contains invalid characters or is too long"
msgstr "Disabled reason contiene caracteres inválidos o es demasiado larga."
-#: nova/api/openstack/compute/views/servers.py:197
-msgid "Instance has had its instance_type removed from the DB"
-msgstr "En la instancia se ha eliminado el tipo de instancia de la base de datos"
-
-#: nova/api/validation/validators.py:61
+#: nova/api/validation/validators.py:73
#, python-format
msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s"
msgstr ""
"Conenido inválido para el campo/atributo %(path)s. Valor: %(value)s. "
"%(message)s"
-#: nova/cells/manager.py:78
+#: nova/cells/manager.py:79
msgid ""
"The cells feature of Nova is considered experimental by the OpenStack "
"project because it receives much less testing than the rest of Nova. This"
@@ -4212,59 +4047,59 @@ msgstr ""
"Nova. Esto puede cambiar en el futuro, pero los desplegadores actuales "
"deben estar concientes que el uso en producción ahora puede ser riesgoso."
-#: nova/cells/messaging.py:205
+#: nova/cells/messaging.py:204
#, python-format
msgid "Error processing message locally: %(exc)s"
msgstr "Error al procesar el mensaje localmente: %(exc)s"
-#: nova/cells/messaging.py:366 nova/cells/messaging.py:374
+#: nova/cells/messaging.py:365 nova/cells/messaging.py:373
#, python-format
msgid "destination is %(target_cell)s but routing_path is %(routing_path)s"
msgstr ""
"el destino es %(target_cell)s pero la vía de acceso de direccionamiento "
"es %(routing_path)s"
-#: nova/cells/messaging.py:386
+#: nova/cells/messaging.py:385
#, python-format
msgid "Unknown %(cell_type)s when routing to %(target_cell)s"
msgstr "%(cell_type)s desconocido al direccionar a %(target_cell)s"
-#: nova/cells/messaging.py:410
+#: nova/cells/messaging.py:409
#, python-format
msgid "Error locating next hop for message: %(exc)s"
msgstr "Error al localizar el siguiente salto para el mensaje: %(exc)s"
-#: nova/cells/messaging.py:437
+#: nova/cells/messaging.py:436
#, python-format
msgid "Failed to send message to cell: %(next_hop)s: %(exc)s"
msgstr "No se ha podido enviar el mensaje a la célula: %(next_hop)s: %(exc)s"
-#: nova/cells/messaging.py:516
+#: nova/cells/messaging.py:515
#, python-format
msgid "Error locating next hops for message: %(exc)s"
msgstr "Error al localizar los saltos siguientes para el mensaje: %(exc)s"
-#: nova/cells/messaging.py:536
+#: nova/cells/messaging.py:535
#, python-format
msgid "Error sending message to next hops: %(exc)s"
msgstr "Error al enviar el mensaje a los saltos siguientes: %(exc)s"
-#: nova/cells/messaging.py:554
+#: nova/cells/messaging.py:553
#, python-format
msgid "Error waiting for responses from neighbor cells: %(exc)s"
msgstr "Error al esperar respuestas de células vecinas: %(exc)s"
-#: nova/cells/messaging.py:665
+#: nova/cells/messaging.py:664
#, python-format
msgid "Unknown method '%(method)s' in compute API"
msgstr "Método desconocido '%(method)s' en API de cálculo"
-#: nova/cells/messaging.py:1096
+#: nova/cells/messaging.py:1106
#, python-format
msgid "Got message to create instance fault: %(instance_fault)s"
msgstr "Se ha obtenido mensaje para crear error de instancia: %(instance_fault)s"
-#: nova/cells/messaging.py:1119
+#: nova/cells/messaging.py:1129
#, python-format
msgid ""
"Forcing a sync of instances, project_id=%(projid_str)s, "
@@ -4273,43 +4108,43 @@ msgstr ""
"Forzando una sincronización de instancias, project_id=%(projid_str)s, "
"updated_since=%(since_str)s"
-#: nova/cells/messaging.py:1198
+#: nova/cells/messaging.py:1208
#, python-format
msgid "No match when trying to update BDM: %(bdm)s"
msgstr "No se encontró resultado al intentar actualizar BDM: %(bdm)s"
-#: nova/cells/messaging.py:1673
+#: nova/cells/messaging.py:1683
#, python-format
msgid "No cell_name for %(method)s() from API"
msgstr "No hay cell_name para %(method)s() desde la API"
-#: nova/cells/messaging.py:1690
+#: nova/cells/messaging.py:1700
msgid "No cell_name for instance update from API"
msgstr "No hay cell_name para actualización de instancia desde la API"
-#: nova/cells/messaging.py:1853
+#: nova/cells/messaging.py:1863
#, python-format
msgid "Returning exception %s to caller"
msgstr "Devolviendo excepción %s al interlocutor"
-#: nova/cells/rpcapi.py:369
+#: nova/cells/rpcapi.py:378
msgid "Failed to notify cells of BDM update/create."
msgstr "Fallo al notificar las celdas de actualización/creación de BDM."
-#: nova/cells/rpcapi.py:385
+#: nova/cells/rpcapi.py:394
msgid "Failed to notify cells of BDM destroy."
msgstr "Fallo al notiifcar las celdas de destrucción de BDM"
-#: nova/cells/scheduler.py:192
+#: nova/cells/scheduler.py:191
#, python-format
msgid "Couldn't communicate with cell '%s'"
msgstr "No se puede comunicar con la celda '%s'"
-#: nova/cells/scheduler.py:196
+#: nova/cells/scheduler.py:195
msgid "Couldn't communicate with any cells"
msgstr "No se puede establecer comunicación con alguna celda"
-#: nova/cells/scheduler.py:234
+#: nova/cells/scheduler.py:233
#, python-format
msgid ""
"No cells available when scheduling. Will retry in %(sleep_time)s "
@@ -4318,17 +4153,22 @@ msgstr ""
"No hay celdas disponibles al planificar. Se reintentará dentro de "
"%(sleep_time)s segundo(s)"
-#: nova/cells/scheduler.py:240
+#: nova/cells/scheduler.py:239
#, python-format
msgid "Error scheduling instances %(instance_uuids)s"
msgstr "Error al planificar instancias %(instance_uuids)s"
-#: nova/cells/state.py:352
+#: nova/cells/state.py:182
+#, python-format
+msgid "DB error: %s"
+msgstr "Error de base de datos: %s"
+
+#: nova/cells/state.py:363
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capabilities"
msgstr "Célula '%(cell_name)s' desconocida al intentar actualizar prestaciones"
-#: nova/cells/state.py:367
+#: nova/cells/state.py:378
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capacities"
msgstr "Célula '%(cell_name)s' desconocida al intentar actualizar capacidades"
@@ -4372,73 +4212,73 @@ msgstr "Mascara de red a insertar en la configuración de openvpn"
msgid "Failed to load %s"
msgstr "Ha fallado la carga de %s"
-#: nova/cmd/baremetal_deploy_helper.py:211
+#: nova/cmd/baremetal_deploy_helper.py:210
#, python-format
msgid "parent device '%s' not found"
msgstr "El dispositivo principal '%s' no se ha encontrado"
-#: nova/cmd/baremetal_deploy_helper.py:214
+#: nova/cmd/baremetal_deploy_helper.py:213
#, python-format
msgid "root device '%s' not found"
msgstr "No se ha encontrado el dispositivo raíz '%s'"
-#: nova/cmd/baremetal_deploy_helper.py:216
+#: nova/cmd/baremetal_deploy_helper.py:215
#, python-format
msgid "swap device '%s' not found"
msgstr "No se ha encontrado el dispositivo de swap '%s'"
-#: nova/cmd/baremetal_deploy_helper.py:218
+#: nova/cmd/baremetal_deploy_helper.py:217
#, python-format
msgid "ephemeral device '%s' not found"
msgstr "Dispositivo efímero '%s' no encontrado"
-#: nova/cmd/baremetal_deploy_helper.py:228
+#: nova/cmd/baremetal_deploy_helper.py:227
msgid "Failed to detect root device UUID."
msgstr "Ha fallado la detección del dispositivo raíz UUID."
-#: nova/cmd/baremetal_deploy_helper.py:252
+#: nova/cmd/baremetal_deploy_helper.py:251
#, python-format
msgid "Cmd : %s"
msgstr "Cmd : %s"
-#: nova/cmd/baremetal_deploy_helper.py:253
+#: nova/cmd/baremetal_deploy_helper.py:252
#, python-format
msgid "StdOut : %r"
msgstr "StdOut : %r"
-#: nova/cmd/baremetal_deploy_helper.py:254
+#: nova/cmd/baremetal_deploy_helper.py:253
#, python-format
msgid "StdErr : %r"
msgstr "StdErr : %r"
-#: nova/cmd/baremetal_deploy_helper.py:282
+#: nova/cmd/baremetal_deploy_helper.py:281
#, python-format
msgid "start deployment for node %(node_id)s, params %(params)s"
msgstr ""
"Se ha iniciado el despliegue del nodo %(node_id)s con parámetros "
"%(params)s"
-#: nova/cmd/baremetal_deploy_helper.py:291
+#: nova/cmd/baremetal_deploy_helper.py:290
#, python-format
msgid "deployment to node %s failed"
msgstr "El despligue hacia el nodo %s ha fallado"
-#: nova/cmd/baremetal_deploy_helper.py:295
+#: nova/cmd/baremetal_deploy_helper.py:294
#, python-format
msgid "deployment to node %s done"
msgstr "despliegue hacia el nodo %s completo"
-#: nova/cmd/baremetal_deploy_helper.py:317
+#: nova/cmd/baremetal_deploy_helper.py:316
#, python-format
msgid "post: environ=%s"
msgstr "enviar: environ=%s"
-#: nova/cmd/baremetal_deploy_helper.py:336
+#: nova/cmd/baremetal_deploy_helper.py:335
#, python-format
msgid "Deploy agent error message: %s"
msgstr "Mensaje de error del agente de despliegue: %s"
-#: nova/cmd/baremetal_deploy_helper.py:360
+#: nova/cmd/baremetal_deploy_helper.py:359
#, python-format
msgid "request is queued: node %(node_id)s, params %(params)s"
msgstr "solicitud encolada: nodo %(node_id)s, parámetros %(params)s"
@@ -4465,19 +4305,19 @@ msgstr "Ha fallado la instrucción, por favor compruebe el log para más informa
msgid "No db access allowed in nova-compute: %s"
msgstr "No se permite acceso a la base de datos en nova-compute: %s"
-#: nova/cmd/dhcpbridge.py:109
+#: nova/cmd/dhcpbridge.py:108
#, python-format
msgid "No db access allowed in nova-dhcpbridge: %s"
msgstr "No se permite acceso a la base de datos en nova-dhcpbridge: %s"
-#: nova/cmd/dhcpbridge.py:132
+#: nova/cmd/dhcpbridge.py:131
#, python-format
msgid "Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'"
msgstr ""
"Se han llamado '%(action)s' para la mac '%(mac)s' con dirección IP "
"'%(ip)s'"
-#: nova/cmd/dhcpbridge.py:142
+#: nova/cmd/dhcpbridge.py:141
msgid "Environment variable 'NETWORK_ID' must be set."
msgstr "La variable de entorno 'NETWORK_ID' debe ser establecida."
@@ -4568,40 +4408,40 @@ msgstr ""
"ERROR: comandos de red no están soportados al utilizar la API neutron. "
"Utiliza python-neutronclient en su lugar."
-#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:217
+#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:218
msgid "id"
msgstr "id"
-#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:218
+#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:219
msgid "IPv4"
msgstr "IPv4"
-#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:219
+#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:220
msgid "IPv6"
msgstr "IPv6"
-#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:220
+#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:221
msgid "start address"
msgstr "dirección de inicio"
-#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:221
+#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:222
msgid "DNS1"
msgstr "DNS1"
-#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:222
+#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:223
msgid "DNS2"
msgstr "DNS2"
-#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:223
+#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:224
msgid "VlanID"
msgstr "ID de Vlan"
#: nova/cmd/manage.py:558 nova/cmd/manage.py:665
-#: nova/tests/test_nova_manage.py:224
+#: nova/tests/test_nova_manage.py:225
msgid "project"
msgstr "proyecto"
-#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:225
+#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:226
msgid "uuid"
msgstr "uuid"
@@ -4812,16 +4652,16 @@ msgstr "No hay entradas de nova en el registro de sistema!"
msgid "No db access allowed in nova-network: %s"
msgstr "No se permite acceso a base de datos en nova-network: %s"
-#: nova/compute/api.py:362
+#: nova/compute/api.py:355
msgid "Cannot run any more instances of this type."
msgstr "No se pueden ejecutar más instancias de este tipo. "
-#: nova/compute/api.py:369
+#: nova/compute/api.py:362
#, python-format
msgid "Can only run %s more instances of this type."
msgstr "Sólo se pueden ejecutar %s instancias más de este tipo. "
-#: nova/compute/api.py:381
+#: nova/compute/api.py:374
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)d "
@@ -4830,7 +4670,7 @@ msgstr ""
"Cuota %(overs)s excedida para %(pid)s, intentando ejecutar %(min_count)d "
"intsancias. %(msg)s"
-#: nova/compute/api.py:385
+#: nova/compute/api.py:378
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run between %(min_count)d "
@@ -4839,58 +4679,27 @@ msgstr ""
"Cuota %(overs)s excedida para %(pid)s, intentando ejecutar entre "
"%(min_count)d y %(max_count)d instancias. %(msg)s"
-#: nova/compute/api.py:406
+#: nova/compute/api.py:399
msgid "Metadata type should be dict."
msgstr "El tipo de metadato debería ser dict."
-#: nova/compute/api.py:412
-#, python-format
-msgid ""
-"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
-"properties"
-msgstr ""
-"Se ha superado la cuota para %(pid)s, se ha intentado definir "
-"%(num_metadata)s propiedades de metadatos"
-
-#: nova/compute/api.py:424
-#, python-format
-msgid "Metadata property key '%s' is not a string."
-msgstr "La clave de propiedad de los metadatos '%s' no es una cadena."
-
-#: nova/compute/api.py:427
-#, python-format
-msgid "Metadata property value '%(v)s' for key '%(k)s' is not a string."
-msgstr ""
-"El valor del atributo de metadatos '%(v)s' para la clave '%(k)s' no es "
-"una cadena."
-
-#: nova/compute/api.py:431
-msgid "Metadata property key blank"
-msgstr "Clave de propiedad de metadatos en blanco"
-
-#: nova/compute/api.py:434
+#: nova/compute/api.py:421
msgid "Metadata property key greater than 255 characters"
msgstr "Clave de propiedad metadatos de más de 255 caracteres "
-#: nova/compute/api.py:437
+#: nova/compute/api.py:424
msgid "Metadata property value greater than 255 characters"
msgstr "Valor de propiedad de metadatos de más de 255 caracteres "
-#: nova/compute/api.py:574
-msgid "Failed to set instance name using multi_instance_display_name_template."
-msgstr ""
-"Se ha encontrado un error en la definición del nombre de instancia "
-"mediante multi_instance_display_name_template."
-
-#: nova/compute/api.py:676
+#: nova/compute/api.py:663
msgid "Cannot attach one or more volumes to multiple instances"
msgstr "No se pueden conectar uno o más volúmenes a varias instancias"
-#: nova/compute/api.py:718
+#: nova/compute/api.py:705
msgid "The requested availability zone is not available"
msgstr "La zona de disponibilidad solicitada no está disponible"
-#: nova/compute/api.py:1119
+#: nova/compute/api.py:1107
msgid ""
"Images with destination_type 'volume' need to have a non-zero size "
"specified"
@@ -4898,13 +4707,13 @@ msgstr ""
"Las imágenes con destination_type 'colume? necesitan tener un tamaño "
"especificado diferente a cero"
-#: nova/compute/api.py:1150
+#: nova/compute/api.py:1138
msgid "More than one swap drive requested."
msgstr "Más de un controlador de intercambio ha sido solicitado."
-#: nova/compute/api.py:1299
-#: nova/tests/api/openstack/compute/test_servers.py:3122
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2460
+#: nova/compute/api.py:1277
+#: nova/tests/api/openstack/compute/test_servers.py:3199
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2488
msgid ""
"Unable to launch multiple instances with a single configured port ID. "
"Please launch your instance one by one with different ports."
@@ -4913,37 +4722,33 @@ msgstr ""
"puerto configurado. Por favor lanza tu instancia una por una con puertos "
"diferentes."
-#: nova/compute/api.py:1401
+#: nova/compute/api.py:1298
+msgid "max_count cannot be greater than 1 if an fixed_ip is specified."
+msgstr ""
+
+#: nova/compute/api.py:1404
msgid "instance termination disabled"
msgstr "terminación de instancia inhabilitada"
-#: nova/compute/api.py:1416
+#: nova/compute/api.py:1418
#, python-format
msgid "Working on deleting snapshot %s from shelved instance..."
msgstr ""
"Trabajando en la remoción de la instantánea %s de la instancia "
"almacenada..."
-#: nova/compute/api.py:1423
+#: nova/compute/api.py:1425
#, python-format
msgid "Failed to delete snapshot from shelved instance (%s)."
msgstr "Fallo al remover la instantánea de la instancia almacenada (%s)."
-#: nova/compute/api.py:1427
-msgid ""
-"Something wrong happened when trying to delete snapshot from shelved "
-"instance."
-msgstr ""
-"Algo malo ha pasado al intentar eliminar la instantánea de la imagen "
-"almacenada."
-
-#: nova/compute/api.py:1492
+#: nova/compute/api.py:1486
msgid "Instance is already in deleting state, ignoring this request"
msgstr ""
"La instancia ya se encuentra en estado de remoción, ignorando esta "
"solicitud"
-#: nova/compute/api.py:1540
+#: nova/compute/api.py:1521
#, python-format
msgid ""
"Found an unconfirmed migration during delete, id: %(id)s, status: "
@@ -4952,105 +4757,104 @@ msgstr ""
"Se ha encontrado una migración no confirmada durante la remoción, "
"identificador: %(id)s, estado: %(status)s"
-#: nova/compute/api.py:1550
+#: nova/compute/api.py:1531
msgid "Instance may have been confirmed during delete"
msgstr "la instanacia debe haber sido confirmada durante la remoción"
-#: nova/compute/api.py:1567
+#: nova/compute/api.py:1548
#, python-format
msgid "Migration %s may have been confirmed during delete"
msgstr "La migración %s debe haber sido conifrmada durante la remoción"
-#: nova/compute/api.py:1603
+#: nova/compute/api.py:1583
#, python-format
msgid "Flavor %d not found"
msgstr "El sabor %d no ha sido encontrado"
-#: nova/compute/api.py:1621
+#: nova/compute/api.py:1603
#, python-format
msgid "instance's host %s is down, deleting from database"
msgstr "el host de la instancia %s está inactivos, se suprime de la base de datos"
-#: nova/compute/api.py:1648 nova/compute/manager.py:2279
+#: nova/compute/api.py:1630
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr "Ignorando la anomalía de limpieza de volumen debido a %s "
-#: nova/compute/api.py:2043
+#: nova/compute/api.py:2030
#, python-format
msgid "snapshot for %s"
msgstr "instantánea para %s "
-#: nova/compute/api.py:2415
+#: nova/compute/api.py:2368
+msgid "Resize to zero disk flavor is not allowed."
+msgstr ""
+
+#: nova/compute/api.py:2407
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
"%(overs)s cuota excedida para %(pid)s, se ha intentado redimensionar la "
"instancia. "
-#: nova/compute/api.py:2584
+#: nova/compute/api.py:2582
msgid "Cannot rescue a volume-backed instance"
msgstr "No se puede rescatar una instancia de volume-backed"
-#: nova/compute/api.py:2811
+#: nova/compute/api.py:2809
msgid "Volume must be attached in order to detach."
msgstr "El volumen debe estar conectado para desconectarse."
-#: nova/compute/api.py:2831
+#: nova/compute/api.py:2829
msgid "Old volume is attached to a different instance."
msgstr "Volumen antigüo está ligado a una instancia diferente."
-#: nova/compute/api.py:2834
+#: nova/compute/api.py:2832
msgid "New volume must be detached in order to swap."
msgstr ""
"El nuevo volumen debe ser desasociado para poder activar la memoria de "
"intercambio."
-#: nova/compute/api.py:2837
+#: nova/compute/api.py:2835
msgid "New volume must be the same size or larger."
msgstr "El nuevo volumen debe ser del mismo o de mayor tamaño."
-#: nova/compute/api.py:3032
+#: nova/compute/api.py:3042
#, python-format
msgid "Instance compute service state on %s expected to be down, but it was up."
msgstr ""
"El estado de la instancia del servicio de cómputo en %s debería ser "
"inactivo, pero se encontraba activo."
-#: nova/compute/api.py:3335
+#: nova/compute/api.py:3347
msgid "Host aggregate is not empty"
msgstr "El agregado de anfitrión no está vacío"
-#: nova/compute/api.py:3368
+#: nova/compute/api.py:3380
#, python-format
msgid "More than 1 AZ for host %s"
msgstr ""
-#: nova/compute/api.py:3403
+#: nova/compute/api.py:3415
#, python-format
msgid "Host already in availability zone %s"
msgstr "Anfitrión actualmente en zona de disponibilidad %s"
-#: nova/compute/api.py:3491 nova/tests/compute/test_keypairs.py:135
+#: nova/compute/api.py:3503 nova/tests/compute/test_keypairs.py:137
msgid "Keypair name contains unsafe characters"
msgstr "El nombre de par de claves contiene caracteres no seguros"
-#: nova/compute/api.py:3495 nova/tests/compute/test_keypairs.py:127
-#: nova/tests/compute/test_keypairs.py:131
-msgid "Keypair name must be between 1 and 255 characters long"
-msgstr "El nombre de par de claves debe tener entre 1 y 255 caracteres de longitud"
+#: nova/compute/api.py:3509 nova/tests/compute/test_keypairs.py:127
+#: nova/tests/compute/test_keypairs.py:132
+msgid "Keypair name must be string and between 1 and 255 characters long"
+msgstr ""
-#: nova/compute/api.py:3583
+#: nova/compute/api.py:3597
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr "El grupo de seguridad %s no es una serie o Unicode "
-#: nova/compute/api.py:3586
-#, python-format
-msgid "Security group %s cannot be empty."
-msgstr "El grupo de seguridad %s no puede estar vacío."
-
-#: nova/compute/api.py:3594
+#: nova/compute/api.py:3607
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
@@ -5059,58 +4863,49 @@ msgstr ""
"El valor (%(value)s) para el parámetro Group%(property)s es inválido. El "
"contenido se limita a '%(allowed)s'."
-#: nova/compute/api.py:3600
-#, python-format
-msgid "Security group %s should not be greater than 255 characters."
-msgstr "El grupo de seguridad %s no debe tener más de 255 caracteres. "
-
-#: nova/compute/api.py:3618
+#: nova/compute/api.py:3627
msgid "Quota exceeded, too many security groups."
msgstr "Cuota superada, demasiados grupos de seguridad. "
-#: nova/compute/api.py:3621
+#: nova/compute/api.py:3630
#, python-format
msgid "Create Security Group %s"
msgstr "Crear Grupo de Seguridad %s"
-#: nova/compute/api.py:3633
+#: nova/compute/api.py:3642
#, python-format
msgid "Security group %s already exists"
msgstr "El grupo de seguridad %s ya existe"
-#: nova/compute/api.py:3646
+#: nova/compute/api.py:3655
#, python-format
msgid "Unable to update system group '%s'"
msgstr "Incapaz de actualizar el grupo de sistema '%s'"
-#: nova/compute/api.py:3708
+#: nova/compute/api.py:3717
#, python-format
msgid "Unable to delete system group '%s'"
msgstr "No se ha podido suprimir el grupo de sistemas '%s'"
-#: nova/compute/api.py:3713
+#: nova/compute/api.py:3722
msgid "Security group is still in use"
msgstr "El grupo de seguridad aún se está utilizando"
-#: nova/compute/api.py:3723
-msgid "Failed to update usages deallocating security group"
-msgstr "No se han podido actualizar los usos desasignando el grupo de seguridad "
-
-#: nova/compute/api.py:3726
+#: nova/compute/api.py:3735
#, python-format
msgid "Delete security group %s"
msgstr "Borrar grupo de seguridad %s"
-#: nova/compute/api.py:3802 nova/compute/api.py:3885
+#: nova/compute/api.py:3811 nova/compute/api.py:3894
#, python-format
msgid "Rule (%s) not found"
msgstr "No se ha encontrado la regla (%s)"
-#: nova/compute/api.py:3818
+#: nova/compute/api.py:3827
msgid "Quota exceeded, too many security group rules."
msgstr "Cuota superada, demasiadas reglas de grupo de seguridad "
-#: nova/compute/api.py:3821
+#: nova/compute/api.py:3830
#, python-format
msgid ""
"Security group %(name)s added %(protocol)s ingress "
@@ -5119,7 +4914,7 @@ msgstr ""
"Grupo de seguridad %(name)s ha agregado %(protocol)s al ingreso "
"(%(from_port)s:%(to_port)s)"
-#: nova/compute/api.py:3836
+#: nova/compute/api.py:3845
#, python-format
msgid ""
"Security group %(name)s removed %(protocol)s ingress "
@@ -5128,60 +4923,52 @@ msgstr ""
"El grupo de seguridad %(name)s ha removido %(protocol)s del ingreso "
"(%(from_port)s:%(to_port)s)"
-#: nova/compute/api.py:3892
+#: nova/compute/api.py:3901
msgid "Security group id should be integer"
msgstr "El id de grupo de seguridad debe ser un entero"
-#: nova/compute/claims.py:135
+#: nova/compute/claims.py:126
#, python-format
-msgid ""
-"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
-"%(vcpus)d"
+msgid "Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB"
msgstr ""
-"Intentando reclamación: memoria %(memory_mb)d MB, disco %(disk_gb)d GB, "
-"VCPU %(vcpus)d"
-#: nova/compute/claims.py:150
+#: nova/compute/claims.py:140
msgid "Claim successful"
msgstr "Reclamación satisfactoria"
-#: nova/compute/claims.py:153
+#: nova/compute/claims.py:143
msgid "memory"
msgstr "memoria"
-#: nova/compute/claims.py:162
+#: nova/compute/claims.py:152
msgid "disk"
msgstr "Disco"
-#: nova/compute/claims.py:177 nova/compute/claims.py:249
+#: nova/compute/claims.py:167 nova/compute/claims.py:230
msgid "Claim pci failed."
msgstr "Reclamación pci fallida."
-#: nova/compute/claims.py:180
-msgid "CPUs"
-msgstr "CPUs"
-
-#: nova/compute/claims.py:192
+#: nova/compute/claims.py:177
#, python-format
msgid "Total %(type)s: %(total)d %(unit)s, used: %(used).02f %(unit)s"
msgstr "%(type)s totales: %(total)d %(unit)s utilizados: %(used).02f %(unit)s"
-#: nova/compute/claims.py:199
+#: nova/compute/claims.py:184
#, python-format
msgid "%(type)s limit not specified, defaulting to unlimited"
msgstr "Límite de %(type)s no especificado, predeterminando a ilimitado"
-#: nova/compute/claims.py:206
+#: nova/compute/claims.py:191
#, python-format
msgid "%(type)s limit: %(limit).02f %(unit)s, free: %(free).02f %(unit)s"
msgstr "Límite de %(type)s: %(limit).02f %(unit)s, libre: %(free).02f %(unit)s"
-#: nova/compute/claims.py:212
+#: nova/compute/claims.py:197
#, python-format
msgid "Free %(type)s %(free).02f %(unit)s < requested %(requested)d %(unit)s"
msgstr "Libres %(type)s %(free).02f %(unit)s < solicitados %(requested)d %(unit)s"
-#: nova/compute/flavors.py:109
+#: nova/compute/flavors.py:110
msgid ""
"Flavor names can only contain alphanumeric characters, periods, dashes, "
"underscores and spaces."
@@ -5189,13 +4976,13 @@ msgstr ""
"Los nombres de los sabores solamente puede contener caracteres "
"alfanumericos, puntos, guión, guión bajo y espacios."
-#: nova/compute/flavors.py:119
+#: nova/compute/flavors.py:120
msgid "id cannot contain leading and/or trailing whitespace(s)"
msgstr ""
"El identificador no puede contener espacio(s) vacío(s) en su inicio o "
"final"
-#: nova/compute/flavors.py:129
+#: nova/compute/flavors.py:130
msgid ""
"Flavor id can only contain letters from A-Z (both cases), periods, "
"dashes, underscores and spaces."
@@ -5203,26 +4990,16 @@ msgstr ""
"El identificador de sabor solo puede contener letras de la A-Z "
"(mayúsculas y minúsculas), puntos, guión, guión bajo y espacios."
-#: nova/compute/flavors.py:150
+#: nova/compute/flavors.py:151
#, python-format
msgid "'rxtx_factor' argument must be a float between 0 and %g"
msgstr "El argumento 'rxtx_factor' debe ser un flotante entre 0 y %g"
-#: nova/compute/flavors.py:161
+#: nova/compute/flavors.py:162
msgid "is_public must be a boolean"
msgstr "is_public debe ser un booleano"
-#: nova/compute/flavors.py:166
-#, python-format
-msgid "DB error: %s"
-msgstr "Error de base de datos: %s"
-
-#: nova/compute/flavors.py:177
-#, python-format
-msgid "Instance type %s not found for deletion"
-msgstr "No se ha encontrado el tipo de instancia %s para suprimirse"
-
-#: nova/compute/flavors.py:327
+#: nova/compute/flavors.py:328
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -5230,26 +5007,21 @@ msgstr ""
"Los nombres de las claves solo pueden contener caracteres alfanuméricos, "
"punto, guión, guión bajo, dos puntos y espacios."
-#: nova/compute/manager.py:278
+#: nova/compute/manager.py:284
#, python-format
msgid "Task possibly preempted: %s"
msgstr "Tarea posiblemente preapropiada: %s"
-#: nova/compute/manager.py:360 nova/compute/manager.py:2849
-#, python-format
-msgid "Error while trying to clean up image %s"
-msgstr "Error al intentar limpiar imagen %s"
-
-#: nova/compute/manager.py:501
+#: nova/compute/manager.py:508
msgid "Instance event failed"
msgstr "El evento de instancia ha fallado"
-#: nova/compute/manager.py:600
+#: nova/compute/manager.py:608
#, python-format
msgid "%s is not a valid node managed by this compute host."
msgstr "%s no es un nodo válido administrado por este anfitrión de cómputo."
-#: nova/compute/manager.py:698
+#: nova/compute/manager.py:714
#, python-format
msgid ""
"Deleting instance as its host (%(instance_host)s) is not equal to our "
@@ -5258,11 +5030,11 @@ msgstr ""
"Suprimiendo instancia porque el host (%(instance_host)s) no es igual a "
"nuestro host (%(our_host)s)."
-#: nova/compute/manager.py:713
+#: nova/compute/manager.py:729
msgid "Instance has been marked deleted already, removing it from the hypervisor."
msgstr "La instancia ya ha sido marcada como eliminada, removiendo del hipervisor."
-#: nova/compute/manager.py:733
+#: nova/compute/manager.py:749
msgid ""
"Hypervisor driver does not support instance shared storage check, "
"assuming it's not on shared storage"
@@ -5270,15 +5042,7 @@ msgstr ""
"El hipervisor no soporta la validación de almacenamiento compartido entre"
" instancias, asumiendo que no se encuentra en almacenamiento compartido."
-#: nova/compute/manager.py:739
-msgid "Failed to check if instance shared"
-msgstr "Fallo al verificar si la instancia se encuentra compartida"
-
-#: nova/compute/manager.py:805 nova/compute/manager.py:856
-msgid "Failed to complete a deletion"
-msgstr "Fallo durante la compleción una remoción"
-
-#: nova/compute/manager.py:838
+#: nova/compute/manager.py:854
msgid ""
"Service started deleting the instance during the previous run, but did "
"not finish. Restarting the deletion now."
@@ -5286,7 +5050,7 @@ msgstr ""
"El servicio ha iniciado la remoción de la instancia durante la ejecución "
"previa, pero no ha finalizado. Reiniciando la remoción ahora."
-#: nova/compute/manager.py:879
+#: nova/compute/manager.py:895
#, python-format
msgid ""
"Instance in transitional state (%(task_state)s) at start-up and power "
@@ -5295,105 +5059,81 @@ msgstr ""
"Instancia en estado transicional (%(task_state)s) al arranque y estado de"
" energía es (%(power_state)s), limpiando el estado de la tarea"
-#: nova/compute/manager.py:897
-msgid "Failed to stop instance"
-msgstr "Fallo al detener instancia"
-
-#: nova/compute/manager.py:909
-msgid "Failed to start instance"
-msgstr "Fallo al iniciar instancia"
-
-#: nova/compute/manager.py:934
-msgid "Failed to revert crashed migration"
-msgstr "Se ha encontrado un error en al revertir la migración colgada"
-
-#: nova/compute/manager.py:937
+#: nova/compute/manager.py:953
msgid "Instance found in migrating state during startup. Resetting task_state"
msgstr ""
"Se ha encontrado una instancia en estado de migración durante el inicio. "
"Restableciendo task_state"
-#: nova/compute/manager.py:954
+#: nova/compute/manager.py:970
msgid "Rebooting instance after nova-compute restart."
msgstr "Rearrancando instancia después de reiniciar nova-compute. "
-#: nova/compute/manager.py:964
+#: nova/compute/manager.py:980
msgid "Hypervisor driver does not support resume guests"
msgstr "El controlador de hipervisor no soporta reanudar invitados "
-#: nova/compute/manager.py:969
+#: nova/compute/manager.py:985
msgid "Failed to resume instance"
msgstr "No se ha podido reanudar la instancia"
-#: nova/compute/manager.py:978
+#: nova/compute/manager.py:994
msgid "Hypervisor driver does not support firewall rules"
msgstr "El controlador de hipervisor no soporta reglas de cortafuegos "
-#: nova/compute/manager.py:1003
+#: nova/compute/manager.py:1019
#, python-format
-msgid "Lifecycle event %(state)d on VM %(uuid)s"
-msgstr "Suceso de ciclo de vida %(state)d en máquina virtual %(uuid)s"
+msgid "VM %(state)s (Lifecycle Event)"
+msgstr ""
-#: nova/compute/manager.py:1019
+#: nova/compute/manager.py:1035
#, python-format
msgid "Unexpected power state %d"
msgstr "Estado de alimentación inesperado %d"
-#: nova/compute/manager.py:1124
+#: nova/compute/manager.py:1140
msgid "Hypervisor driver does not support security groups."
msgstr "El controlador del hipervisor no soporta grupos de seguridad."
-#: nova/compute/manager.py:1164
+#: nova/compute/manager.py:1178
#, python-format
msgid "Volume id: %s finished being created but was not set as 'available'"
msgstr ""
"El volumen con id: %s ha finalizado su creación pero no ha sido marcado "
"como 'disponible'"
-#: nova/compute/manager.py:1222 nova/compute/manager.py:1978
+#: nova/compute/manager.py:1235 nova/compute/manager.py:2064
msgid "Success"
msgstr "Éxito"
-#: nova/compute/manager.py:1246
+#: nova/compute/manager.py:1259
msgid "Instance disappeared before we could start it"
msgstr "La instancia ha desaparecido antes de poder iniciarla"
-#: nova/compute/manager.py:1274
+#: nova/compute/manager.py:1286
msgid "Anti-affinity instance group policy was violated."
msgstr "la política de grupo de anti-afinidad fue violada."
-#: nova/compute/manager.py:1351
-msgid "Failed to dealloc network for deleted instance"
-msgstr "No se ha podido desasignar la red para la instancia suprimida"
-
-#: nova/compute/manager.py:1356
+#: nova/compute/manager.py:1369
msgid "Instance disappeared during build"
msgstr "La instancia despareció durante su construcción"
-#: nova/compute/manager.py:1372
-msgid "Failed to dealloc network for failed instance"
-msgstr "Fallo al desasociar red para la instancia fallida"
-
-#: nova/compute/manager.py:1399
+#: nova/compute/manager.py:1412
#, python-format
msgid "Error: %s"
msgstr "Error: %s"
-#: nova/compute/manager.py:1445 nova/compute/manager.py:3473
-msgid "Error trying to reschedule"
-msgstr "Error al intentar volver a programar "
-
-#: nova/compute/manager.py:1500
+#: nova/compute/manager.py:1514
msgid "Instance build timed out. Set to error state."
msgstr ""
"La compilación de instancia ha excedido el tiempo de espera. Se ha estado"
" en estado erróneo. "
-#: nova/compute/manager.py:1510 nova/compute/manager.py:1870
+#: nova/compute/manager.py:1524 nova/compute/manager.py:1894
msgid "Starting instance..."
msgstr "Iniciando instancia..."
-#: nova/compute/manager.py:1528
+#: nova/compute/manager.py:1542
#, python-format
msgid ""
"Treating negative config value (%(retries)s) for "
@@ -5402,135 +5142,72 @@ msgstr ""
"Tratando el valor negativo de configuración (%(retries)s) para "
"'network_allocate_retries' como 0."
-#: nova/compute/manager.py:1553
-#, python-format
-msgid "Instance failed network setup after %(attempts)d attempt(s)"
-msgstr ""
-"La configuración de red de la instancia falló después de %(attempts)d "
-"intento(s)"
-
-#: nova/compute/manager.py:1557
+#: nova/compute/manager.py:1571
#, python-format
msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)"
msgstr ""
"Fallo de configuración de red de la instancia (intento %(attempt)d de "
"%(attempts)d)"
-#: nova/compute/manager.py:1738
-msgid "Instance failed block device setup"
-msgstr "Ha fallado la configuración de dispositivo de bloque en la instancia"
-
-#: nova/compute/manager.py:1758 nova/compute/manager.py:2086
-#: nova/compute/manager.py:3985
-msgid "Instance failed to spawn"
-msgstr "La instancia no se ha podido generar"
-
-#: nova/compute/manager.py:1937
-msgid "Unexpected build failure, not rescheduling build."
-msgstr "Fallo de compilación inesperado, no se reprogramará la compilación."
-
-#: nova/compute/manager.py:2002
+#: nova/compute/manager.py:2027
#, python-format
msgid "Failed to allocate the network(s) with error %s, not rescheduling."
msgstr ""
-#: nova/compute/manager.py:2008 nova/compute/manager.py:2048
-msgid "Failed to allocate network(s)"
-msgstr "Fallo al asociar red(es)"
-
-#: nova/compute/manager.py:2012 nova/compute/manager.py:2050
+#: nova/compute/manager.py:2037 nova/compute/manager.py:2087
msgid "Failed to allocate the network(s), not rescheduling."
msgstr "Fallo al asociar la(s) red(es), no se reprogramará."
-#: nova/compute/manager.py:2074
-msgid "Failure prepping block device"
-msgstr "Fallo al preparar el dispositivo de bloques"
-
-#: nova/compute/manager.py:2076
+#: nova/compute/manager.py:2113
msgid "Failure prepping block device."
msgstr "Fallo al preparar el dispositivo de bloque."
-#: nova/compute/manager.py:2099
+#: nova/compute/manager.py:2134
msgid "Could not clean up failed build, not rescheduling"
msgstr "No se puede limpiar la compilación fallida, no se reprogramará."
-#: nova/compute/manager.py:2109
-msgid "Failed to deallocate networks"
-msgstr "Fallo al desasociar redes"
-
-#: nova/compute/manager.py:2130
-msgid "Failed to cleanup volumes for failed build, not rescheduling"
-msgstr ""
-"Fallo al limpiar los volúmenes para la compilación fallida, no se "
-"reprogramará"
-
-#: nova/compute/manager.py:2169
+#: nova/compute/manager.py:2192
msgid "Failed to deallocate network for instance."
msgstr "Se ha encontrado un error al desasignar la red para la instancia"
-#: nova/compute/manager.py:2178
+#: nova/compute/manager.py:2213
#, python-format
msgid "%(action_str)s instance"
msgstr "%(action_str)s instancia"
-#: nova/compute/manager.py:2222
-#, python-format
-msgid "Ignoring DiskNotFound: %s"
-msgstr "Ignorando DiskNotFound: %s"
-
-#: nova/compute/manager.py:2225
-#, python-format
-msgid "Ignoring VolumeNotFound: %s"
-msgstr "Ignorando VolumeNotFound: %s"
-
-#: nova/compute/manager.py:2324
+#: nova/compute/manager.py:2368
msgid "Instance disappeared during terminate"
msgstr "La instancia ha desaparecido durante la terminación"
-#: nova/compute/manager.py:2330 nova/compute/manager.py:3653
-#: nova/compute/manager.py:5671
-msgid "Setting instance vm_state to ERROR"
-msgstr "Estableciendo el vm_state de la instancia a ERROR"
-
-#: nova/compute/manager.py:2503
+#: nova/compute/manager.py:2554
msgid "Rebuilding instance"
msgstr "Volver a crear instancia"
-#: nova/compute/manager.py:2516
+#: nova/compute/manager.py:2567
msgid "Invalid state of instance files on shared storage"
msgstr "Estado no válido de archivos de instancia en almacenamiento compartido"
-#: nova/compute/manager.py:2520
+#: nova/compute/manager.py:2571
msgid "disk on shared storage, recreating using existing disk"
msgstr ""
"disco en almacenamiento compartido, volviendo a crear utilizando disco "
"existente"
-#: nova/compute/manager.py:2524
+#: nova/compute/manager.py:2575
#, python-format
msgid "disk not on shared storage, rebuilding from: '%s'"
msgstr "El disco on está en almacenamiento compartido, reconstruyendo desde: '%s'"
-#: nova/compute/manager.py:2535 nova/compute/manager.py:4790
-#, python-format
-msgid "Failed to get compute_info for %s"
-msgstr "Fallo al obtener compute_info para %s"
-
-#: nova/compute/manager.py:2611
-#, python-format
-msgid "bringing vm to original state: '%s'"
-msgstr "poniendo vm en estado original: '%s'"
-
-#: nova/compute/manager.py:2642
+#: nova/compute/manager.py:2694
#, python-format
msgid "Detaching from volume api: %s"
msgstr "Desconectando de la API del volumen: %s"
-#: nova/compute/manager.py:2669
+#: nova/compute/manager.py:2721
msgid "Rebooting instance"
msgstr "Rearrancando instancia"
-#: nova/compute/manager.py:2686
+#: nova/compute/manager.py:2738
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
@@ -5539,24 +5216,24 @@ msgstr ""
"intentando rearrancar una instancia que no se está ejecutando: (estado: "
"%(state)s se esperaba: %(running)s)"
-#: nova/compute/manager.py:2722
+#: nova/compute/manager.py:2774
msgid "Reboot failed but instance is running"
msgstr "Ha fallado el reinicio pero la instancia se mantiene en ejecución"
-#: nova/compute/manager.py:2730
+#: nova/compute/manager.py:2782
#, python-format
msgid "Cannot reboot instance: %s"
msgstr "No se puede reiniciar instancia: %s"
-#: nova/compute/manager.py:2742
+#: nova/compute/manager.py:2794
msgid "Instance disappeared during reboot"
msgstr "La instancia ha desaparecido durante el rearranque"
-#: nova/compute/manager.py:2810
+#: nova/compute/manager.py:2862
msgid "instance snapshotting"
msgstr "creación de instantánea de instancia"
-#: nova/compute/manager.py:2816
+#: nova/compute/manager.py:2868
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
@@ -5565,37 +5242,37 @@ msgstr ""
"intentando hacer una instantánea de una instancia que no se está "
"ejecutando: (estado: %(state)s se esperaba: %(running)s)"
-#: nova/compute/manager.py:2854
+#: nova/compute/manager.py:2901
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr "Error al intentar limpiar imagen %s"
+
+#: nova/compute/manager.py:2906
msgid "Image not found during snapshot"
msgstr "No se ha encontrado la imagen durante la instantánea"
-#: nova/compute/manager.py:2936
+#: nova/compute/manager.py:2988
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
"No se ha podido establecer contraseña de administrador. La instancia %s "
"no está ejecutando"
-#: nova/compute/manager.py:2943
+#: nova/compute/manager.py:2995
msgid "Root password set"
msgstr "Contraseña raíz establecida"
-#: nova/compute/manager.py:2948
+#: nova/compute/manager.py:3000
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
"esta instancia de invitado o controlador no implementa set_admin_password"
" ."
-#: nova/compute/manager.py:2961
-#, python-format
-msgid "set_admin_password failed: %s"
-msgstr "set_admin_password ha fallado: %s"
-
-#: nova/compute/manager.py:2967
+#: nova/compute/manager.py:3019
msgid "error setting admin password"
msgstr "error al establecer contraseña de administrador"
-#: nova/compute/manager.py:2983
+#: nova/compute/manager.py:3035
#, python-format
msgid ""
"trying to inject a file into a non-running (state: %(current_state)s "
@@ -5604,12 +5281,12 @@ msgstr ""
"intentando inyectar un archivo hacia un inactivo (estado: "
"%(current_state)s esperado: %(expected_state)s)"
-#: nova/compute/manager.py:2988
+#: nova/compute/manager.py:3040
#, python-format
msgid "injecting file to %s"
msgstr "inyectando archivo a %s"
-#: nova/compute/manager.py:3006
+#: nova/compute/manager.py:3058
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
@@ -5617,34 +5294,30 @@ msgstr ""
"No se ha podido encontrar una imagen diferente para utilizarla para VM de"
" rescate, se utiliza la imagen actual de la instancia"
-#: nova/compute/manager.py:3025
+#: nova/compute/manager.py:3077
msgid "Rescuing"
msgstr "Rescatando"
-#: nova/compute/manager.py:3046
-msgid "Error trying to Rescue Instance"
-msgstr "Error al intentar Rescatar Instancia"
-
-#: nova/compute/manager.py:3050
+#: nova/compute/manager.py:3102
#, python-format
msgid "Driver Error: %s"
msgstr "Error de dispositivo: %s"
-#: nova/compute/manager.py:3073
+#: nova/compute/manager.py:3125
msgid "Unrescuing"
msgstr "Cancelando rescate"
-#: nova/compute/manager.py:3144
+#: nova/compute/manager.py:3196
#, python-format
msgid "Migration %s is not found during confirmation"
msgstr "La migración %s no ha sido encontrada durante la confirmación"
-#: nova/compute/manager.py:3149
+#: nova/compute/manager.py:3201
#, python-format
msgid "Migration %s is already confirmed"
msgstr "La migración %s ya ha sido confirmada"
-#: nova/compute/manager.py:3153
+#: nova/compute/manager.py:3205
#, python-format
msgid ""
"Unexpected confirmation status '%(status)s' of migration %(id)s, exit "
@@ -5653,118 +5326,86 @@ msgstr ""
"Estado de confirmación inesperado '%(status)s' de la migración %(id)s, "
"salir del proceso de confirmación"
-#: nova/compute/manager.py:3167
+#: nova/compute/manager.py:3219
msgid "Instance is not found during confirmation"
msgstr "La instancia no ha sido encontrada durante la confirmación"
-#: nova/compute/manager.py:3348
+#: nova/compute/manager.py:3400
#, python-format
msgid "Updating instance to original state: '%s'"
msgstr "Actualizando el estado original de instancia hacia: '%s'"
-#: nova/compute/manager.py:3371
+#: nova/compute/manager.py:3423
msgid "Instance has no source host"
msgstr "La instancia no tiene ningún host de origen"
-#: nova/compute/manager.py:3377
+#: nova/compute/manager.py:3429
msgid "destination same as source!"
msgstr "destino igual que origen"
-#: nova/compute/manager.py:3395
+#: nova/compute/manager.py:3447
msgid "Migrating"
msgstr "Migrando"
-#: nova/compute/manager.py:3659
-#, python-format
-msgid "Failed to rollback quota for failed finish_resize: %s"
-msgstr "Fallo al revertir las cuotas para un finish_resize fallido: %s"
-
-#: nova/compute/manager.py:3719
+#: nova/compute/manager.py:3784
msgid "Pausing"
msgstr "Poniéndose en pausa"
-#: nova/compute/manager.py:3736
+#: nova/compute/manager.py:3801
msgid "Unpausing"
msgstr "Cancelando la pausa"
-#: nova/compute/manager.py:3777
+#: nova/compute/manager.py:3842 nova/compute/manager.py:3859
msgid "Retrieving diagnostics"
msgstr "Recuperando diagnósticos"
-#: nova/compute/manager.py:3812
+#: nova/compute/manager.py:3895
msgid "Resuming"
msgstr "Reanudando"
-#: nova/compute/manager.py:4028
+#: nova/compute/manager.py:4115
msgid "Get console output"
msgstr "Obtener salida de consola "
-#: nova/compute/manager.py:4227
+#: nova/compute/manager.py:4314
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr "Conectando el volumen %(volume_id)s a %(mountpoint)s"
-#: nova/compute/manager.py:4236
-#, python-format
-msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
-msgstr "Fallo al asociar %(volume_id)s en %(mountpoint)s"
-
-#: nova/compute/manager.py:4252
+#: nova/compute/manager.py:4339
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr "Desconectar el volumen %(volume_id)s del punto de montaje %(mp)s"
-#: nova/compute/manager.py:4263
+#: nova/compute/manager.py:4350
msgid "Detaching volume from unknown instance"
msgstr "Desconectando volumen de instancia desconocida "
-#: nova/compute/manager.py:4275
-#, python-format
-msgid "Failed to detach volume %(volume_id)s from %(mp)s"
-msgstr "No se ha podido desconectar el volumen %(volume_id)s de %(mp)s"
-
-#: nova/compute/manager.py:4348
-#, python-format
-msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
-msgstr "Fallo para intercambiar volúmen %(old_volume_id)s por %(new_volume_id)s"
-
-#: nova/compute/manager.py:4355
-#, python-format
-msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
-msgstr ""
-"Fallo al conectar hacia al volúmen %(volume_id)s con el volumen en "
-"%(mountpoint)s"
-
-#: nova/compute/manager.py:4442
+#: nova/compute/manager.py:4544
#, python-format
msgid "allocate_port_for_instance returned %(ports)s ports"
msgstr "allocate_port_for_instance ha regresado %(ports)s puertos"
-#: nova/compute/manager.py:4462
+#: nova/compute/manager.py:4568
#, python-format
msgid "Port %s is not attached"
msgstr "El puerto %s no se encuentra asignado"
-#: nova/compute/manager.py:4474 nova/tests/compute/test_compute.py:10545
+#: nova/compute/manager.py:4580 nova/tests/compute/test_compute.py:10791
#, python-format
msgid "Host %s not found"
msgstr "No se ha encontrado el host %s"
-#: nova/compute/manager.py:4628
-#, python-format
-msgid "Pre live migration failed at %s"
-msgstr "Previo a migración en vivo falló en %s"
-
-#: nova/compute/manager.py:4658
+#: nova/compute/manager.py:4798
msgid "_post_live_migration() is started.."
msgstr "Se ha iniciado _post_live_migration()."
-#: nova/compute/manager.py:4731
+#: nova/compute/manager.py:4874
#, python-format
msgid "Migrating instance to %s finished successfully."
msgstr "La migración de la instancia hacia %s ha finalizado exitosamente."
-#: nova/compute/manager.py:4733
+#: nova/compute/manager.py:4876
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
@@ -5773,15 +5414,15 @@ msgstr ""
"encontrado: ningún dominio con un nombre coincidente.\" Este error se "
"puede ignorar sin ningún riesgo."
-#: nova/compute/manager.py:4758
+#: nova/compute/manager.py:4901
msgid "Post operation of migration started"
msgstr "Se ha iniciado la operación posterior de migración"
-#: nova/compute/manager.py:4967
+#: nova/compute/manager.py:5106
msgid "An error occurred while refreshing the network cache."
msgstr "Ha ocurrido un error al actualizar el cache de red."
-#: nova/compute/manager.py:5021
+#: nova/compute/manager.py:5159
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
@@ -5790,12 +5431,12 @@ msgstr ""
"Se han encontrado %(migration_count)d migraciones sin confirmar de más de"
" %(confirm_window)d segundos"
-#: nova/compute/manager.py:5026
+#: nova/compute/manager.py:5164
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr "Estableciendo la %(migration_id)s en error: %(reason)s"
-#: nova/compute/manager.py:5035
+#: nova/compute/manager.py:5173
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
@@ -5804,32 +5445,28 @@ msgstr ""
"Confirmando automáticamente la migración %(migration_id)s para la "
"instancia %(instance_uuid)s"
-#: nova/compute/manager.py:5045
+#: nova/compute/manager.py:5183
#, python-format
msgid "Instance %s not found"
msgstr "No se ha encontrado la instancia %s"
-#: nova/compute/manager.py:5050
+#: nova/compute/manager.py:5188
msgid "In ERROR state"
msgstr "En estado de ERROR "
-#: nova/compute/manager.py:5057
+#: nova/compute/manager.py:5195
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None"
msgstr "En los estados %(vm_state)s/%(task_state)s, no REDIMENSIONADO/Ninguno"
-#: nova/compute/manager.py:5068
+#: nova/compute/manager.py:5206
#, python-format
msgid "Error auto-confirming resize: %s. Will retry later."
msgstr ""
"Error auto confirmando modificación de tamaño: %s. Se intentará "
"posteriormente."
-#: nova/compute/manager.py:5097
-msgid "Periodic task failed to offload instance."
-msgstr "Tarea periódica falló al descargar instancia."
-
-#: nova/compute/manager.py:5117
+#: nova/compute/manager.py:5255
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
@@ -5838,20 +5475,15 @@ msgstr ""
"Ejecutando auditoría de uso de instancia para %(host)s desde "
"%(begin_time)s hasta %(end_time)s. %(number_instances)s instancias."
-#: nova/compute/manager.py:5137
-#, python-format
-msgid "Failed to generate usage audit for instance on host %s"
-msgstr "No se ha podido generar auditoría de uso para la instancia en el host %s "
-
-#: nova/compute/manager.py:5166
+#: nova/compute/manager.py:5304
msgid "Updating bandwidth usage cache"
msgstr "Actualizando memoria caché de uso de ancho de banda"
-#: nova/compute/manager.py:5188
+#: nova/compute/manager.py:5326
msgid "Bandwidth usage not supported by hypervisor."
msgstr "Uso de ancho de banda no soportado por el hipervisor."
-#: nova/compute/manager.py:5311
+#: nova/compute/manager.py:5449
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
@@ -5860,20 +5492,7 @@ msgstr ""
"Se han encontrado %(num_db_instances)s en la base de datos y "
"%(num_vm_instances)s en el hipervisor."
-#: nova/compute/manager.py:5318 nova/compute/manager.py:5381
-#, python-format
-msgid "During sync_power_state the instance has a pending task (%(task)s). Skip."
-msgstr ""
-"Durante sync_power_state la instancia ha dejado una tarea pendiente "
-"(%(task)s). Omitir."
-
-#: nova/compute/manager.py:5342
-msgid "Periodic sync_power_state task had an error while processing an instance."
-msgstr ""
-"La tarea periódica sync_power_state ha tenido un error al procesar una "
-"instancia."
-
-#: nova/compute/manager.py:5368
+#: nova/compute/manager.py:5515
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
@@ -5882,63 +5501,65 @@ msgstr ""
"Durante el proceso sync_power, la instancia se ha movido del host %(src)s"
" al host %(dst)s"
-#: nova/compute/manager.py:5406
+#: nova/compute/manager.py:5528
+#, python-format
+msgid "During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+"Durante sync_power_state la instancia ha dejado una tarea pendiente "
+"(%(task)s). Omitir."
+
+#: nova/compute/manager.py:5553
msgid "Instance shutdown by itself. Calling the stop API."
msgstr "Conclusión de instancia por sí misma. Llamando a la API de detención."
-#: nova/compute/manager.py:5418 nova/compute/manager.py:5427
-#: nova/compute/manager.py:5458 nova/compute/manager.py:5469
-msgid "error during stop() in sync_power_state."
-msgstr "error durante stop() en sync_power_state."
-
-#: nova/compute/manager.py:5422
+#: nova/compute/manager.py:5572
msgid "Instance is suspended unexpectedly. Calling the stop API."
msgstr ""
"La instancia se ha suspendido inesperadamente. Llamando a la API de "
"detención."
-#: nova/compute/manager.py:5438
+#: nova/compute/manager.py:5588
msgid "Instance is paused unexpectedly. Ignore."
msgstr "La instancia se ha puesto en pausa inesperadamente. Ignorar. "
-#: nova/compute/manager.py:5444
+#: nova/compute/manager.py:5594
msgid "Instance is unexpectedly not found. Ignore."
msgstr "La instancia no se encuentra inesperadamente. Ignorar. "
-#: nova/compute/manager.py:5450
+#: nova/compute/manager.py:5600
msgid "Instance is not stopped. Calling the stop API."
msgstr "La instancia no se ha detenido. Llamando a la API de detención."
-#: nova/compute/manager.py:5464
+#: nova/compute/manager.py:5614
msgid "Paused instance shutdown by itself. Calling the stop API."
msgstr ""
"La instancia pausada se ha apagado a si misma. Llamando la API de "
"detención."
-#: nova/compute/manager.py:5478
+#: nova/compute/manager.py:5628
msgid "Instance is not (soft-)deleted."
msgstr "La instancia no se suprime (de forma no permanente). "
-#: nova/compute/manager.py:5507
+#: nova/compute/manager.py:5658
msgid "Reclaiming deleted instance"
msgstr "Reclamando instancia suprimida"
-#: nova/compute/manager.py:5511
+#: nova/compute/manager.py:5662
#, python-format
msgid "Periodic reclaim failed to delete instance: %s"
msgstr "Reclamación periódica falló al eliminar instancia: %s"
-#: nova/compute/manager.py:5536
+#: nova/compute/manager.py:5687
#, python-format
msgid "Deleting orphan compute node %s"
msgstr "Eliminando nodo de cómputo huérfano %s"
-#: nova/compute/manager.py:5544 nova/compute/resource_tracker.py:392
+#: nova/compute/manager.py:5695 nova/compute/resource_tracker.py:406
#, python-format
msgid "No service record for host %s"
msgstr "Ningún registro de servicio para el host %s "
-#: nova/compute/manager.py:5585
+#: nova/compute/manager.py:5735
#, python-format
msgid ""
"Detected instance with name label '%s' which is marked as DELETED but "
@@ -5948,7 +5569,7 @@ msgstr ""
" marcada como ELIMINADA pero todavía se encuentra presente en el "
"anfitrión."
-#: nova/compute/manager.py:5591
+#: nova/compute/manager.py:5741
#, python-format
msgid ""
"Powering off instance with name label '%s' which is marked as DELETED but"
@@ -5957,15 +5578,15 @@ msgstr ""
"Apagando la instancia con nombre '%s' que está marcada como ELIMINADA "
"pero sigue presente en el anfitrión."
-#: nova/compute/manager.py:5600
+#: nova/compute/manager.py:5750
msgid "set_bootable is not implemented for the current driver"
msgstr "set_bootable no está implementado en el controlador actual"
-#: nova/compute/manager.py:5605
+#: nova/compute/manager.py:5755
msgid "Failed to power off instance"
msgstr "Fallo al apagar la instancia"
-#: nova/compute/manager.py:5609
+#: nova/compute/manager.py:5759
#, python-format
msgid ""
"Destroying instance with name label '%s' which is marked as DELETED but "
@@ -5974,27 +5595,27 @@ msgstr ""
"Desrtuyendo instancia con etiqueta de nombre '%s' la cual ha sido marcada"
" como ELIMINADA pero todavía se encuentra presente en el anfitrión."
-#: nova/compute/manager.py:5619
+#: nova/compute/manager.py:5769
#, python-format
msgid "Periodic cleanup failed to delete instance: %s"
msgstr "Limpieza periódica falló al eliminar la instancia: %s"
-#: nova/compute/manager.py:5623
+#: nova/compute/manager.py:5773
#, python-format
msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action"
msgstr "Valor '%s' no reconocido para CONF.running_deleted_instance_action"
-#: nova/compute/manager.py:5654
+#: nova/compute/manager.py:5805
#, python-format
msgid "Setting instance back to %(state)s after: %(error)s"
msgstr "Estableciendo la instancia de vuelta a %(state)s tras: %(error)s"
-#: nova/compute/manager.py:5664
+#: nova/compute/manager.py:5815
#, python-format
msgid "Setting instance back to ACTIVE after: %s"
msgstr "Marcando la instancia de nuevo como ACTIVA después de: %s"
-#: nova/compute/resource_tracker.py:106
+#: nova/compute/resource_tracker.py:111
msgid ""
"Host field should not be set on the instance until resources have been "
"claimed."
@@ -6002,7 +5623,7 @@ msgstr ""
"El campo de host no se debe establecer en la instancia hasta que los "
"recursos se hayan reclamado."
-#: nova/compute/resource_tracker.py:111
+#: nova/compute/resource_tracker.py:116
msgid ""
"Node field should not be set on the instance until resources have been "
"claimed."
@@ -6010,16 +5631,16 @@ msgstr ""
"El campo Nodo no debe ser establecido en la instancia hasta que los "
"recursos han sido reclamados."
-#: nova/compute/resource_tracker.py:273
+#: nova/compute/resource_tracker.py:276
#, python-format
msgid "Cannot get the metrics from %s."
msgstr "No se pueden obtener las métricas de %s."
-#: nova/compute/resource_tracker.py:292
+#: nova/compute/resource_tracker.py:295
msgid "Auditing locally available compute resources"
msgstr "Auditando recursos de cálculo disponibles localmente"
-#: nova/compute/resource_tracker.py:297
+#: nova/compute/resource_tracker.py:300
msgid ""
"Virt driver does not support 'get_available_resource' Compute tracking "
"is disabled."
@@ -6027,54 +5648,56 @@ msgstr ""
"El controlador Virt no soporta 'get_available_resource'. El seguimiento "
"de cálculo está inhabilitado."
-#: nova/compute/resource_tracker.py:372
+#: nova/compute/resource_tracker.py:375
#, python-format
msgid "Compute_service record created for %(host)s:%(node)s"
msgstr "Registro compute_service creado para %(host)s:%(node)s"
-#: nova/compute/resource_tracker.py:378
+#: nova/compute/resource_tracker.py:381
#, python-format
msgid "Compute_service record updated for %(host)s:%(node)s"
msgstr "El registro compute_service se ha actualizado para %(host)s:%(node)s"
-#: nova/compute/resource_tracker.py:431
+#: nova/compute/resource_tracker.py:446
#, python-format
-msgid "Free ram (MB): %s"
-msgstr "RAM libre (MB): %s "
+msgid ""
+"Total physical ram (MB): %(pram)s, total allocated virtual ram (MB): "
+"%(vram)s"
+msgstr ""
-#: nova/compute/resource_tracker.py:432
+#: nova/compute/resource_tracker.py:450
#, python-format
msgid "Free disk (GB): %s"
msgstr "Disco libre (GB): %s "
-#: nova/compute/resource_tracker.py:437
+#: nova/compute/resource_tracker.py:454
#, python-format
-msgid "Free VCPUS: %s"
-msgstr "VCPUS libres: %s"
+msgid "Total usable vcpus: %(tcpu)s, total allocated vcpus: %(ucpu)s"
+msgstr ""
-#: nova/compute/resource_tracker.py:439
+#: nova/compute/resource_tracker.py:458
msgid "Free VCPU information unavailable"
msgstr "Información de VCPU libre no disponible"
-#: nova/compute/resource_tracker.py:442
+#: nova/compute/resource_tracker.py:461
#, python-format
msgid "PCI stats: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:478
+#: nova/compute/resource_tracker.py:512
#, python-format
msgid "Updating from migration %s"
msgstr "Actualizando desde la migración %s"
-#: nova/compute/resource_tracker.py:545
+#: nova/compute/resource_tracker.py:577
msgid "Instance not resizing, skipping migration."
msgstr "La instancia no se está redimensionando, se salta la migración."
-#: nova/compute/resource_tracker.py:560
+#: nova/compute/resource_tracker.py:592
msgid "Flavor could not be found, skipping migration."
msgstr "El sabor no puede ser encontrado, omitiendo migración."
-#: nova/compute/resource_tracker.py:650
+#: nova/compute/resource_tracker.py:682
#, python-format
msgid ""
"Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB "
@@ -6083,47 +5706,25 @@ msgstr ""
"Se ha detectado una instancia huérfana en ejecución: %(uuid)s "
"(consumiento %(memory_mb)s MB de memoria)"
-#: nova/compute/resource_tracker.py:664
+#: nova/compute/resource_tracker.py:696
#, python-format
msgid "Missing keys: %s"
msgstr "Faltan claves: %s"
#: nova/compute/rpcapi.py:58
msgid "No compute host specified"
-msgstr "No se ha especificado ningún host de cálculo"
-
-#: nova/compute/rpcapi.py:60
-#, python-format
-msgid "Unable to find host for Instance %s"
-msgstr "No se puede encontrar el host para la instancia %s "
-
-#: nova/compute/utils.py:209
-#, python-format
-msgid "Can't access image %(image_id)s: %(error)s"
-msgstr "No se puede acceder a la imagen %(image_id)s: %(error)s"
-
-#: nova/compute/utils.py:333
-#, python-format
-msgid ""
-"No host name specified for the notification of HostAPI.%s and it will be "
-"ignored"
-msgstr ""
-"No ha sido especificado un nombre de anfitrión para la notificación de "
-"HostAPI.%s y será ignorada"
+msgstr "No se ha especificado ningún host de cálculo"
-#: nova/compute/utils.py:461
+#: nova/compute/rpcapi.py:60
#, python-format
-msgid ""
-"Value of 0 or None specified for %s. This behaviour will change in "
-"meaning in the K release, to mean 'call at the default rate' rather than "
-"'do not call'. To keep the 'do not call' behaviour, use a negative value."
+msgid "Unable to find host for Instance %s"
+msgstr "No se puede encontrar el host para la instancia %s "
+
+#: nova/compute/stats.py:49
+msgid "Unexpected type adding stats"
msgstr ""
-"Un valor de 0 o Ninguno especificado para %s. Este comportamiento "
-"cambiará en el transcurso de la liberación K, para definir 'llamada en la"
-" tasa predeterminada' en lugar de 'no llamar'. Para mantener el "
-"comportamiento 'no llamar', utiliza un valor negativo."
-#: nova/compute/monitors/__init__.py:177
+#: nova/compute/monitors/__init__.py:176
#, python-format
msgid ""
"Excluding monitor %(monitor_name)s due to metric name overlap; "
@@ -6132,12 +5733,12 @@ msgstr ""
"Excluyendo el monitor %(monitor_name)s debido a superposición de nombre "
"de métrica; metricas superpuestas: %(overlap)s"
-#: nova/compute/monitors/__init__.py:185
+#: nova/compute/monitors/__init__.py:184
#, python-format
msgid "Monitor %(monitor_name)s cannot be used: %(ex)s"
msgstr "El monitor %(monitor_name)s no puede ser utilizado: %(ex)s"
-#: nova/compute/monitors/__init__.py:191
+#: nova/compute/monitors/__init__.py:190
#, python-format
msgid "The following monitors have been disabled: %s"
msgstr "Los siguientes monitores han sido deshabilitados: %s"
@@ -6149,27 +5750,27 @@ msgstr ""
"No todas las propiedades necesarias están implementadas en el controlador"
" de cómputo: %s"
-#: nova/conductor/api.py:300
+#: nova/conductor/api.py:315
msgid "nova-conductor connection established successfully"
msgstr ""
-#: nova/conductor/api.py:305
+#: nova/conductor/api.py:320
msgid ""
"Timed out waiting for nova-conductor. Is it running? Or did this service"
" start before nova-conductor? Reattempting establishment of nova-"
"conductor connection..."
msgstr ""
-#: nova/conductor/manager.py:124
+#: nova/conductor/manager.py:123
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr "Se intentado actualizar instancia para '%(key)s' en %(instance_uuid)s"
-#: nova/conductor/manager.py:522
+#: nova/conductor/manager.py:519
msgid "No valid host found for cold migrate"
msgstr "No se ha encontrado anfitrión para migración en frío"
-#: nova/conductor/manager.py:586
+#: nova/conductor/manager.py:582
#, python-format
msgid ""
"Migration of instance %(instance_id)s to host %(dest)s unexpectedly "
@@ -6178,21 +5779,25 @@ msgstr ""
"La migración de la instancia %(instance_id)s al anfitrión %(dest)s ha "
"fallado inesperadamente."
-#: nova/conductor/manager.py:673
+#: nova/conductor/manager.py:669
#, python-format
msgid "Unshelve attempted but the image %s cannot be found."
msgstr "Se ha intentado la extracción pero la imagen %s no ha sido encontrada."
-#: nova/conductor/manager.py:696
+#: nova/conductor/manager.py:692
msgid "No valid host found for unshelve instance"
msgstr "No se ha encontrado anfitrión válido para extraer instancia"
-#: nova/conductor/manager.py:700
+#: nova/conductor/manager.py:696
msgid "Unshelve attempted but vm_state not SHELVED or SHELVED_OFFLOADED"
msgstr ""
"Se ha intentado desarchivar pero vm_state no se encuentra como SHELVED o "
"SHELVED_OFFLOADED"
+#: nova/conductor/manager.py:733
+msgid "No valid host found for rebuild"
+msgstr ""
+
#: nova/conductor/tasks/live_migrate.py:113
#, python-format
msgid ""
@@ -6265,51 +5870,51 @@ msgstr "No se ha podido notificar a las células la destrucción de instancia"
msgid "Failed to notify cells of instance update"
msgstr "No se ha podido notificar a las células la actualización de instancia"
-#: nova/db/api.py:1685
+#: nova/db/api.py:1683
msgid "Failed to notify cells of bw_usage update"
msgstr "No se ha podido notificar a las células la actualización de bw_usage"
-#: nova/db/sqlalchemy/api.py:204
+#: nova/db/sqlalchemy/api.py:207
#, python-format
msgid "Deadlock detected when running '%(func_name)s': Retrying..."
msgstr "Punto muerto detectado al ejecutar '%(func_name)s': Reintentando..."
-#: nova/db/sqlalchemy/api.py:245
+#: nova/db/sqlalchemy/api.py:248
msgid "model or base_model parameter should be subclass of NovaBase"
msgstr "El parámetro model o base_model debe ser una subclase de NovaBase"
-#: nova/db/sqlalchemy/api.py:258
-#: nova/openstack/common/db/sqlalchemy/utils.py:174
-#: nova/virt/baremetal/db/sqlalchemy/api.py:60
+#: nova/db/sqlalchemy/api.py:261
+#: nova/openstack/common/db/sqlalchemy/utils.py:173
+#: nova/virt/baremetal/db/sqlalchemy/api.py:61
#, python-format
msgid "Unrecognized read_deleted value '%s'"
msgstr "Valor de read_deleted no reconocido '%s'"
-#: nova/db/sqlalchemy/api.py:745
+#: nova/db/sqlalchemy/api.py:753
#, python-format
msgid "Invalid floating ip id %s in request"
msgstr "Identificador de dirección IP flotante inválida %s en solicitud"
-#: nova/db/sqlalchemy/api.py:850
+#: nova/db/sqlalchemy/api.py:858
msgid "Failed to update usages bulk deallocating floating IP"
msgstr "Fallo al actualizar uso de desasignación masiva de IP fotante"
-#: nova/db/sqlalchemy/api.py:1006
+#: nova/db/sqlalchemy/api.py:1007
#, python-format
msgid "Invalid floating IP %s in request"
msgstr "Dirección IP flotante inválida %s en la solicitud"
-#: nova/db/sqlalchemy/api.py:1308 nova/db/sqlalchemy/api.py:1347
+#: nova/db/sqlalchemy/api.py:1310 nova/db/sqlalchemy/api.py:1349
#, python-format
msgid "Invalid fixed IP Address %s in request"
msgstr "Dirección IP fija inválida %s en la solicitud"
-#: nova/db/sqlalchemy/api.py:1482
+#: nova/db/sqlalchemy/api.py:1484
#, python-format
msgid "Invalid virtual interface address %s in request"
msgstr "Dirección de interfaz virtual inválida %s en la solicitud"
-#: nova/db/sqlalchemy/api.py:1576
+#: nova/db/sqlalchemy/api.py:1578
#, python-format
msgid ""
"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
@@ -6318,22 +5923,22 @@ msgstr ""
"Valor de osapi_compute_unique_server_name_scope desconocido: %s El "
"distintivo debe ser vacío, \"global\" o \"project\""
-#: nova/db/sqlalchemy/api.py:1735
+#: nova/db/sqlalchemy/api.py:1738
#, python-format
msgid "Invalid instance id %s in request"
msgstr "ID de instancia %s no válido en la solicitud."
-#: nova/db/sqlalchemy/api.py:2013
+#: nova/db/sqlalchemy/api.py:2017
#, python-format
msgid "Invalid field name: %s"
msgstr "Campo de nombre inválido: %s"
-#: nova/db/sqlalchemy/api.py:3242
+#: nova/db/sqlalchemy/api.py:3246
#, python-format
msgid "Change will make usage less than 0 for the following resources: %s"
msgstr "El cambio hará el uso menos de 0 para los siguientes recursos: %s"
-#: nova/db/sqlalchemy/api.py:4892
+#: nova/db/sqlalchemy/api.py:4898
#, python-format
msgid ""
"Volume(%s) has lower stats then what is in the database. Instance must "
@@ -6343,14 +5948,14 @@ msgstr ""
"datos. la instancia debió haber reiniciado o colapsado. Actualizando los "
"totales."
-#: nova/db/sqlalchemy/api.py:5249
+#: nova/db/sqlalchemy/api.py:5262
#, python-format
msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries"
msgstr ""
"Fallo en adición de metadata para el agregado %(id)s después de "
"%(retries)s intentos"
-#: nova/db/sqlalchemy/api.py:5639
+#: nova/db/sqlalchemy/api.py:5652
#, python-format
msgid "IntegrityError detected when archiving table %s"
msgstr "Se ha detectado un IntegrityError al archivar la tabla %s"
@@ -6385,15 +5990,15 @@ msgstr ""
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "Columna extra %(table)s.%(column)s en la tabla shadow"
-#: nova/db/sqlalchemy/utils.py:105
+#: nova/db/sqlalchemy/utils.py:103
msgid "Specify `table_name` or `table` param"
msgstr "Especificar parámetro `table_name` o `table`"
-#: nova/db/sqlalchemy/utils.py:108
+#: nova/db/sqlalchemy/utils.py:106
msgid "Specify only one param `table_name` `table`"
msgstr "Especificar solamente un parámetro `table_name` `table`"
-#: nova/db/sqlalchemy/utils.py:131 nova/db/sqlalchemy/utils.py:135
+#: nova/db/sqlalchemy/utils.py:129 nova/db/sqlalchemy/utils.py:133
#: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:84
#: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:1103
msgid "Exception while creating table."
@@ -6403,7 +6008,7 @@ msgstr "Excepción al crear la tabla."
msgid "Exception while seeding instance_types table"
msgstr "Excepción al iniciar la tabla instance_types"
-#: nova/image/glance.py:231
+#: nova/image/glance.py:235
#, python-format
msgid ""
"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
@@ -6412,7 +6017,7 @@ msgstr ""
"Error al contactar con el servidor de glance '%(host)s:%(port)s' para "
"'%(method)s', %(extra)s."
-#: nova/image/glance.py:265
+#: nova/image/glance.py:267
#, python-format
msgid ""
"When loading the module %(module_str)s the following error occurred: "
@@ -6421,12 +6026,12 @@ msgstr ""
"Al cargar el módulo %(module_str)s se ha presentado el siguiente error: "
"%(ex)s"
-#: nova/image/glance.py:303
+#: nova/image/glance.py:326
#, python-format
msgid "Failed to instantiate the download handler for %(scheme)s"
msgstr "Fallo al instanciar el manejador de descargas para %(scheme)s"
-#: nova/image/glance.py:319
+#: nova/image/glance.py:342
#, python-format
msgid "Successfully transferred using %s"
msgstr "Exitosamente transferido utilizando %s"
@@ -6587,16 +6192,16 @@ msgstr ""
msgid "Not deleting key %s"
msgstr "Sin eliminar la clave %s"
-#: nova/network/api.py:198 nova/network/neutronv2/api.py:797
+#: nova/network/api.py:196 nova/network/neutronv2/api.py:845
#, python-format
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr "volver a asignar IP flotante %(address)s desde instancia %(instance_id)s"
-#: nova/network/base_api.py:49
+#: nova/network/base_api.py:48
msgid "Failed storing info cache"
msgstr "Ha fallado el almacenamiento de memoria caché de información"
-#: nova/network/base_api.py:68
+#: nova/network/base_api.py:67
msgid "instance is a required argument to use @refresh_cache"
msgstr "la instancia es un argumento necesario para utilizar @refresh_cache "
@@ -6609,51 +6214,51 @@ msgstr "La opción de controlador de red es necesaria, pero no se ha especificad
msgid "Loading network driver '%s'"
msgstr "Cargando controlador de red '%s'"
-#: nova/network/floating_ips.py:90
+#: nova/network/floating_ips.py:85
#, python-format
msgid "Fixed ip %s not found"
msgstr "Direción IP fija %s no encontrada"
-#: nova/network/floating_ips.py:180
+#: nova/network/floating_ips.py:176
#, python-format
msgid "Floating IP %s is not associated. Ignore."
msgstr "La IP flotante %s no está asociada. Ignorar."
-#: nova/network/floating_ips.py:199
+#: nova/network/floating_ips.py:195
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr "La dirección |%(address)s| no está asignada"
-#: nova/network/floating_ips.py:203
+#: nova/network/floating_ips.py:199
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr "La dirección |%(address)s| no está asignada al proyecto |%(project)s|"
-#: nova/network/floating_ips.py:223
+#: nova/network/floating_ips.py:219
#, python-format
msgid "Quota exceeded for %s, tried to allocate floating IP"
msgstr "Cuota excedida para %s, intentando asignar dirección IP flotante"
-#: nova/network/floating_ips.py:283
+#: nova/network/floating_ips.py:278
msgid "Failed to update usages deallocating floating IP"
msgstr "No se han podido actualizar los usos desasignando IP flotante "
-#: nova/network/floating_ips.py:385
+#: nova/network/floating_ips.py:376
#, python-format
msgid "Failed to disassociated floating address: %s"
msgstr "Fallo al desasociar la dirección IP flotante: %s"
-#: nova/network/floating_ips.py:390
+#: nova/network/floating_ips.py:381
#, python-format
msgid "Interface %s not found"
msgstr "Interfaz %s no encontrada"
-#: nova/network/floating_ips.py:553
+#: nova/network/floating_ips.py:540
#, python-format
msgid "Starting migration network for instance %s"
msgstr "Comenzando migración de red para la instancia %s"
-#: nova/network/floating_ips.py:560
+#: nova/network/floating_ips.py:546
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
@@ -6662,12 +6267,12 @@ msgstr ""
"La dirección IP flotante | %(address)s | ya no pertentece a la instancia "
"%(instance_uuid)s. No será migrada"
-#: nova/network/floating_ips.py:593
+#: nova/network/floating_ips.py:575
#, python-format
msgid "Finishing migration network for instance %s"
msgstr "Finalizando la migración de red para la instancia %s"
-#: nova/network/floating_ips.py:601
+#: nova/network/floating_ips.py:582
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
@@ -6676,7 +6281,7 @@ msgstr ""
"La dirección IP flotante |%(address)s| ya no pertenece a la instancia "
"%(instance_uuid)s. No se configurará."
-#: nova/network/floating_ips.py:644
+#: nova/network/floating_ips.py:625
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -6687,12 +6292,12 @@ msgstr ""
"base de datos Nova pero no es visible para el controlador DNS de "
"instancia o flotante. Se ignorará."
-#: nova/network/floating_ips.py:684
+#: nova/network/floating_ips.py:665
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr "El dominio |%(domain)s| ya existe, cambiando zona a |%(av_zone)s|."
-#: nova/network/floating_ips.py:693
+#: nova/network/floating_ips.py:674
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr "El dominio |%(domain)s| ya existe, cambiando el proyecto a |%(project)s."
@@ -6723,17 +6328,17 @@ msgstr "Este controlador sólo soporta entradas de tipo 'a'."
msgid "This shouldn't be getting called except during testing."
msgstr "Esto no se debe llamar excepto durante las pruebas. "
-#: nova/network/linux_net.py:227
+#: nova/network/linux_net.py:232
#, python-format
msgid "Attempted to remove chain %s which does not exist"
msgstr "Se ha intentado eliminar la cadena %s que no existe"
-#: nova/network/linux_net.py:263
+#: nova/network/linux_net.py:268
#, python-format
msgid "Unknown chain: %r"
msgstr "Cadena desconocida: %r"
-#: nova/network/linux_net.py:294
+#: nova/network/linux_net.py:301
#, python-format
msgid ""
"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
@@ -6742,52 +6347,52 @@ msgstr ""
"Se ha intentado eliminar una regla que no estaba allí: %(chain)r %(rule)r"
" %(wrap)r %(top)r"
-#: nova/network/linux_net.py:762
+#: nova/network/linux_net.py:777
#, python-format
msgid "Removed %(num)d duplicate rules for floating ip %(float)s"
msgstr "Se han eliminado %(num)d reglas duplicadas para la IP flotante %(float)s"
-#: nova/network/linux_net.py:810
+#: nova/network/linux_net.py:825
#, python-format
msgid "Error deleting conntrack entries for %s"
msgstr "Error al eliminar las entradas conntrack para %s"
-#: nova/network/linux_net.py:1068
+#: nova/network/linux_net.py:1091
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr "Excepción al recargar la configuración de dnsmasq: %s"
-#: nova/network/linux_net.py:1150
+#: nova/network/linux_net.py:1172
#, python-format
msgid "killing radvd threw %s"
msgstr "Matando radvd lanzado %s"
-#: nova/network/linux_net.py:1302
+#: nova/network/linux_net.py:1333
#, python-format
msgid "Unable to execute %(cmd)s. Exception: %(exception)s"
msgstr "No se puede ejecutar %(cmd)s. Excepción: %(exception)s"
-#: nova/network/linux_net.py:1360
+#: nova/network/linux_net.py:1391
#, python-format
msgid "Failed removing net device: '%s'"
msgstr "Fallo al remover dispositivo de red: '%s'"
-#: nova/network/linux_net.py:1532
+#: nova/network/linux_net.py:1568
#, python-format
msgid "Adding interface %(interface)s to bridge %(bridge)s"
msgstr "Añadiendo la interfaz %(interface)s al puente %(bridge)s"
-#: nova/network/linux_net.py:1538
+#: nova/network/linux_net.py:1574
#, python-format
msgid "Failed to add interface: %s"
msgstr "No se ha podido añadir interfaz: %s "
-#: nova/network/manager.py:836
+#: nova/network/manager.py:813
#, python-format
msgid "instance-dns-zone not found |%s|."
msgstr "instance-dns-zone no encontrada |%s|"
-#: nova/network/manager.py:843
+#: nova/network/manager.py:820
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -6798,56 +6403,51 @@ msgstr ""
"|%(zone)s|. La instancia está en la zona |%(zone2)s|. No se creará ningún"
" registro de DNS."
-#: nova/network/manager.py:882
-#, python-format
-msgid "Quota exceeded for %s, tried to allocate fixed IP"
-msgstr "Cuota excedida para %s, intentando asignar dirección IP flotante"
-
-#: nova/network/manager.py:942
+#: nova/network/manager.py:943
msgid "Error cleaning up fixed ip allocation. Manual cleanup may be required."
msgstr ""
-#: nova/network/manager.py:972
+#: nova/network/manager.py:973
msgid "Failed to update usages deallocating fixed IP"
msgstr ""
"Se ha encontrado un error en la actualización de los usos desasignando IP"
" flotante"
-#: nova/network/manager.py:996
+#: nova/network/manager.py:997
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr "No se puede liberar %s porque vif no existe."
-#: nova/network/manager.py:1037
+#: nova/network/manager.py:1038
#, python-format
msgid "IP %s leased that is not associated"
msgstr "La IP %s alquilada que no está asociada "
-#: nova/network/manager.py:1043
+#: nova/network/manager.py:1044
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr "IP |%s| alquilada que no está asignada"
-#: nova/network/manager.py:1052
+#: nova/network/manager.py:1053
#, python-format
msgid "IP %s released that is not associated"
msgstr "IP %s liberada que no está asociada"
-#: nova/network/manager.py:1056
+#: nova/network/manager.py:1057
#, python-format
msgid "IP %s released that was not leased"
msgstr "IP %s liberada que no está alquilada"
-#: nova/network/manager.py:1074
+#: nova/network/manager.py:1075
#, python-format
msgid "%s must be an integer"
msgstr "%s debe ser un entero "
-#: nova/network/manager.py:1106
+#: nova/network/manager.py:1107
msgid "Maximum allowed length for 'label' is 255."
msgstr "La longitud máxima permitida para 'label' es 255."
-#: nova/network/manager.py:1126
+#: nova/network/manager.py:1127
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
@@ -6856,18 +6456,18 @@ msgstr ""
"Subred(es) demasiado grande(s), se usará el valor predeterminado /%s. "
"Para sustituirlo, especifique el distintivo network_size."
-#: nova/network/manager.py:1211
+#: nova/network/manager.py:1212
msgid "cidr already in use"
msgstr "cidr ya se está utilizando"
-#: nova/network/manager.py:1214
+#: nova/network/manager.py:1215
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
"cidr solicitado (%(cidr)s) está en conflicto con superred existente "
"(%(super)s)"
-#: nova/network/manager.py:1225
+#: nova/network/manager.py:1226
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
@@ -6881,7 +6481,7 @@ msgstr ""
msgid "Network must be disassociated from project %s before delete"
msgstr "La red se debe desasociar el proyecto %s antes de la supresión"
-#: nova/network/manager.py:1949
+#: nova/network/manager.py:1955
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
@@ -6889,7 +6489,7 @@ msgstr ""
"La suma entre el número de redes y el inicio de vlan no puede ser mayor "
"que 4094"
-#: nova/network/manager.py:1956
+#: nova/network/manager.py:1962
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s networks. "
@@ -6921,30 +6521,23 @@ msgstr "Se ha eliminado %s"
msgid "Cannot delete domain |%s|"
msgstr "No se puede suprimir el dominio |%s|"
-#: nova/network/model.py:94
+#: nova/network/model.py:96
#, python-format
msgid "Invalid IP format %s"
msgstr "Formato IP inválido %s"
-#: nova/network/neutronv2/api.py:212
-msgid "Neutron error: quota exceeded"
-msgstr "Error de Neutron: cuota excedida"
-
-#: nova/network/neutronv2/api.py:215
-#, python-format
-msgid "Neutron error creating port on network %s"
-msgstr "Error de Neutron al crear puerto en la red: %s"
-
-#: nova/network/neutronv2/api.py:248
+#: nova/network/neutronv2/api.py:269
#, python-format
msgid "empty project id for instance %s"
msgstr "ID de proyecto vacío para la instancia %s"
-#: nova/network/neutronv2/api.py:283
-msgid "No network configured!"
-msgstr "No hay red configurada!"
+#: nova/network/neutronv2/api.py:313 nova/network/neutronv2/api.py:678
+msgid "Multiple possible networks found, use a Network ID to be more specific."
+msgstr ""
+"Se han encontrado múltiples redes posibles, usa un identificador de red "
+"para ser más específico."
-#: nova/network/neutronv2/api.py:303
+#: nova/network/neutronv2/api.py:335
#, python-format
msgid ""
"Multiple security groups found matching '%s'. Use an ID to be more "
@@ -6953,89 +6546,18 @@ msgstr ""
"Se han encontrado varios grupos de seguridad que coinciden con '%s'. "
"Utilice un ID para ser más específico."
-#: nova/network/neutronv2/api.py:373
-#, python-format
-msgid "Failed to update port %s"
-msgstr "Falló al actualizar el puerto %s"
-
-#: nova/network/neutronv2/api.py:380
-#, python-format
-msgid "Failed to delete port %s"
-msgstr "Ha ocurrido un fallo al eliminar el puerto %s"
-
-#: nova/network/neutronv2/api.py:443
+#: nova/network/neutronv2/api.py:489
#, python-format
msgid "Unable to reset device ID for port %s"
msgstr ""
-#: nova/network/neutronv2/api.py:451
-#, python-format
-msgid "Port %s does not exist"
-msgstr "El puerto %s no existe"
-
-#: nova/network/neutronv2/api.py:454 nova/network/neutronv2/api.py:478
-#, python-format
-msgid "Failed to delete neutron port %s"
-msgstr "Fallo al eliminar el puerto de neutron %s"
-
-#: nova/network/neutronv2/api.py:576
-#, python-format
-msgid ""
-"Unable to update port %(portid)s on subnet %(subnet_id)s with failure: "
-"%(exception)s"
-msgstr ""
-"No se ha podido actualizar el puerto %(portid)s en la subred "
-"%(subnet_id)s con la anomalía: %(exception)s"
-
-#: nova/network/neutronv2/api.py:605
-#, python-format
-msgid "Unable to update port %(portid)s with failure: %(exception)s"
-msgstr "No se puede actualizar el puerto %(portid)s con anomalía: %(exception)s"
-
-#: nova/network/neutronv2/api.py:632
-msgid "Multiple possible networks found, use a Network ID to be more specific."
-msgstr ""
-"Se han encontrado múltiples redes posibles, usa un identificador de red "
-"para ser más específico."
-
-#: nova/network/neutronv2/api.py:651
-#, python-format
-msgid "Failed to access port %s"
-msgstr "Fallo al acceder al puerto %s"
-
-#: nova/network/neutronv2/api.py:880
-#, python-format
-msgid "Unable to access floating IP %s"
-msgstr "Incapaz de acceder a la Ip flotante %s"
-
-#: nova/network/neutronv2/api.py:968
+#: nova/network/neutronv2/api.py:1021
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
"Se han encontrado varias coincidencias de agrupaciones de IP flotante "
"para el nombre '%s' "
-#: nova/network/neutronv2/api.py:1012
-#, python-format
-msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
-msgstr ""
-"Incapaz de acceder a la IP flotante %(fixed_ip)s para el puerto "
-"%(port_id)s"
-
-#: nova/network/neutronv2/api.py:1071
-#, python-format
-msgid "Unable to update host of port %s"
-msgstr "Incapaz de actualizar el anfitrión del puerto %s"
-
-#: nova/network/neutronv2/api.py:1107
-#, python-format
-msgid ""
-"Network %(id)s not matched with the tenants network! The ports tenant "
-"%(tenant_id)s will be used."
-msgstr ""
-"La red %(id)s no coincide con las redes de los inquilinos! El puerto del "
-"inquilino %(tenant_id)s será utilizado."
-
#: nova/network/security_group/neutron_driver.py:57
#, python-format
msgid "Neutron Error creating security group %s"
@@ -7124,6 +6646,14 @@ msgstr ""
"El grupo de seguridad %(security_group_name)s no está asociado a la "
"instancia %(instance)s"
+#: nova/network/security_group/security_group_base.py:89
+msgid "Type and Code must be integers for ICMP protocol type"
+msgstr ""
+
+#: nova/network/security_group/security_group_base.py:92
+msgid "To and From ports must be integers"
+msgstr ""
+
#: nova/network/security_group/security_group_base.py:134
#, python-format
msgid "This rule already exists in group %s"
@@ -7134,22 +6664,22 @@ msgstr "Esta regla ya existe en el grupo %s"
msgid "Error setting %(attr)s"
msgstr "Error al establecer %(attr)s"
-#: nova/objects/base.py:247
+#: nova/objects/base.py:262
#, python-format
msgid "Unable to instantiate unregistered object type %(objtype)s"
msgstr "Incapaz de instanciar tipo de objeto no registrado %(objtype)s"
-#: nova/objects/base.py:366
+#: nova/objects/base.py:381
#, python-format
msgid "Cannot load '%s' in the base class"
msgstr "No se puede cargar '%s' en la clase base"
-#: nova/objects/base.py:412
+#: nova/objects/base.py:427
#, python-format
msgid "%(objname)s object has no attribute '%(attrname)s'"
msgstr "El objeto %(objname)s no tiene atributo '%(attrname)s'"
-#: nova/objects/block_device.py:136
+#: nova/objects/block_device.py:149
msgid "Volume does not belong to the requested instance."
msgstr "El volumen no pertenece a la instancia solicitada."
@@ -7163,44 +6693,44 @@ msgstr "La clave %(key)s debe ser de tipo %(expected)s y no del tipo %(actual)s"
msgid "Element %(key)s:%(val)s must be of type %(expected)s not %(actual)s"
msgstr "El elemento %(key)s:%(val)s debe ser de tipo %(expected)s y no %(actual)s"
-#: nova/objects/fields.py:157
+#: nova/objects/fields.py:165
#, python-format
msgid "Field `%s' cannot be None"
msgstr "El campo `%s' no puede ser Ninguno"
-#: nova/objects/fields.py:232
+#: nova/objects/fields.py:246
#, python-format
msgid "A string is required here, not %s"
msgstr "Se requiere una cadena aqui, no %s"
-#: nova/objects/fields.py:268
+#: nova/objects/fields.py:286
msgid "A datetime.datetime is required here"
msgstr "Se requiere un datetime.datetime aquí"
-#: nova/objects/fields.py:306 nova/objects/fields.py:315
-#: nova/objects/fields.py:324
+#: nova/objects/fields.py:328 nova/objects/fields.py:337
+#: nova/objects/fields.py:346
#, python-format
msgid "Network \"%s\" is not valid"
msgstr "La red \"%s\" no es válida"
-#: nova/objects/fields.py:363
+#: nova/objects/fields.py:385
msgid "A list is required here"
msgstr "Aquí se requiere una lista"
-#: nova/objects/fields.py:379
+#: nova/objects/fields.py:405
msgid "A dict is required here"
msgstr "Aquí se requiere un diccionario"
-#: nova/objects/fields.py:418
+#: nova/objects/fields.py:449
#, python-format
msgid "An object of type %s is required here"
msgstr "Aquí se requiere un objeto del tipo %s"
-#: nova/objects/fields.py:445
+#: nova/objects/fields.py:488
msgid "A NetworkModel is required here"
msgstr "aquí se requiere un NetworkModel"
-#: nova/objects/instance.py:432
+#: nova/objects/instance.py:433
#, python-format
msgid "No save handler for %s"
msgstr "No hay manejador de guardado para %s"
@@ -7211,11 +6741,11 @@ msgstr ""
"No se ha podido notificar a las células la actualización de memoria caché"
" de información de instancia"
-#: nova/openstack/common/gettextutils.py:320
+#: nova/openstack/common/gettextutils.py:301
msgid "Message objects do not support addition."
msgstr "Los objetos de mensaje no soportan adición."
-#: nova/openstack/common/gettextutils.py:330
+#: nova/openstack/common/gettextutils.py:311
msgid ""
"Message objects do not support str() because they may contain non-ascii "
"characters. Please use unicode() or translate() instead."
@@ -7235,32 +6765,32 @@ msgstr ""
"Se ha encontrado la lista de instantáneas pero no se ha encontrado "
"ninguna cabecera."
-#: nova/openstack/common/lockutils.py:102
+#: nova/openstack/common/lockutils.py:101
#, python-format
msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s"
msgstr ""
-#: nova/openstack/common/log.py:327
+#: nova/openstack/common/log.py:289
#, python-format
msgid "Deprecated: %s"
msgstr "En desuso: %s"
-#: nova/openstack/common/log.py:436
+#: nova/openstack/common/log.py:397
#, python-format
msgid "Error loading logging config %(log_config)s: %(err_msg)s"
msgstr "Error al cargar la configuración de registro %(log_config)s: %(err_msg)s"
-#: nova/openstack/common/log.py:486
+#: nova/openstack/common/log.py:458
#, python-format
msgid "syslog facility must be one of: %s"
msgstr "El recurso syslog debe ser uno de: %s"
-#: nova/openstack/common/log.py:729
+#: nova/openstack/common/log.py:709
#, python-format
msgid "Fatal call to deprecated config: %(msg)s"
msgstr "Llamada muy grave a configuración en desuso: %(msg)s"
-#: nova/openstack/common/periodic_task.py:39
+#: nova/openstack/common/periodic_task.py:40
#, python-format
msgid "Unexpected argument for periodic task creation: %(arg)s."
msgstr "Argumento inesperado para la creación de tarea periódica: %(arg)s."
@@ -7323,27 +6853,27 @@ msgstr "Entorno no soportado a través de SSH"
msgid "process_input not supported over SSH"
msgstr "entrada de proceso no soporta a través de SSH"
-#: nova/openstack/common/sslutils.py:98
+#: nova/openstack/common/sslutils.py:95
#, python-format
msgid "Invalid SSL version : %s"
msgstr "Versión SSL inválida : %s"
-#: nova/openstack/common/strutils.py:92
+#: nova/openstack/common/strutils.py:114
#, python-format
msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s"
msgstr "Valor '%(val)s' no reconocido, los valores aceptables son: %(acceptable)s"
-#: nova/openstack/common/strutils.py:202
+#: nova/openstack/common/strutils.py:219
#, python-format
msgid "Invalid unit system: \"%s\""
msgstr "Unidad del sistema no valida: \"%s\""
-#: nova/openstack/common/strutils.py:211
+#: nova/openstack/common/strutils.py:228
#, python-format
msgid "Invalid string format: %s"
msgstr "Formato inválido de cadena: %s"
-#: nova/openstack/common/versionutils.py:69
+#: nova/openstack/common/versionutils.py:86
#, python-format
msgid ""
"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
@@ -7352,7 +6882,7 @@ msgstr ""
"%(what)s es obsoleto así como %(as_of)s en beneficio de %(in_favor_of)s y"
" puede ser removido en %(remove_in)s."
-#: nova/openstack/common/versionutils.py:73
+#: nova/openstack/common/versionutils.py:90
#, python-format
msgid ""
"%(what)s is deprecated as of %(as_of)s and may be removed in "
@@ -7361,6 +6891,16 @@ msgstr ""
"%(what)s está obsoleto así como %(as_of)s y puede ser removido en "
"%(remove_in)s. Y no se sustituirá."
+#: nova/openstack/common/versionutils.py:94
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
+msgstr ""
+
+#: nova/openstack/common/versionutils.py:97
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
+msgstr ""
+
#: nova/openstack/common/db/sqlalchemy/migration.py:226
#, python-format
msgid ""
@@ -7378,11 +6918,11 @@ msgstr ""
"La base de datos no está en control de versión, pero tiene tablas. Por "
"favor indica la versión actual del esquema manualmente."
-#: nova/openstack/common/db/sqlalchemy/utils.py:119
+#: nova/openstack/common/db/sqlalchemy/utils.py:118
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr "Dirección de clasificación desconocida, debe ser 'desc' o ' asc'"
-#: nova/openstack/common/db/sqlalchemy/utils.py:162
+#: nova/openstack/common/db/sqlalchemy/utils.py:161
#, python-format
msgid ""
"There is no `deleted` column in `%s` table. Project doesn't use soft-"
@@ -7391,7 +6931,7 @@ msgstr ""
"No existe la columna `deleted` en la tbala `%s`. El projecto on utiliza "
"la característica de remoción suave."
-#: nova/openstack/common/db/sqlalchemy/utils.py:181
+#: nova/openstack/common/db/sqlalchemy/utils.py:180
#, python-format
msgid "There is no `project_id` column in `%s` table."
msgstr "No existe la columna `project_id` en la tabla `%s`."
@@ -7422,7 +6962,7 @@ msgstr ""
msgid "Unsupported id columns type"
msgstr "Tipo de identificador de columnas no soportado"
-#: nova/pci/pci_manager.py:156
+#: nova/pci/pci_manager.py:113
#, python-format
msgid ""
"Trying to remove device with %(status)s ownership %(instance_uuid)s "
@@ -7456,7 +6996,7 @@ msgstr "El controlador debe implementar schedule_run_instance"
msgid "Driver must implement select_destinations"
msgstr "El controlador debe implementar select_destinatios"
-#: nova/scheduler/filter_scheduler.py:80
+#: nova/scheduler/filter_scheduler.py:84
#, python-format
msgid ""
"Attempting to build %(num_instances)d instance(s) uuids: "
@@ -7465,21 +7005,29 @@ msgstr ""
"Intentando construir %(num_instances)d instancia(s) con uuids: "
"%(instance_uuids)s"
-#: nova/scheduler/filter_scheduler.py:109
+#: nova/scheduler/filter_scheduler.py:113
#, python-format
msgid "Choosing host %(weighed_host)s for instance %(instance_uuid)s"
msgstr "Eligiendo anfitrión %(weighed_host)s para la instancia %(instance_uuid)s"
-#: nova/scheduler/filter_scheduler.py:170
+#: nova/scheduler/filter_scheduler.py:173
msgid "Instance disappeared during scheduling"
msgstr "La instancia ha desaparecido durante la programación"
-#: nova/scheduler/host_manager.py:173
+#: nova/scheduler/filter_scheduler.py:219
+msgid "ServerGroupAffinityFilter not configured"
+msgstr ""
+
+#: nova/scheduler/filter_scheduler.py:224
+msgid "ServerGroupAntiAffinityFilter not configured"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:169
#, python-format
msgid "Metric name unknown of %r"
msgstr "Nombre de métrica desconocido para %r"
-#: nova/scheduler/host_manager.py:188
+#: nova/scheduler/host_manager.py:184
#, python-format
msgid ""
"Host has more disk space than database expected (%(physical)sgb > "
@@ -7488,42 +7036,41 @@ msgstr ""
"El anfitrión tiene más espacio en disco que lo esperado por la base de "
"datos (%(physical)sgb > %(database)sgb)"
-#: nova/scheduler/host_manager.py:365
+#: nova/scheduler/host_manager.py:311
#, python-format
msgid "Host filter ignoring hosts: %s"
msgstr "Filtro de anfitrión ignorando huéspedes: %s"
-#: nova/scheduler/host_manager.py:377
+#: nova/scheduler/host_manager.py:323
#, python-format
msgid "Host filter forcing available hosts to %s"
msgstr "Filtro de anfitrión forzando a los huéspedes disponibles a %s"
-#: nova/scheduler/host_manager.py:380
+#: nova/scheduler/host_manager.py:326
#, python-format
msgid "No hosts matched due to not matching 'force_hosts' value of '%s'"
msgstr ""
"No se han relacionado anfitriones debido a que no hay valores "
"relacionados de '%s' a 'force_hosts'"
-#: nova/scheduler/host_manager.py:393
+#: nova/scheduler/host_manager.py:339
#, python-format
msgid "Host filter forcing available nodes to %s"
msgstr "Filtro de anfitriones forzando nodos disponibles a %s"
-#: nova/scheduler/host_manager.py:396
+#: nova/scheduler/host_manager.py:342
#, python-format
msgid "No nodes matched due to not matching 'force_nodes' value of '%s'"
msgstr ""
"No se han relacionado nodos debido a que no hay valores relacionados de "
"'%s' a 'force_nodes'"
-#: nova/scheduler/host_manager.py:444
-#: nova/scheduler/filters/trusted_filter.py:208
+#: nova/scheduler/host_manager.py:390
#, python-format
msgid "No service for compute ID %s"
msgstr "No hay servicio para el ID de cálculo %s "
-#: nova/scheduler/host_manager.py:462
+#: nova/scheduler/host_manager.py:408
#, python-format
msgid "Removing dead compute node %(host)s:%(node)s from scheduler"
msgstr "Eliminando nodo de cálculo inactivo %(host)s:%(node)s del planificador"
@@ -7561,7 +7108,7 @@ msgstr "Error del último host: %(last_host)s (nodo %(last_node)s): %(exc)s"
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr "Valor no válido para 'scheduler_max_attempts', debe ser >= 1 "
-#: nova/scheduler/utils.py:233
+#: nova/scheduler/utils.py:231
#, python-format
msgid "Ignoring the invalid elements of the option %(name)s: %(options)s"
msgstr "Ignorando los elementos inválidos de la opción %(name)s: %(options)s"
@@ -7571,6 +7118,12 @@ msgstr "Ignorando los elementos inválidos de la opción %(name)s: %(options)s"
msgid "%(host_state)s has not been heard from in a while"
msgstr "%(host_state)s no ha sido recibido durante un tiempo"
+#: nova/scheduler/filters/exact_core_filter.py:36
+msgid "VCPUs not set; assuming CPU collection broken"
+msgstr ""
+"VCPU no establecidas; suponiendo que la colección de CPU se ha "
+"interrumpido"
+
#: nova/servicegroup/api.py:70
#, python-format
msgid "unknown ServiceGroup driver name: %s"
@@ -7671,16 +7224,6 @@ msgstr ""
msgid "ZooKeeperDriver.leave: %(id)s has not joined to the %(gr)s group"
msgstr "ZooKeeperDriver.leave: %(id)s no se ha unido al grupo %(gr)s"
-#: nova/storage/linuxscsi.py:100
-#, python-format
-msgid "Multipath call failed exit (%(code)s)"
-msgstr "La llamada a multivía de acceso no ha podido salir (%(code)s)"
-
-#: nova/storage/linuxscsi.py:121
-#, python-format
-msgid "Couldn't find multipath device %s"
-msgstr "No se puede encontrar el dispositivo multiruta %s"
-
#: nova/tests/fake_ldap.py:33
msgid "Attempted to instantiate singleton"
msgstr "Intento de instanciar sigleton"
@@ -7689,15 +7232,15 @@ msgstr "Intento de instanciar sigleton"
msgid "status must be available"
msgstr "el estado debe ser disponible"
-#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:245
+#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:290
msgid "already attached"
msgstr "ya está conectado"
-#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:256
+#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:301
msgid "Instance and volume not in same availability_zone"
msgstr "La instancia y el volumen no están en la misma availability_zone"
-#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:262
+#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:307
msgid "already detached"
msgstr "ya está desconectado"
@@ -7705,8 +7248,12 @@ msgstr "ya está desconectado"
msgid "unexpected role header"
msgstr "cabecera de rol inesperada"
-#: nova/tests/api/openstack/compute/test_servers.py:3202
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2425
+#: nova/tests/api/openstack/test_faults.py:47
+msgid "Should be translated."
+msgstr ""
+
+#: nova/tests/api/openstack/compute/test_servers.py:3279
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2438
msgid ""
"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
"instances"
@@ -7714,46 +7261,46 @@ msgstr ""
"Se ha superado la cuota para las instancias: solicitada 1, pero ya se han"
" utilizado 10 de 10 instancias"
-#: nova/tests/api/openstack/compute/test_servers.py:3207
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2430
+#: nova/tests/api/openstack/compute/test_servers.py:3284
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2443
msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
msgstr ""
"Se ha superado la cuota para ram: Solicitadas 4096, ya utilizadas 8192 de"
" 10240 ram"
-#: nova/tests/api/openstack/compute/test_servers.py:3212
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2435
+#: nova/tests/api/openstack/compute/test_servers.py:3289
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2448
msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
msgstr ""
"Se ha superado la cuota para núcleos: Solicitados 2, pero ya utilizados 9"
" de 10 núcleos"
-#: nova/tests/compute/test_compute.py:1680
-#: nova/tests/compute/test_compute.py:1707
-#: nova/tests/compute/test_compute.py:1785
-#: nova/tests/compute/test_compute.py:1825
-#: nova/tests/compute/test_compute.py:5546
+#: nova/tests/compute/test_compute.py:1770
+#: nova/tests/compute/test_compute.py:1797
+#: nova/tests/compute/test_compute.py:1875
+#: nova/tests/compute/test_compute.py:1915
+#: nova/tests/compute/test_compute.py:5718
#, python-format
msgid "Running instances: %s"
msgstr "Ejecutando instancias: %s"
-#: nova/tests/compute/test_compute.py:1687
-#: nova/tests/compute/test_compute.py:1755
-#: nova/tests/compute/test_compute.py:1793
+#: nova/tests/compute/test_compute.py:1777
+#: nova/tests/compute/test_compute.py:1845
+#: nova/tests/compute/test_compute.py:1883
#, python-format
msgid "After terminating instances: %s"
msgstr "Después de terminar las instancias: %s"
-#: nova/tests/compute/test_compute.py:5557
+#: nova/tests/compute/test_compute.py:5729
#, python-format
msgid "After force-killing instances: %s"
msgstr "Después de finalizar de forma forzada las instancias: %s"
-#: nova/tests/compute/test_compute.py:6173
+#: nova/tests/compute/test_compute.py:6345
msgid "wrong host/node"
msgstr "host/nodo incorrecto"
-#: nova/tests/compute/test_compute.py:10753
+#: nova/tests/compute/test_compute.py:10999
msgid "spawn error"
msgstr "error de generación"
@@ -7761,7 +7308,16 @@ msgstr "error de generación"
msgid "Keypair data is invalid"
msgstr "Los datos del par de claves no son válidos"
-#: nova/tests/db/test_migrations.py:866
+#: nova/tests/compute/test_resources.py:78
+#, python-format
+msgid "Free %(free)d < requested %(requested)d "
+msgstr ""
+
+#: nova/tests/compute/test_resources.py:329
+msgid "Free CPUs 2.00 VCPUs < requested 5 VCPUs"
+msgstr ""
+
+#: nova/tests/db/test_migrations.py:931
#, python-format
msgid ""
"The following migrations are missing a downgrade:\n"
@@ -7853,27 +7409,27 @@ msgstr "Cuerpo: %s"
msgid "Unexpected status code"
msgstr "Código de estado inesperado"
-#: nova/tests/virt/hyperv/test_hypervapi.py:512
+#: nova/tests/virt/hyperv/test_hypervapi.py:515
msgid "fake vswitch not found"
msgstr "vswitch falso no encontrado"
-#: nova/tests/virt/hyperv/test_hypervapi.py:965
+#: nova/tests/virt/hyperv/test_hypervapi.py:968
msgid "Simulated failure"
msgstr "Falla simulada"
-#: nova/tests/virt/libvirt/fakelibvirt.py:1019
+#: nova/tests/virt/libvirt/fakelibvirt.py:1051
msgid "Expected a list for 'auth' parameter"
msgstr "Se esperaba una lista para el parámetro 'auth'"
-#: nova/tests/virt/libvirt/fakelibvirt.py:1023
+#: nova/tests/virt/libvirt/fakelibvirt.py:1055
msgid "Expected a function in 'auth[0]' parameter"
msgstr "Se esperaba una función en el parámetro 'auth[0]' "
-#: nova/tests/virt/libvirt/fakelibvirt.py:1027
+#: nova/tests/virt/libvirt/fakelibvirt.py:1059
msgid "Expected a function in 'auth[1]' parameter"
msgstr "Se esperaba una función en el parámetro 'auth[1]' "
-#: nova/tests/virt/libvirt/fakelibvirt.py:1038
+#: nova/tests/virt/libvirt/fakelibvirt.py:1070
msgid ""
"virEventRegisterDefaultImpl() must be called before "
"connection is used."
@@ -7881,8 +7437,32 @@ msgstr ""
"virEventRegisterDefaultImpl() debe ser invocado antes de que la conexión "
"sea utilizada."
-#: nova/tests/virt/vmwareapi/test_vm_util.py:196
-#: nova/virt/vmwareapi/vm_util.py:1087
+#: nova/tests/virt/vmwareapi/fake.py:241
+#, python-format
+msgid "Property %(attr)s not set for the managed object %(name)s"
+msgstr ""
+"La propiedad %(attr)s no se ha establecido para el objeto gestionado "
+"%(name)s"
+
+#: nova/tests/virt/vmwareapi/fake.py:985
+msgid "There is no VM registered"
+msgstr "No hay ninguna VM registrada"
+
+#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1338
+#, python-format
+msgid "Virtual Machine with ref %s is not there"
+msgstr "La máquina virtual con la referencia %s no está allí"
+
+#: nova/tests/virt/vmwareapi/fake.py:1127
+msgid "Session Invalid"
+msgstr "Sesión no válida"
+
+#: nova/tests/virt/vmwareapi/fake.py:1335
+msgid "No Virtual Machine has been registered yet"
+msgstr "No se ha registrado aún ninguna máquina virtual "
+
+#: nova/tests/virt/vmwareapi/test_ds_util.py:215
+#: nova/virt/vmwareapi/ds_util.py:261
#, python-format
msgid "Datastore regex %s did not match any datastores"
msgstr ""
@@ -7898,8 +7478,8 @@ msgstr ""
"Copia dispersa en progreso, %(complete_pct).2f%% completado. %(left)s "
"bytes restantes para copiar."
-#: nova/tests/virt/xenapi/image/test_bittorrent.py:126
-#: nova/virt/xenapi/image/bittorrent.py:81
+#: nova/tests/virt/xenapi/image/test_bittorrent.py:125
+#: nova/virt/xenapi/image/bittorrent.py:80
msgid ""
"Cannot create default bittorrent URL without torrent_base_url set or "
"torrent URL fetcher extension"
@@ -7907,86 +7487,120 @@ msgstr ""
"No se puede crear la URL predeterminada de bittorrent sin establecer "
"torrent_base_url la extensión de búsqueda de URL torrent"
-#: nova/tests/virt/xenapi/image/test_bittorrent.py:160
-#: nova/virt/xenapi/image/bittorrent.py:85
+#: nova/tests/virt/xenapi/image/test_bittorrent.py:159
+#: nova/virt/xenapi/image/bittorrent.py:84
msgid "Multiple torrent URL fetcher extensions found. Failing."
msgstr "Se han encontrado múltiples URL de buscadores torrent. Fallando."
-#: nova/virt/block_device.py:243
+#: nova/virt/block_device.py:255
#, python-format
msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
"El controlador ha fallado al asignar el volumen %(volume_id)s en "
"%(mountpoint)s"
-#: nova/virt/block_device.py:362
+#: nova/virt/block_device.py:401
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr "Arrancando con el volumen %(volume_id)s en %(mountpoint)s"
-#: nova/virt/cpu.py:56 nova/virt/cpu.py:60
-#, python-format
-msgid "Invalid range expression %r"
-msgstr "Expresión de intérvalo inválida %"
-
-#: nova/virt/cpu.py:69
+#: nova/virt/diagnostics.py:143
#, python-format
-msgid "Invalid exclusion expression %r"
-msgstr "Expresión de exclusión inválida %r"
+msgid "Invalid type for %s"
+msgstr ""
-#: nova/virt/cpu.py:76
+#: nova/virt/diagnostics.py:147
#, python-format
-msgid "Invalid inclusion expression %r"
-msgstr "Expresión de inclusión inválida %"
+msgid "Invalid type for %s entry"
+msgstr ""
-#: nova/virt/cpu.py:81
-#, python-format
-msgid "No CPUs available after parsing %r"
-msgstr "CPU's no disponibles después de analizar %r"
+#: nova/virt/driver.py:708
+msgid "Hypervisor driver does not support post_live_migration_at_source method"
+msgstr ""
-#: nova/virt/driver.py:1207
+#: nova/virt/driver.py:1264
msgid "Event must be an instance of nova.virt.event.Event"
msgstr "El suceso debe ser una instancia de un nova.virt.event.Event"
-#: nova/virt/driver.py:1213
+#: nova/virt/driver.py:1270
#, python-format
msgid "Exception dispatching event %(event)s: %(ex)s"
msgstr "Excepción al asignar el suceso %(event)s: %(ex)s"
-#: nova/virt/driver.py:1295
+#: nova/virt/driver.py:1364
msgid "Compute driver option required, but not specified"
msgstr ""
"La opción de controlador de cálculo es necesaria, pero no se ha "
"especificado"
-#: nova/virt/driver.py:1298
+#: nova/virt/driver.py:1367
#, python-format
msgid "Loading compute driver '%s'"
msgstr "Cargando controlador de cálculo '%s' "
-#: nova/virt/driver.py:1305
+#: nova/virt/driver.py:1374
msgid "Unable to load the virtualization driver"
msgstr "Incapaz de cargar el controlador de virtualización"
-#: nova/virt/fake.py:216
+#: nova/virt/event.py:33
+msgid "Started"
+msgstr "Arrancado"
+
+#: nova/virt/event.py:34
+msgid "Stopped"
+msgstr ""
+
+#: nova/virt/event.py:35
+msgid "Paused"
+msgstr "Pausada"
+
+#: nova/virt/event.py:36
+msgid "Resumed"
+msgstr "Reanudada"
+
+#: nova/virt/event.py:108
+msgid "Unknown"
+msgstr "Desconocido"
+
+#: nova/virt/fake.py:217
#, python-format
msgid "Key '%(key)s' not in instances '%(inst)s'"
msgstr "La clave '%(key)s' no está en las instancias '%(inst)s'"
-#: nova/virt/firewall.py:178
+#: nova/virt/firewall.py:174
msgid "Attempted to unfilter instance which is not filtered"
msgstr "Se ha intentado eliminar filtro de instancia que no está filtrada"
-#: nova/virt/images.py:86
+#: nova/virt/hardware.py:46
+#, python-format
+msgid "No CPUs available after parsing %r"
+msgstr "CPU's no disponibles después de analizar %r"
+
+#: nova/virt/hardware.py:78 nova/virt/hardware.py:82
+#, python-format
+msgid "Invalid range expression %r"
+msgstr "Expresión de intérvalo inválida %"
+
+#: nova/virt/hardware.py:91
+#, python-format
+msgid "Invalid exclusion expression %r"
+msgstr "Expresión de exclusión inválida %r"
+
+#: nova/virt/hardware.py:98
+#, python-format
+msgid "Invalid inclusion expression %r"
+msgstr "Expresión de inclusión inválida %"
+
+#: nova/virt/images.py:81
msgid "'qemu-img info' parsing failed."
msgstr "Se ha encontrado un error en el análisis de 'qemu-img info'."
-#: nova/virt/images.py:92
+#: nova/virt/images.py:87
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr "fmt=%(fmt)s respaldado por: %(backing_file)s"
-#: nova/virt/images.py:105
+#: nova/virt/images.py:100
#, python-format
msgid ""
"%(base)s virtual size %(disk_size)s larger than flavor root disk size "
@@ -7995,12 +7609,12 @@ msgstr ""
"El tamaño virtual %(disk_size)s de %(base)s es más grande que el tamaño "
"del disco raíz del sabor %(size)s"
-#: nova/virt/images.py:122
+#: nova/virt/images.py:117
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr "Convertido a sin formato, pero el formato es ahora %s"
-#: nova/virt/storage_users.py:63 nova/virt/storage_users.py:101
+#: nova/virt/storage_users.py:64 nova/virt/storage_users.py:102
#, python-format
msgid "Cannot decode JSON from %(id_path)s"
msgstr "No se puede decodificar el JSOON de %(id_path)s"
@@ -8035,45 +7649,45 @@ msgstr "cpu_arch no se ha encontrado en flavor_extra_specs"
msgid "Baremetal node id not supplied to driver for %r"
msgstr "ID de nodo de máquina vacía no proporcionado a controlador para %r"
-#: nova/virt/baremetal/driver.py:289
+#: nova/virt/baremetal/driver.py:292
#, python-format
msgid "Error deploying instance %(instance)s on baremetal node %(node)s."
msgstr ""
"Error al desplegar la instancia %(instance)s en nodo de máquina vacía "
"%(node)s."
-#: nova/virt/baremetal/driver.py:364
+#: nova/virt/baremetal/driver.py:367
#, python-format
msgid "Baremetal power manager failed to restart node for instance %r"
msgstr ""
"El gestor de alimentación de máquina vacía no ha podido reiniciar el nodo"
" para la instancia %r"
-#: nova/virt/baremetal/driver.py:375
+#: nova/virt/baremetal/driver.py:379
#, python-format
msgid "Destroy called on non-existing instance %s"
msgstr "Se ha llamado una destrucción en una instancia no existente %s"
-#: nova/virt/baremetal/driver.py:393
+#: nova/virt/baremetal/driver.py:397
#, python-format
msgid "Error from baremetal driver during destroy: %s"
msgstr "Error del controlador de máquina vacía durante la destrucción: %s"
-#: nova/virt/baremetal/driver.py:398
+#: nova/virt/baremetal/driver.py:402
#, python-format
msgid "Error while recording destroy failure in baremetal database: %s"
msgstr ""
"Error al registrar la anomalía de destrcción en la base de datos de "
"máquina vacía: %s"
-#: nova/virt/baremetal/driver.py:413
+#: nova/virt/baremetal/driver.py:417
#, python-format
msgid "Baremetal power manager failed to stop node for instance %r"
msgstr ""
"El gestor de alimentación de máquina vacía no ha podido detener el nodo "
"para la instancia %r"
-#: nova/virt/baremetal/driver.py:426
+#: nova/virt/baremetal/driver.py:430
#, python-format
msgid "Baremetal power manager failed to start node for instance %r"
msgstr ""
@@ -8164,7 +7778,7 @@ msgstr ""
"No se puede activar el cargador de arranque PXE. Los parámetros de "
"arranque siguientes no se han pasado al controlador de máquina vacía: %s"
-#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:317
+#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:318
#, python-format
msgid "Node associated with another instance while waiting for deploy of %s"
msgstr "Nodo asociado con otra instancia mientras se esperaba el despliegue de %s"
@@ -8184,7 +7798,7 @@ msgstr "El despliegue de PXE se ha completado para la instancia %s"
msgid "PXE deploy failed for instance %s"
msgstr "Se ha encontrado un error en el despliegue de PXE para la instancia %s"
-#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:342
+#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:343
#, python-format
msgid "Baremetal node deleted while waiting for deployment of instance %s"
msgstr ""
@@ -8208,21 +7822,21 @@ msgstr ""
"parámetros de arranque no han sido proporcionados al controlador de "
"baremetal: %s"
-#: nova/virt/baremetal/tilera.py:323
+#: nova/virt/baremetal/tilera.py:324
#, python-format
msgid "Tilera deploy started for instance %s"
msgstr "Despliegue Tilera iniciado para la instancia %s"
-#: nova/virt/baremetal/tilera.py:329
+#: nova/virt/baremetal/tilera.py:330
#, python-format
msgid "Tilera deploy completed for instance %s"
msgstr "Despliege Tilera completado para instancia %s"
-#: nova/virt/baremetal/tilera.py:337
+#: nova/virt/baremetal/tilera.py:338
msgid "Node is unknown error state."
msgstr "El nodo está en un estado de error desconocido."
-#: nova/virt/baremetal/tilera.py:340
+#: nova/virt/baremetal/tilera.py:341
#, python-format
msgid "Tilera deploy failed for instance %s"
msgstr "Despliegue tilera fallido para la instancia %s"
@@ -8330,90 +7944,71 @@ msgstr "Error al ejecutar comando: %s"
msgid "baremetal driver was unable to delete tid %s"
msgstr "el controlador de máquina vacía no ha podido suprimir el tid %s"
-#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:189
+#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:196
msgid "Could not determine iscsi initiator name"
msgstr "No se ha podido determinar el nombre de iniciador iscsi "
-#: nova/virt/baremetal/volume_driver.py:234
+#: nova/virt/baremetal/volume_driver.py:225
#, python-format
msgid "No fixed PXE IP is associated to %s"
msgstr "No hay ninguna IP PXE fija asociada a %s"
-#: nova/virt/baremetal/volume_driver.py:288
+#: nova/virt/baremetal/volume_driver.py:283
#, python-format
msgid "detach volume could not find tid for %s"
msgstr "el volumen de desconexión no ha podido encontrar tid para %s"
-#: nova/virt/baremetal/db/sqlalchemy/api.py:198
+#: nova/virt/baremetal/db/sqlalchemy/api.py:199
msgid "instance_uuid must be supplied to bm_node_associate_and_update"
msgstr "instance_uuid se debe proporcionar para bm_node_associate_and_update"
-#: nova/virt/baremetal/db/sqlalchemy/api.py:210
+#: nova/virt/baremetal/db/sqlalchemy/api.py:211
#, python-format
msgid "Failed to associate instance %(i_uuid)s to baremetal node %(n_uuid)s."
msgstr ""
"No se ha podido asociar la instancia %(i_uuid)s a nodo de máquina vacía "
"%(n_uuid)s."
-#: nova/virt/baremetal/db/sqlalchemy/api.py:245
-#: nova/virt/baremetal/db/sqlalchemy/api.py:287
+#: nova/virt/baremetal/db/sqlalchemy/api.py:246
+#: nova/virt/baremetal/db/sqlalchemy/api.py:288
#, python-format
msgid "Baremetal interface %s not found"
msgstr "Interfaz de máquina vacía %s no encontrada"
-#: nova/virt/baremetal/db/sqlalchemy/api.py:297
+#: nova/virt/baremetal/db/sqlalchemy/api.py:298
#, python-format
msgid "Baremetal interface %s already in use"
msgstr "La interfaz de máquina vacía %s ya se está utilizando"
-#: nova/virt/baremetal/db/sqlalchemy/api.py:310
+#: nova/virt/baremetal/db/sqlalchemy/api.py:311
#, python-format
msgid "Baremetal virtual interface %s not found"
msgstr "No se ha encontrado la interfaz virtual de máquina vacía %s"
-#: nova/virt/disk/api.py:285
+#: nova/virt/disk/api.py:292
msgid "image already mounted"
msgstr "imagen ya montada"
-#: nova/virt/disk/api.py:359
-#, python-format
-msgid "Ignoring error injecting data into image (%(e)s)"
-msgstr "Ignorando el error al inyectar datos en la imagen (%(e)s)"
-
-#: nova/virt/disk/api.py:381
-#, python-format
-msgid ""
-"Failed to mount container filesystem '%(image)s' on '%(target)s': "
-"%(errors)s"
-msgstr ""
-"Se ha encontrado un error en el montaje del sistema de archivos de "
-"contenedor '%(image)s' en '%(target)s': : %(errors)s"
-
-#: nova/virt/disk/api.py:411
+#: nova/virt/disk/api.py:418
#, python-format
msgid "Failed to teardown container filesystem: %s"
msgstr "Fallo al desarmar el contenedor de sistema de archivo: %s"
-#: nova/virt/disk/api.py:424
+#: nova/virt/disk/api.py:431
#, python-format
msgid "Failed to umount container filesystem: %s"
msgstr "No se ha podido desmontar el sistema de archivos de contenedor: %s"
-#: nova/virt/disk/api.py:449
-#, python-format
-msgid "Ignoring error injecting %(inject)s into image (%(e)s)"
-msgstr "Ignorando el error al inyectar %(inject)s en la imagen (%(e)s)"
-
-#: nova/virt/disk/api.py:609
+#: nova/virt/disk/api.py:616
msgid "Not implemented on Windows"
msgstr "No implementado en Windows"
-#: nova/virt/disk/api.py:636
+#: nova/virt/disk/api.py:643
#, python-format
msgid "User %(username)s not found in password file."
msgstr "El usuario %(username)s no se ha encontrado en el archivo de contraseña."
-#: nova/virt/disk/api.py:652
+#: nova/virt/disk/api.py:659
#, python-format
msgid "User %(username)s not found in shadow file."
msgstr "El usuario %(username)s no se ha encontrado en el archivo de duplicación. "
@@ -8495,22 +8090,22 @@ msgstr "el dispositivo nbd %s no se ha mostrado"
msgid "Detaching from erroneous nbd device returned error: %s"
msgstr "La desconexión del dispositivo nbd erróneo ha devuelto un error: %s"
-#: nova/virt/disk/vfs/guestfs.py:64
+#: nova/virt/disk/vfs/guestfs.py:77
#, python-format
msgid "No operating system found in %s"
msgstr "No se ha encontrado ningún sistema operativo en %s"
-#: nova/virt/disk/vfs/guestfs.py:70
+#: nova/virt/disk/vfs/guestfs.py:83
#, python-format
msgid "Multi-boot operating system found in %s"
msgstr "Se ha encontrado sistema operativo multiarranque en %s"
-#: nova/virt/disk/vfs/guestfs.py:81
+#: nova/virt/disk/vfs/guestfs.py:94
#, python-format
msgid "No mount points found in %(root)s of %(imgfile)s"
msgstr "No se han encontrado puntos de montaje en %(root)s de %(imgfile)s"
-#: nova/virt/disk/vfs/guestfs.py:95
+#: nova/virt/disk/vfs/guestfs.py:108
#, python-format
msgid ""
"Error mounting %(device)s to %(dir)s in image %(imgfile)s with libguestfs"
@@ -8519,22 +8114,22 @@ msgstr ""
"Error montaod %(device)s en %(dir)s en imagen %(imgfile)s con libguestfs "
"(%(e)s)"
-#: nova/virt/disk/vfs/guestfs.py:131
+#: nova/virt/disk/vfs/guestfs.py:156
#, python-format
msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)"
msgstr "Error al montar %(imgfile)s con libguestfs (%(e)s)"
-#: nova/virt/disk/vfs/guestfs.py:147
+#: nova/virt/disk/vfs/guestfs.py:172
#, python-format
msgid "Failed to close augeas %s"
msgstr "No se ha podido cerrar augeas %s"
-#: nova/virt/disk/vfs/guestfs.py:155
+#: nova/virt/disk/vfs/guestfs.py:180
#, python-format
msgid "Failed to shutdown appliance %s"
msgstr "No se ha podido concluir el dispositivo %s"
-#: nova/virt/disk/vfs/guestfs.py:163
+#: nova/virt/disk/vfs/guestfs.py:188
#, python-format
msgid "Failed to close guest handle %s"
msgstr "No se ha podido cerrar manejador de invitado %s"
@@ -8550,11 +8145,11 @@ msgstr ""
"No se puede encontrar el nombre de iniciador ISCSI. Eligiendo el "
"predeterminado"
-#: nova/virt/hyperv/driver.py:165
+#: nova/virt/hyperv/driver.py:169
msgid "VIF plugging is not supported by the Hyper-V driver."
msgstr "Conexión de VIF no está soportado por el driver de Hyper-V."
-#: nova/virt/hyperv/driver.py:170
+#: nova/virt/hyperv/driver.py:174
msgid "VIF unplugging is not supported by the Hyper-V driver."
msgstr "Desconexión de VIF no está soportado por el driver de Hyper-V."
@@ -8605,11 +8200,11 @@ msgstr "VM no encontrada: %s"
msgid "Duplicate VM name found: %s"
msgstr "Se ha encontrado nombre de VM duplicado: %s"
-#: nova/virt/hyperv/migrationops.py:97
+#: nova/virt/hyperv/migrationops.py:98
msgid "Cannot cleanup migration files"
msgstr "No se pueden limpiar los archivos de migración"
-#: nova/virt/hyperv/migrationops.py:105
+#: nova/virt/hyperv/migrationops.py:106
#, python-format
msgid ""
"Cannot resize the root disk to a smaller size. Current size: "
@@ -8618,11 +8213,16 @@ msgstr ""
"No se puede cambiar el tamaño del disco raíz a un menor tamaño. Tamaño "
"actual: %(curr_root_gb)s GB. Tamaño solicitado: %(new_root_gb)s GB"
-#: nova/virt/hyperv/migrationops.py:200
+#: nova/virt/hyperv/migrationops.py:155
+#, python-format
+msgid "Config drive is required by instance: %s, but it does not exist."
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:214
msgid "Cannot resize a VHD to a smaller size"
msgstr "No se puede redimensionar un VHD a un tamaño menor"
-#: nova/virt/hyperv/migrationops.py:245
+#: nova/virt/hyperv/migrationops.py:259
#, python-format
msgid "Cannot find boot VHD file for instance: %s"
msgstr "No se puede encontrar el archivo VHD para la instancia: %s"
@@ -8643,7 +8243,7 @@ msgstr ""
msgid "No external vswitch found"
msgstr "No se ha encontrado vswitch externo"
-#: nova/virt/hyperv/pathutils.py:71
+#: nova/virt/hyperv/pathutils.py:73
#, python-format
msgid "The file copy from %(src)s to %(dest)s failed"
msgstr "Se ha encontrado un error en la copia del archivo de %(src)s a %(dest)s"
@@ -8653,30 +8253,32 @@ msgstr "Se ha encontrado un error en la copia del archivo de %(src)s a %(dest)s"
msgid "Failed to remove snapshot for VM %s"
msgstr "No se ha podido eliminar la instantánea para VM %s"
-#: nova/virt/hyperv/vhdutils.py:65 nova/virt/hyperv/vhdutilsv2.py:63
+#: nova/virt/hyperv/utilsfactory.py:68
+msgid ""
+"The \"force_hyperv_utils_v1\" option cannot be set to \"True\" on Windows"
+" Server / Hyper-V Server 2012 R2 or above as the WMI "
+"\"root/virtualization\" namespace is no longer supported."
+msgstr ""
+
+#: nova/virt/hyperv/vhdutils.py:66 nova/virt/hyperv/vhdutilsv2.py:64
#, python-format
msgid "Unsupported disk format: %s"
msgstr "Formato de disco no soportado: %s"
-#: nova/virt/hyperv/vhdutils.py:150
-#, python-format
-msgid "The %(vhd_type)s type VHD is not supported"
-msgstr "El VHD de tipo %(vhd_type)s no está soportado"
+#: nova/virt/hyperv/vhdutils.py:77
+msgid "VHD differencing disks cannot be resized"
+msgstr ""
-#: nova/virt/hyperv/vhdutils.py:161
+#: nova/virt/hyperv/vhdutils.py:165
#, python-format
msgid "Unable to obtain block size from VHD %(vhd_path)s"
msgstr "Incapaz de obtener el tamaño de bloque del VHD %(vhd_path)s"
-#: nova/virt/hyperv/vhdutils.py:208
+#: nova/virt/hyperv/vhdutils.py:212
msgid "Unsupported virtual disk format"
msgstr "Formato de disco virtual no soportado."
-#: nova/virt/hyperv/vhdutilsv2.py:134
-msgid "Differencing VHDX images are not supported"
-msgstr "La diferenciación de imágenes VHDX no está soportada"
-
-#: nova/virt/hyperv/vhdutilsv2.py:157
+#: nova/virt/hyperv/vhdutilsv2.py:160
#, python-format
msgid "Unable to obtain internal size from VHDX: %(vhd_path)s. Exception: %(ex)s"
msgstr ""
@@ -8688,48 +8290,46 @@ msgstr ""
msgid "VIF driver not found for network_api_class: %s"
msgstr "No se ha encontrado el controlador VIF para network_api_class: %s"
-#: nova/virt/hyperv/vmops.py:169
+#: nova/virt/hyperv/vmops.py:198
#, python-format
msgid ""
-"Cannot resize a VHD to a smaller size, the original size is "
-"%(base_vhd_size)s, the newer size is %(root_vhd_size)s"
+"Cannot resize a VHD to a smaller size, the original size is %(old_size)s,"
+" the newer size is %(new_size)s"
msgstr ""
-"No se puede cambiar de tamaño un VHD a un tamaño menor, el tamaño "
-"original es %(base_vhd_size)s, el tamaño nuevo es %(root_vhd_size)s"
-#: nova/virt/hyperv/vmops.py:206
+#: nova/virt/hyperv/vmops.py:228
msgid "Spawning new instance"
msgstr "Generando nueva instancia"
-#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:520
+#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:574
#, python-format
msgid "Invalid config_drive_format \"%s\""
msgstr "config_drive_format \"%s\" no válido"
-#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:524
+#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:578
msgid "Using config drive for instance"
msgstr "Utilizando dispositivo de configuración para instancia"
-#: nova/virt/hyperv/vmops.py:296
+#: nova/virt/hyperv/vmops.py:320
#, python-format
msgid "Creating config drive at %(path)s"
msgstr "Creando unidad de configuración en %(path)s"
-#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:549
+#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:603
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr "La creación de unidad de configuración ha fallado con el error: %s"
-#: nova/virt/hyperv/vmops.py:340
+#: nova/virt/hyperv/vmops.py:371
msgid "Got request to destroy instance"
msgstr "Se ha obtenido una solicitud para destruir instancia"
-#: nova/virt/hyperv/vmops.py:359
+#: nova/virt/hyperv/vmops.py:390
#, python-format
msgid "Failed to destroy instance: %s"
msgstr "No se ha podido destruir instancia: %s"
-#: nova/virt/hyperv/vmops.py:412
+#: nova/virt/hyperv/vmops.py:443
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr "No se ha podido cambiar el estado de vm de %(vm_name)s a %(req_state)s "
@@ -8778,12 +8378,12 @@ msgstr "Tarea WMI fallida con estado %d. No hay descripción de error disponible
msgid "Metrics collection is not supported on this version of Hyper-V"
msgstr "La recolección de métricas no está soportada en esta versión de Hyper-V"
-#: nova/virt/hyperv/volumeops.py:146
+#: nova/virt/hyperv/volumeops.py:148
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr "Imposible adjuntar volumen a la instancia %s"
-#: nova/virt/hyperv/volumeops.py:215 nova/virt/hyperv/volumeops.py:229
+#: nova/virt/hyperv/volumeops.py:222 nova/virt/hyperv/volumeops.py:236
#, python-format
msgid "Unable to find a mounted disk for target_iqn: %s"
msgstr "No se ha podido encontrar un disco montado para target_iqn: %s "
@@ -8813,21 +8413,21 @@ msgstr "No hay nombres de dispositivo de disco libres para el prefijo '%s'"
msgid "Unable to determine disk bus for '%s'"
msgstr "No se puede determinar el bus de disco para '%s'"
-#: nova/virt/libvirt/driver.py:542
+#: nova/virt/libvirt/driver.py:550
#, python-format
msgid "Connection to libvirt lost: %s"
msgstr "Conexión hacia libvirt perdida: %s"
-#: nova/virt/libvirt/driver.py:724
+#: nova/virt/libvirt/driver.py:739
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr "No se puede manejar la solicitud de autenticación para las credenciales %d"
-#: nova/virt/libvirt/driver.py:868
+#: nova/virt/libvirt/driver.py:922
msgid "operation time out"
msgstr "Tiempo de espera agotado para la operación"
-#: nova/virt/libvirt/driver.py:1187
+#: nova/virt/libvirt/driver.py:1246
#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
@@ -8836,61 +8436,82 @@ msgstr ""
"El volúmen establece el tamaño de bloque, pero el hipervisor libvirt "
"actual '%s' no soporta tamaño de bloque personalizado."
-#: nova/virt/libvirt/driver.py:1194
+#: nova/virt/libvirt/driver.py:1253
#, python-format
msgid "Volume sets block size, but libvirt '%s' or later is required."
msgstr ""
"El volúmen establece el tamaño de bloque, pero se requiere libvirt '%s' o"
" mayor."
-#: nova/virt/libvirt/driver.py:1292
+#: nova/virt/libvirt/driver.py:1351
msgid "Swap only supports host devices"
msgstr "El espacio de intercambio solamente soporta dispositivos de anfitrión "
-#: nova/virt/libvirt/driver.py:1579
+#: nova/virt/libvirt/driver.py:1638
msgid "libvirt error while requesting blockjob info."
msgstr "error de libvirt al solicitar información de blockjob."
-#: nova/virt/libvirt/driver.py:1712
+#: nova/virt/libvirt/driver.py:1783
msgid "Found no disk to snapshot."
msgstr "No se ha encontrado disco relacionado a instantánea."
-#: nova/virt/libvirt/driver.py:1790
+#: nova/virt/libvirt/driver.py:1875
#, python-format
msgid "Unknown type: %s"
msgstr "Tipo desconocido: %s"
-#: nova/virt/libvirt/driver.py:1795
+#: nova/virt/libvirt/driver.py:1880
msgid "snapshot_id required in create_info"
msgstr "snapshot_id es requerido en create_info"
-#: nova/virt/libvirt/driver.py:1853
+#: nova/virt/libvirt/driver.py:1938
#, python-format
msgid "Libvirt '%s' or later is required for online deletion of volume snapshots."
msgstr ""
"Libvirt '%s' o mayor se requiere para remoción en línea de instantáneas "
"de volumen."
-#: nova/virt/libvirt/driver.py:1860
+#: nova/virt/libvirt/driver.py:1945
#, python-format
msgid "Unknown delete_info type %s"
msgstr "Tipo delete_info %s desconocido"
-#: nova/virt/libvirt/driver.py:1890
+#: nova/virt/libvirt/driver.py:1981
+#, python-format
+msgid "Disk with id: %s not found attached to instance."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1990
+msgid "filename cannot be None"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2019
+#, python-format
+msgid "no match found for %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2076
#, python-format
-msgid "Unable to locate disk matching id: %s"
-msgstr "Incapaz de localizar identificador de disco coincidente: %s"
+msgid ""
+"Relative blockcommit support was not detected. Libvirt '%s' or later is "
+"required for online deletion of network storage-backed volume snapshots."
+msgstr ""
-#: nova/virt/libvirt/driver.py:2330 nova/virt/xenapi/vmops.py:1552
+#: nova/virt/libvirt/driver.py:2491 nova/virt/xenapi/vmops.py:1561
msgid "Guest does not have a console available"
msgstr "El invitado no tiene una consola disponible"
-#: nova/virt/libvirt/driver.py:2746
+#: nova/virt/libvirt/driver.py:2820
+#, python-format
+msgid "%s format is not supported"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2926
#, python-format
msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted"
msgstr "La remoción de dispositivos PCI con libvirt < %(ver)s no está permitida"
-#: nova/virt/libvirt/driver.py:2912
+#: nova/virt/libvirt/driver.py:3069
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
@@ -8899,27 +8520,19 @@ msgstr ""
"La configuración ha solicitado un modelo CPU explícito, pero el "
"hipervisor libvirt actual '%s' no soporta la selección de modelos de CPU"
-#: nova/virt/libvirt/driver.py:2918
+#: nova/virt/libvirt/driver.py:3075
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
"La configuración ha solicitado un modelo de CPU personalizado, pero no se"
" ha proporcionado ningún nombre de modelo"
-#: nova/virt/libvirt/driver.py:2922
+#: nova/virt/libvirt/driver.py:3079
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
"No se debe establecer un nombre de modelo de CPU cuando se solicita un "
"modelo de CPU de host"
-#: nova/virt/libvirt/driver.py:2942
-msgid ""
-"Passthrough of the host CPU was requested but this libvirt version does "
-"not support this feature"
-msgstr ""
-"Se ha solicitado el paso a través de la CPU de host pero esta versión de "
-"libvirt no soporta esta función"
-
-#: nova/virt/libvirt/driver.py:3475
+#: nova/virt/libvirt/driver.py:3689
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_id)s: [Error Code "
@@ -8928,7 +8541,7 @@ msgstr ""
"Error de libvirt durante la búsqueda de %(instance_id)s: [Código de Error"
" %(error_code)s] %(ex)s"
-#: nova/virt/libvirt/driver.py:3496
+#: nova/virt/libvirt/driver.py:3710
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
@@ -8937,27 +8550,27 @@ msgstr ""
"Error de libvirt al buscar %(instance_name)s: [Código de error "
"%(error_code)s] %(ex)s"
-#: nova/virt/libvirt/driver.py:3760
+#: nova/virt/libvirt/driver.py:3976
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr "Configuración vcpu_pin_set inválida, fuera de rango de cpu de hipervisor."
-#: nova/virt/libvirt/driver.py:3890
+#: nova/virt/libvirt/driver.py:4101
msgid "libvirt version is too old (does not support getVersion)"
msgstr "La versión libvirt es demasiado antigua (no soporta getVersion)"
-#: nova/virt/libvirt/driver.py:4251
+#: nova/virt/libvirt/driver.py:4462
msgid "Block migration can not be used with shared storage."
msgstr ""
"No se puede utilizar la migración de bloque con almacenamiento "
"compartido. "
-#: nova/virt/libvirt/driver.py:4259
+#: nova/virt/libvirt/driver.py:4471
msgid "Live migration can not be used without shared storage."
msgstr ""
"No se puede utilizar la migración en directo con almacenamiento "
"compartido."
-#: nova/virt/libvirt/driver.py:4303
+#: nova/virt/libvirt/driver.py:4541
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
@@ -8967,7 +8580,7 @@ msgstr ""
"demasiado grande (disponible en host de destino: %(available)s < "
"necesario: %(necessary)s)"
-#: nova/virt/libvirt/driver.py:4342
+#: nova/virt/libvirt/driver.py:4580
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -8982,12 +8595,40 @@ msgstr ""
"\n"
"Consulte %(u)s"
-#: nova/virt/libvirt/driver.py:4409
+#: nova/virt/libvirt/driver.py:4643
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr "El filtro de cortafuegos para %s no existe "
-#: nova/virt/libvirt/driver.py:4900
+#: nova/virt/libvirt/driver.py:4706
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag "
+"or your destination node does not support retrieving listen addresses. "
+"In order for live migration to work properly, you must configure the "
+"graphics (VNC and/or SPICE) listen addresses to be either the catch-all "
+"address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:4723
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag,"
+" and the graphics (VNC and/or SPICE) listen addresses on the destination"
+" node do not match the addresses on the source node. Since the source "
+"node has listen addresses set to either the catch-all address (0.0.0.0 or"
+" ::) or the local address (127.0.0.1 or ::1), the live migration will "
+"succeed, but the VM will continue to listen on the current addresses."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:5100
+#, python-format
+msgid ""
+"Error from libvirt while getting description of %(instance_name)s: [Error"
+" Code %(error_code)s] %(ex)s"
+msgstr ""
+"Error de libvirt al obtener la descripción de %(instance_name)s: [Código "
+"de error %(error_code)s] %(ex)s"
+
+#: nova/virt/libvirt/driver.py:5226
msgid "Unable to resize disk down."
msgstr "Incapaz de reducir el tamaño del disco."
@@ -9000,26 +8641,38 @@ msgstr "No se puede cargar la linea %(line)s, se ha obtenido el error %(error)s"
msgid "Attempted overwrite of an existing value."
msgstr "Se ha intentado sobreescribir un valor ya existente."
-#: nova/virt/libvirt/imagebackend.py:429
+#: nova/virt/libvirt/imagebackend.py:316
+msgid "clone() is not implemented"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:449
msgid "You should specify images_volume_group flag to use LVM images."
msgstr ""
"Debes especificar la bandera images_volue_group para utilizar imagenes "
"LVM."
-#: nova/virt/libvirt/imagebackend.py:544
+#: nova/virt/libvirt/imagebackend.py:522
msgid "You should specify images_rbd_pool flag to use rbd images."
msgstr "Debes especificar la bandera images_rbd_pool para utilizar imagenes rbd."
-#: nova/virt/libvirt/imagebackend.py:658
-msgid "rbd python libraries not found"
-msgstr "Las librerías rbd python no han sido encontradas"
+#: nova/virt/libvirt/imagebackend.py:612
+msgid "installed version of librbd does not support cloning"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:623
+msgid "Image is not raw format"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:631
+msgid "No image locations are accessible"
+msgstr ""
-#: nova/virt/libvirt/imagebackend.py:697
+#: nova/virt/libvirt/imagebackend.py:651
#, python-format
msgid "Unknown image_type=%s"
msgstr "image_type=%s desconocido "
-#: nova/virt/libvirt/lvm.py:55
+#: nova/virt/libvirt/lvm.py:54
#, python-format
msgid ""
"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db "
@@ -9028,111 +8681,105 @@ msgstr ""
"Espacio insuficiente en grupo de volumen %(vg)s. Sólo %(free_space)db "
"disponibles, pero se necesitan %(size)db para el volumen %(lv)s."
-#: nova/virt/libvirt/lvm.py:103
+#: nova/virt/libvirt/lvm.py:102
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "El grupo de volúmenes %s debe ser el grupo de volúmenes LVM"
-#: nova/virt/libvirt/lvm.py:146
+#: nova/virt/libvirt/lvm.py:145
#, python-format
msgid "Path %s must be LVM logical volume"
msgstr "La vía de acceso %s debe ser el volumen lógico LVM"
-#: nova/virt/libvirt/lvm.py:222
+#: nova/virt/libvirt/lvm.py:221
#, python-format
msgid "volume_clear='%s' is not handled"
msgstr "volume_clear='%s' no está manejado"
+#: nova/virt/libvirt/rbd_utils.py:104
+msgid "rbd python libraries not found"
+msgstr "Las librerías rbd python no han sido encontradas"
+
+#: nova/virt/libvirt/rbd_utils.py:159
+msgid "Not stored in rbd"
+msgstr "No está almacenado en rbd"
+
+#: nova/virt/libvirt/rbd_utils.py:163
+msgid "Blank components"
+msgstr "Componentes en blanco"
+
+#: nova/virt/libvirt/rbd_utils.py:166
+msgid "Not an rbd snapshot"
+msgstr "No es una instantánea rbd"
+
#: nova/virt/libvirt/utils.py:79
msgid "Cannot find any Fibre Channel HBAs"
msgstr "No se puede encontrar ningún HBA de canal de fibra"
-#: nova/virt/libvirt/utils.py:431
+#: nova/virt/libvirt/utils.py:391
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
"No se puede recuperar la vía de acceso ed dispositivo raíz de la "
"configuración de libvirt de instancia"
-#: nova/virt/libvirt/vif.py:353 nova/virt/libvirt/vif.py:608
-#: nova/virt/libvirt/vif.py:797
+#: nova/virt/libvirt/vif.py:322 nova/virt/libvirt/vif.py:508
+#: nova/virt/libvirt/vif.py:652
msgid "vif_type parameter must be present for this vif_driver implementation"
msgstr ""
"El parámetro vif_type debe estar presente para esta implementación de "
"vif_driver"
-#: nova/virt/libvirt/vif.py:397 nova/virt/libvirt/vif.py:628
-#: nova/virt/libvirt/vif.py:817
+#: nova/virt/libvirt/vif.py:328 nova/virt/libvirt/vif.py:514
+#: nova/virt/libvirt/vif.py:658
#, python-format
msgid "Unexpected vif_type=%s"
msgstr "vif_type=%s inesperado"
-#: nova/virt/libvirt/volume.py:291
+#: nova/virt/libvirt/volume.py:294
#, python-format
msgid "iSCSI device not found at %s"
msgstr "No se ha encontrado el dispositivo iSCSI en %s"
-#: nova/virt/libvirt/volume.py:737
+#: nova/virt/libvirt/volume.py:740
#, python-format
msgid "AoE device not found at %s"
msgstr "No se ha encontrado el dispositivo AoE en %s"
-#: nova/virt/libvirt/volume.py:909
+#: nova/virt/libvirt/volume.py:912
msgid "We are unable to locate any Fibre Channel devices"
msgstr "No se puede localizar ningún dispositivo de canal de fibra"
-#: nova/virt/libvirt/volume.py:928
+#: nova/virt/libvirt/volume.py:931
msgid "Fibre Channel device not found."
msgstr "No se ha encontrado el dispositivo de canal de fibra."
-#: nova/virt/vmwareapi/driver.py:103
-msgid ""
-"The VMware ESX driver is now deprecated and will be removed in the Juno "
-"release. The VC driver will remain and continue to be supported."
-msgstr ""
-"El controlador de VMware ESX esta ahora obsoleto y será removido en la "
-"liberación Juno. El controlador CV se mantendrá y seguirá siendo "
-"soportado."
-
-#: nova/virt/vmwareapi/driver.py:115
+#: nova/virt/vmwareapi/driver.py:125
msgid ""
"Must specify host_ip, host_username and host_password to use "
-"compute_driver=vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver"
+"vmwareapi.VMwareVCDriver"
msgstr ""
-"Se debe especificar host_ip, host_username y host_password para usar "
-"compute_driver=vmwareapi.VMwareESXDriver o vmwareapi.VMwareVCDriver"
-#: nova/virt/vmwareapi/driver.py:127
+#: nova/virt/vmwareapi/driver.py:134
#, python-format
msgid "Invalid Regular Expression %s"
msgstr "La expresión regular %s es inválida"
-#: nova/virt/vmwareapi/driver.py:242
-msgid "Instance cannot be found in host, or in an unknownstate."
-msgstr ""
-"La instancia no se puede encontrar en el anfitrión o en un estado "
-"desconocido"
-
-#: nova/virt/vmwareapi/driver.py:398
+#: nova/virt/vmwareapi/driver.py:148
#, python-format
msgid "All clusters specified %s were not found in the vCenter"
msgstr "Todos los clusters especificados %s no fueron encontrados en vCenter"
-#: nova/virt/vmwareapi/driver.py:407
-#, python-format
-msgid "The following clusters could not be found in the vCenter %s"
-msgstr "Los siguientes clusters no pueden ser encontrados en el vcenter %s"
-
-#: nova/virt/vmwareapi/driver.py:544
+#: nova/virt/vmwareapi/driver.py:342
#, python-format
msgid "The resource %s does not exist"
msgstr "El recurso %s no existe"
-#: nova/virt/vmwareapi/driver.py:590
+#: nova/virt/vmwareapi/driver.py:404
#, python-format
msgid "Invalid cluster or resource pool name : %s"
msgstr "Cluster o nombre de pool de recursos inválido: %s"
-#: nova/virt/vmwareapi/driver.py:757
+#: nova/virt/vmwareapi/driver.py:582
msgid ""
"Multiple hosts may be managed by the VMWare vCenter driver; therefore we "
"do not return uptime for just one host."
@@ -9141,197 +8788,154 @@ msgstr ""
"vCenter de VMware; por lo tanto no se puede regresar tiempo de ejecución "
"solamente para un huésped."
-#: nova/virt/vmwareapi/driver.py:845
-#, python-format
-msgid ""
-"Unable to connect to server at %(server)s, sleeping for %(seconds)s "
-"seconds"
-msgstr ""
-"Incapaz de conectar al servidor en %(server)s, esperando durante "
-"%(seconds)s segundos"
-
-#: nova/virt/vmwareapi/driver.py:865
+#: nova/virt/vmwareapi/driver.py:705
#, python-format
msgid "Unable to validate session %s!"
msgstr "Incapaz de validar sesión %s!"
-#: nova/virt/vmwareapi/driver.py:906
+#: nova/virt/vmwareapi/driver.py:747
#, python-format
msgid "Session %s is inactive!"
msgstr "La sesión %s se encuentra inactiva!"
-#: nova/virt/vmwareapi/driver.py:954
-#, python-format
-msgid "In vmwareapi: _call_method (session=%s)"
-msgstr "En vmwareapi: _call_method (session=%s)"
-
-#: nova/virt/vmwareapi/driver.py:998
+#: nova/virt/vmwareapi/driver.py:838
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
msgstr "Tarea [%(task_name)s] %(task_ref)s estado: error %(error_info)s"
-#: nova/virt/vmwareapi/driver.py:1008
+#: nova/virt/vmwareapi/driver.py:848
#, python-format
msgid "In vmwareapi:_poll_task, Got this error %s"
msgstr "En vmwareapi:_poll_task, se ha obtenido este error %s"
-#: nova/virt/vmwareapi/ds_util.py:38
+#: nova/virt/vmwareapi/ds_util.py:41
msgid "Datastore name cannot be None"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:40
+#: nova/virt/vmwareapi/ds_util.py:43
msgid "Datastore reference cannot be None"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:42
+#: nova/virt/vmwareapi/ds_util.py:45
msgid "Invalid capacity"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:45
+#: nova/virt/vmwareapi/ds_util.py:48
msgid "Capacity is smaller than free space"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:106
+#: nova/virt/vmwareapi/ds_util.py:111
msgid "datastore name empty"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:111
+#: nova/virt/vmwareapi/ds_util.py:116 nova/virt/vmwareapi/ds_util.py:148
msgid "path component cannot be None"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:144
+#: nova/virt/vmwareapi/ds_util.py:162
msgid "datastore path empty"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:46
+#: nova/virt/vmwareapi/error_util.py:45
msgid "exception_summary must not be a list"
msgstr "exception_summary no debe ser una lista"
-#: nova/virt/vmwareapi/error_util.py:76
+#: nova/virt/vmwareapi/error_util.py:75
msgid "fault_list must be a list"
msgstr "fault_list debe ser una lista"
-#: nova/virt/vmwareapi/error_util.py:122
+#: nova/virt/vmwareapi/error_util.py:121
#, python-format
msgid "Error(s) %s occurred in the call to RetrievePropertiesEx"
msgstr "El(los) Error(es) %s han ocurrido en la llamada de RetrievePropertiesEx"
-#: nova/virt/vmwareapi/error_util.py:136
+#: nova/virt/vmwareapi/error_util.py:135
msgid "VMware Driver fault."
msgstr "Falla de controlador de VMware"
-#: nova/virt/vmwareapi/error_util.py:142
+#: nova/virt/vmwareapi/error_util.py:141
msgid "VMware Driver configuration fault."
msgstr "Falla de configuración de de controlador de VMware"
-#: nova/virt/vmwareapi/error_util.py:146
+#: nova/virt/vmwareapi/error_util.py:145
msgid "No default value for use_linked_clone found."
msgstr "No se ha encontrado un valor predeterminado para used_linked_clone"
-#: nova/virt/vmwareapi/error_util.py:150
+#: nova/virt/vmwareapi/error_util.py:149
#, python-format
msgid "Missing parameter : %(param)s"
msgstr "Parámetro omitido : %(param)s"
-#: nova/virt/vmwareapi/error_util.py:154
+#: nova/virt/vmwareapi/error_util.py:153
msgid "No root disk defined."
msgstr "No se ha definido un disco raíz."
-#: nova/virt/vmwareapi/error_util.py:158
+#: nova/virt/vmwareapi/error_util.py:157
msgid "Resource already exists."
msgstr "El recurso ya existe."
-#: nova/virt/vmwareapi/error_util.py:163
+#: nova/virt/vmwareapi/error_util.py:162
msgid "Cannot delete file."
msgstr "No se puede eliminar el archivo."
-#: nova/virt/vmwareapi/error_util.py:168
+#: nova/virt/vmwareapi/error_util.py:167
msgid "File already exists."
msgstr "El archivo ya existe."
-#: nova/virt/vmwareapi/error_util.py:173
+#: nova/virt/vmwareapi/error_util.py:172
msgid "File fault."
msgstr "Fallo de archivo."
-#: nova/virt/vmwareapi/error_util.py:178
+#: nova/virt/vmwareapi/error_util.py:177
msgid "File locked."
msgstr "Archivo bloqueado."
-#: nova/virt/vmwareapi/error_util.py:183
+#: nova/virt/vmwareapi/error_util.py:182
msgid "File not found."
msgstr "Archivo no encontrado."
-#: nova/virt/vmwareapi/error_util.py:188
+#: nova/virt/vmwareapi/error_util.py:187
msgid "Invalid property."
msgstr "Propiedad inválida."
-#: nova/virt/vmwareapi/error_util.py:193
+#: nova/virt/vmwareapi/error_util.py:192
msgid "No Permission."
msgstr "Sin permiso."
-#: nova/virt/vmwareapi/error_util.py:198
+#: nova/virt/vmwareapi/error_util.py:197
msgid "Not Authenticated."
msgstr "No autenticado."
-#: nova/virt/vmwareapi/error_util.py:203
+#: nova/virt/vmwareapi/error_util.py:202
msgid "Invalid Power State."
msgstr "Estado de energia inválido."
-#: nova/virt/vmwareapi/error_util.py:228
+#: nova/virt/vmwareapi/error_util.py:227
#, python-format
msgid "Fault %s not matched."
msgstr "El fallo %s no ha coincidido."
-#: nova/virt/vmwareapi/fake.py:243
-#, python-format
-msgid "Property %(attr)s not set for the managed object %(name)s"
-msgstr ""
-"La propiedad %(attr)s no se ha establecido para el objeto gestionado "
-"%(name)s"
-
-#: nova/virt/vmwareapi/fake.py:967
-msgid "There is no VM registered"
-msgstr "No hay ninguna VM registrada"
-
-#: nova/virt/vmwareapi/fake.py:969 nova/virt/vmwareapi/fake.py:1290
-#, python-format
-msgid "Virtual Machine with ref %s is not there"
-msgstr "La máquina virtual con la referencia %s no está allí"
-
-#: nova/virt/vmwareapi/fake.py:1052
-#, python-format
-msgid "Logging out a session that is invalid or already logged out: %s"
-msgstr "Finalizando sesión que no es válida o que ya ha finalizado: %s"
-
-#: nova/virt/vmwareapi/fake.py:1070
-msgid "Session Invalid"
-msgstr "Sesión no válida"
-
-#: nova/virt/vmwareapi/fake.py:1287
-msgid "No Virtual Machine has been registered yet"
-msgstr "No se ha registrado aún ninguna máquina virtual "
-
#: nova/virt/vmwareapi/imagecache.py:74
#, python-format
msgid "Unable to delete %(file)s. Exception: %(ex)s"
msgstr "Incapaz de remover %(file)s. Excepción: %(ex)s"
-#: nova/virt/vmwareapi/imagecache.py:148
+#: nova/virt/vmwareapi/imagecache.py:147
#, python-format
msgid "Image %s is no longer used by this node. Pending deletion!"
msgstr "La imagen %s ya no está en uso por este nodo. Remoción pendiente!"
-#: nova/virt/vmwareapi/imagecache.py:153
+#: nova/virt/vmwareapi/imagecache.py:152
#, python-format
msgid "Image %s is no longer used. Deleting!"
msgstr "La imagen %s ya no está en uso. Eliminando!"
-#: nova/virt/vmwareapi/io_util.py:121
+#: nova/virt/vmwareapi/io_util.py:122
#, python-format
msgid "Glance image %s is in killed state"
msgstr "La imagen Glance %s está en estado de matado"
-#: nova/virt/vmwareapi/io_util.py:129
+#: nova/virt/vmwareapi/io_util.py:130
#, python-format
msgid "Glance image %(image_id)s is in unknown state - %(state)s"
msgstr "La imagen Glance %(image_id)s está en estado desconocido - %(state)s"
@@ -9392,50 +8996,49 @@ msgstr "Excepción en %s "
msgid "Unable to retrieve value for %(path)s Reason: %(reason)s"
msgstr "Incapaz de obtener valor de %(path)s Razón: %(reason)s"
-#: nova/virt/vmwareapi/vm_util.py:195
+#: nova/virt/vmwareapi/vm_util.py:202
#, python-format
msgid "%s is not supported."
msgstr "%s no está soportada."
-#: nova/virt/vmwareapi/vm_util.py:980
+#: nova/virt/vmwareapi/vm_util.py:1037
msgid "No host available on cluster"
msgstr "No hay anfitrión disponible en cluster."
-#: nova/virt/vmwareapi/vm_util.py:1210
+#: nova/virt/vmwareapi/vm_util.py:1131
#, python-format
msgid "Failed to get cluster references %s"
msgstr "Fallo al obtener las referencias del cluster %s"
-#: nova/virt/vmwareapi/vm_util.py:1222
+#: nova/virt/vmwareapi/vm_util.py:1143
#, python-format
msgid "Failed to get resource pool references %s"
msgstr "Fallo al obtener las referencias del pool de recursos %s"
-#: nova/virt/vmwareapi/vm_util.py:1404
+#: nova/virt/vmwareapi/vm_util.py:1334
msgid "vmwareapi:vm_util:clone_vmref_for_instance, called with vm_ref=None"
msgstr ""
"vmwareapi:vm_util:clone_vmref_for_instance, ha sido llamada con "
"vm_ref=None"
-#: nova/virt/vmwareapi/vmops.py:131
+#: nova/virt/vmwareapi/vmops.py:132
#, python-format
msgid "Extending virtual disk failed with error: %s"
msgstr "La extensión del disco virtual ha fallado con el error: %s"
-#: nova/virt/vmwareapi/vmops.py:246
+#: nova/virt/vmwareapi/vmops.py:252
msgid "Image disk size greater than requested disk size"
msgstr "La imagen de disco es más grande que el tamaño del disco solicitado"
-#: nova/virt/vmwareapi/vmops.py:471
-#, python-format
-msgid "Root disk file creation failed - %s"
-msgstr "Fallo al crear el archivo del disco raíz - %s"
-
-#: nova/virt/vmwareapi/vmops.py:813
+#: nova/virt/vmwareapi/vmops.py:859
msgid "instance is not powered on"
msgstr "instancia no activada"
-#: nova/virt/vmwareapi/vmops.py:869
+#: nova/virt/vmwareapi/vmops.py:887
+msgid "Instance does not exist on backend"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:914
#, python-format
msgid ""
"In vmwareapi:vmops:_destroy_instance, got this exception while un-"
@@ -9444,36 +9047,33 @@ msgstr ""
"En vmwareapi:vmops:_destroy_instance, se obtuvo esta excepción mientras "
"se removía el registro de VM: %s"
-#: nova/virt/vmwareapi/vmops.py:892
-#, python-format
+#: nova/virt/vmwareapi/vmops.py:937
msgid ""
-"In vmwareapi:vmops:_destroy_instance, got this exception while deleting "
-"the VM contents from the disk: %s"
+"In vmwareapi:vmops:_destroy_instance, exception while deleting the VM "
+"contents from the disk"
msgstr ""
-"En vmwareapi:vmops:_destroy_instance, se obtuvo esta excepción mientras "
-"se removía el contenido de la VM del disco: %s"
-#: nova/virt/vmwareapi/vmops.py:926
+#: nova/virt/vmwareapi/vmops.py:969
msgid "pause not supported for vmwareapi"
msgstr "pausa no soportada para vmwareapi"
-#: nova/virt/vmwareapi/vmops.py:930
+#: nova/virt/vmwareapi/vmops.py:973
msgid "unpause not supported for vmwareapi"
msgstr "cancelación de pausa no soportada para vmwareapi"
-#: nova/virt/vmwareapi/vmops.py:948
+#: nova/virt/vmwareapi/vmops.py:991
msgid "instance is powered off and cannot be suspended."
msgstr "instancia está desactivada y no se puede suspender. "
-#: nova/virt/vmwareapi/vmops.py:968
+#: nova/virt/vmwareapi/vmops.py:1011
msgid "instance is not in a suspended state"
msgstr "la instancia no está en un estado suspendido"
-#: nova/virt/vmwareapi/vmops.py:1056
-msgid "instance is suspended and cannot be powered off."
-msgstr "la instancia está suspendida y no se puede desactivar "
+#: nova/virt/vmwareapi/vmops.py:1111
+msgid "Unable to shrink disk."
+msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1147
+#: nova/virt/vmwareapi/vmops.py:1170
#, python-format
msgid ""
"In vmwareapi:vmops:confirm_migration, got this exception while destroying"
@@ -9482,29 +9082,39 @@ msgstr ""
"En vmwareapi:vmops:confirm_migration, se ha obtenido esta excepción al "
"destruir la máquina virtual: %s"
-#: nova/virt/vmwareapi/vmops.py:1213 nova/virt/xenapi/vmops.py:1497
+#: nova/virt/vmwareapi/vmops.py:1246 nova/virt/xenapi/vmops.py:1500
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
"Se han encontrado %(instance_count)d rearranques colgados de más de "
"%(timeout)d segundos"
-#: nova/virt/vmwareapi/vmops.py:1217 nova/virt/xenapi/vmops.py:1501
+#: nova/virt/vmwareapi/vmops.py:1250 nova/virt/xenapi/vmops.py:1504
msgid "Automatically hard rebooting"
msgstr "Rearrancando automáticamente de forma permanente"
-#: nova/virt/vmwareapi/volumeops.py:217 nova/virt/vmwareapi/volumeops.py:251
+#: nova/virt/vmwareapi/vmops.py:1568
+#, python-format
+msgid "No device with interface-id %s exists on VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1578
+#, python-format
+msgid "No device with MAC address %s exists on the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:340 nova/virt/vmwareapi/volumeops.py:375
#, python-format
msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
msgstr ""
"El punto de montaje %(mountpoint)s esta unido a la instancia "
"%(instance_name)s"
-#: nova/virt/vmwareapi/volumeops.py:239 nova/virt/vmwareapi/volumeops.py:414
+#: nova/virt/vmwareapi/volumeops.py:363 nova/virt/vmwareapi/volumeops.py:538
msgid "Unable to find iSCSI Target"
msgstr "No se puede encontrar el destino iSCSI "
-#: nova/virt/vmwareapi/volumeops.py:337
+#: nova/virt/vmwareapi/volumeops.py:461
#, python-format
msgid ""
"The volume's backing has been relocated to %s. Need to consolidate "
@@ -9513,11 +9123,11 @@ msgstr ""
"El volúmen de apoyo ha sido reubicado a %s. Se necesita consolidar el "
"archivo de disco de apoyo."
-#: nova/virt/vmwareapi/volumeops.py:375 nova/virt/vmwareapi/volumeops.py:422
+#: nova/virt/vmwareapi/volumeops.py:499 nova/virt/vmwareapi/volumeops.py:546
msgid "Unable to find volume"
msgstr "No se puede encontrar volumen"
-#: nova/virt/vmwareapi/volumeops.py:395 nova/virt/vmwareapi/volumeops.py:424
+#: nova/virt/vmwareapi/volumeops.py:519 nova/virt/vmwareapi/volumeops.py:548
#: nova/virt/xenapi/volumeops.py:148
#, python-format
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
@@ -9525,14 +9135,14 @@ msgstr ""
"El punto de montaje %(mountpoint)s se desligó de la instancia "
"%(instance_name)s"
-#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1768
+#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1777
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
"TIEMPO DE ESPERA EXCEDIDO: La llamada a %(method)s ha excedido el tiempo "
"de espera. args=%(args)r"
-#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1773
+#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1782
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
@@ -9541,7 +9151,7 @@ msgstr ""
"SIN IMPLEMENTAR: el agente no soporta la llamada a %(method)s. "
"args=%(args)r"
-#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1778
+#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1787
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr "La llamada a %(method)s ha devuelto un error: %(e)s. args=%(args)r"
@@ -9615,21 +9225,21 @@ msgstr ""
msgid "Failure while cleaning up attached VDIs"
msgstr "Error al limpiar VDI conectados "
-#: nova/virt/xenapi/driver.py:386
+#: nova/virt/xenapi/driver.py:390
#, python-format
msgid "Could not determine key: %s"
msgstr "No se ha podido determinar la clave: %s"
-#: nova/virt/xenapi/driver.py:632
+#: nova/virt/xenapi/driver.py:641
msgid "Host startup on XenServer is not supported."
msgstr "No se soporta el arranque de host en XenServer."
-#: nova/virt/xenapi/fake.py:812
+#: nova/virt/xenapi/fake.py:820
#, python-format
msgid "xenapi.fake does not have an implementation for %s"
msgstr "xenapi.fake no tiene una implementación para %s"
-#: nova/virt/xenapi/fake.py:920
+#: nova/virt/xenapi/fake.py:928
#, python-format
msgid ""
"xenapi.fake does not have an implementation for %s or it has been called "
@@ -9638,7 +9248,7 @@ msgstr ""
"xenapi.fake no tiene una implementación para %s o ha sido llamada con un "
"número incorrecto de argumentos"
-#: nova/virt/xenapi/host.py:74
+#: nova/virt/xenapi/host.py:73
#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database:"
@@ -9648,21 +9258,21 @@ msgstr ""
"encontrar en la base de datos, suponiendo que se trata de una máquina "
"virtual de trabajador y se salta la migración de ping a un nuevo host"
-#: nova/virt/xenapi/host.py:86
+#: nova/virt/xenapi/host.py:85
#, python-format
msgid "Aggregate for host %(host)s count not be found."
msgstr "No se ha podido encontrar el agregado para el host %(host)s. "
-#: nova/virt/xenapi/host.py:105
+#: nova/virt/xenapi/host.py:104
#, python-format
msgid "Unable to migrate VM %(vm_ref)s from %(host)s"
msgstr "Incapaz de migrar VM %(vm_ref)s desde %(host)s"
-#: nova/virt/xenapi/host.py:186
+#: nova/virt/xenapi/host.py:185
msgid "Failed to parse information about a pci device for passthrough"
msgstr "Fallo al pasar información sobre el dispositivo pci para el traspaso"
-#: nova/virt/xenapi/host.py:259
+#: nova/virt/xenapi/host.py:258
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to "
@@ -9671,16 +9281,16 @@ msgstr ""
"El nombre del anfitrión ha cambiado de %(old)s a %(new)s. Se requiere un "
"reinicio para hacer efecto."
-#: nova/virt/xenapi/host.py:284
+#: nova/virt/xenapi/host.py:283
#, python-format
msgid "Failed to extract instance support from %s"
msgstr "No se ha podido extraer el soporte de instancia de %s"
-#: nova/virt/xenapi/host.py:301
+#: nova/virt/xenapi/host.py:300
msgid "Unable to get updated status"
msgstr "No se puede obtener el estado actualizado"
-#: nova/virt/xenapi/host.py:304
+#: nova/virt/xenapi/host.py:303
#, python-format
msgid "The call to %(method)s returned an error: %(e)s."
msgstr "La llamada a %(method)s ha devuelto un error: %(e)s."
@@ -9758,7 +9368,7 @@ msgstr ""
"PIF %(pif_uuid)s para la red %(bridge)s tiene identificador de VLAN "
"%(pif_vlan)d. Se esperaba %(vlan_num)d"
-#: nova/virt/xenapi/vm_utils.py:208
+#: nova/virt/xenapi/vm_utils.py:210
#, python-format
msgid ""
"Device id %(id)s specified is not supported by hypervisor version "
@@ -9767,16 +9377,16 @@ msgstr ""
"El dispositivo con identificador %(id)s especificado no está soportado "
"por la versión del hipervisor %(version)s"
-#: nova/virt/xenapi/vm_utils.py:325 nova/virt/xenapi/vm_utils.py:340
+#: nova/virt/xenapi/vm_utils.py:328 nova/virt/xenapi/vm_utils.py:343
msgid "VM already halted, skipping shutdown..."
msgstr "VM ya se ha detenido, omitiendo la conclusión... "
-#: nova/virt/xenapi/vm_utils.py:392
+#: nova/virt/xenapi/vm_utils.py:395
#, python-format
msgid "VBD %s already detached"
msgstr "VBD %s ya se ha desconectado"
-#: nova/virt/xenapi/vm_utils.py:395
+#: nova/virt/xenapi/vm_utils.py:398
#, python-format
msgid ""
"VBD %(vbd_ref)s uplug failed with \"%(err)s\", attempt "
@@ -9785,36 +9395,36 @@ msgstr ""
"La desconexión del VBD %(vbd_ref)s ha fallado con \"%(err)s\", intento "
"%(num_attempt)d/%(max_attempts)d"
-#: nova/virt/xenapi/vm_utils.py:402
+#: nova/virt/xenapi/vm_utils.py:405
#, python-format
msgid "Unable to unplug VBD %s"
msgstr "Imposible desconectar VBD %s"
-#: nova/virt/xenapi/vm_utils.py:405
+#: nova/virt/xenapi/vm_utils.py:408
#, python-format
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr "Se ha alcanzado el número máximo de reintentos de desconectar VBD %s "
-#: nova/virt/xenapi/vm_utils.py:417
+#: nova/virt/xenapi/vm_utils.py:420
#, python-format
msgid "Unable to destroy VBD %s"
msgstr "Imposible destruir VBD %s"
-#: nova/virt/xenapi/vm_utils.py:470
+#: nova/virt/xenapi/vm_utils.py:473
#, python-format
msgid "Unable to destroy VDI %s"
msgstr "No se puede destruir VDI %s"
-#: nova/virt/xenapi/vm_utils.py:516
+#: nova/virt/xenapi/vm_utils.py:519
msgid "SR not present and could not be introduced"
msgstr "SR no está presente y no se ha podido introducir"
-#: nova/virt/xenapi/vm_utils.py:700
+#: nova/virt/xenapi/vm_utils.py:703
#, python-format
msgid "No primary VDI found for %s"
msgstr "No se ha encontrado VDI primario para %s"
-#: nova/virt/xenapi/vm_utils.py:792
+#: nova/virt/xenapi/vm_utils.py:795
#, python-format
msgid ""
"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s"
@@ -9823,12 +9433,12 @@ msgstr ""
"Solo los SRs basados en archivo (ext/NFS) están soportados por esta "
"característica. SR %(uuid)s es del tipo %(type)s"
-#: nova/virt/xenapi/vm_utils.py:871
+#: nova/virt/xenapi/vm_utils.py:874
#, python-format
msgid "Multiple base images for image: %s"
msgstr "Múltiple imágenes base para la imagen: %s"
-#: nova/virt/xenapi/vm_utils.py:926
+#: nova/virt/xenapi/vm_utils.py:929
#, python-format
msgid ""
"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor "
@@ -9837,31 +9447,31 @@ msgstr ""
"El VDI %(vdi_ref)s es de %(virtual_size)d bytes lo que es mayor que el "
"tamaño del sabor de %(new_disk_size)d bytes."
-#: nova/virt/xenapi/vm_utils.py:937 nova/virt/xenapi/vmops.py:1037
+#: nova/virt/xenapi/vm_utils.py:940 nova/virt/xenapi/vmops.py:1040
msgid "Can't resize a disk to 0 GB."
msgstr "No se puede cambiar el tamaño de archivo a 0 GB."
-#: nova/virt/xenapi/vm_utils.py:989
+#: nova/virt/xenapi/vm_utils.py:992
msgid "Disk must have only one partition."
msgstr "el disco debe tener una sola partición."
-#: nova/virt/xenapi/vm_utils.py:994
+#: nova/virt/xenapi/vm_utils.py:997
#, python-format
msgid "Disk contains a filesystem we are unable to resize: %s"
msgstr ""
"El disco contiene un sistema de archivos incapaz de modificar su tamaño: "
"%s"
-#: nova/virt/xenapi/vm_utils.py:999
+#: nova/virt/xenapi/vm_utils.py:1002
msgid "The only partition should be partition 1."
msgstr "La unica partición debe ser la partición 1."
-#: nova/virt/xenapi/vm_utils.py:1010
+#: nova/virt/xenapi/vm_utils.py:1013
#, python-format
msgid "Attempted auto_configure_disk failed because: %s"
msgstr "El intento de auto_configure_disk ha fallado por: %s"
-#: nova/virt/xenapi/vm_utils.py:1261
+#: nova/virt/xenapi/vm_utils.py:1264
#, python-format
msgid ""
"Fast cloning is only supported on default local SR of type ext. SR on "
@@ -9871,24 +9481,24 @@ msgstr ""
"ext. Se ha encontrado que los SR de este sistema son de tipo %s. "
"Ignorando el identificador cow."
-#: nova/virt/xenapi/vm_utils.py:1336
+#: nova/virt/xenapi/vm_utils.py:1339
#, python-format
msgid "Unrecognized cache_images value '%s', defaulting to True"
msgstr ""
"Valor cache_images no reconocido '%s', se toma True como valor "
"predeterminado"
-#: nova/virt/xenapi/vm_utils.py:1412
+#: nova/virt/xenapi/vm_utils.py:1415
#, python-format
msgid "Invalid value '%s' for torrent_images"
msgstr "valor inválido '%s' para torrent_images"
-#: nova/virt/xenapi/vm_utils.py:1435
+#: nova/virt/xenapi/vm_utils.py:1438
#, python-format
msgid "Invalid value '%d' for image_compression_level"
msgstr "Valor inválido '%d' para image_compression_level"
-#: nova/virt/xenapi/vm_utils.py:1461
+#: nova/virt/xenapi/vm_utils.py:1464
#, python-format
msgid ""
"Download handler '%(handler)s' raised an exception, falling back to "
@@ -9897,14 +9507,14 @@ msgstr ""
"La descarga del manejador '%(handler)s' ha arrojado una excepción, "
"restaurando hacia el manejador predeterminado '%(default_handler)s"
-#: nova/virt/xenapi/vm_utils.py:1517
+#: nova/virt/xenapi/vm_utils.py:1520
#, python-format
msgid "Image size %(size)d exceeded flavor allowed size %(allowed_size)d"
msgstr ""
"El tamaño de la imagen %(size)d excede el tamaño permitido por el sabor "
"%(allowed_size)d"
-#: nova/virt/xenapi/vm_utils.py:1568
+#: nova/virt/xenapi/vm_utils.py:1571
#, python-format
msgid ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
@@ -9913,26 +9523,26 @@ msgstr ""
"La imagen de kernel/disco RAM es demasiado grande: %(vdi_size)d bytes, "
"máx. %(max_size)d bytes"
-#: nova/virt/xenapi/vm_utils.py:1610
+#: nova/virt/xenapi/vm_utils.py:1613
msgid "Failed to fetch glance image"
msgstr "No se ha podido captar la imagen glance"
-#: nova/virt/xenapi/vm_utils.py:1818
+#: nova/virt/xenapi/vm_utils.py:1846
#, python-format
msgid "Unable to parse rrd of %s"
msgstr "Incapaz de analizar rrd de %s"
-#: nova/virt/xenapi/vm_utils.py:1848
+#: nova/virt/xenapi/vm_utils.py:1876
#, python-format
msgid "Retry SR scan due to error: %s"
msgstr "Reintentando escaneo de SR debido a error: %s"
-#: nova/virt/xenapi/vm_utils.py:1881
+#: nova/virt/xenapi/vm_utils.py:1909
#, python-format
msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
msgstr "El distintivo sr_matching_filter '%s' no respeta el convenio de formato"
-#: nova/virt/xenapi/vm_utils.py:1902
+#: nova/virt/xenapi/vm_utils.py:1930
msgid ""
"XenAPI is unable to find a Storage Repository to install guest instances "
"on. Please check your configuration (e.g. set a default SR for the pool) "
@@ -9943,11 +9553,11 @@ msgstr ""
"establece un SR predeterminado en el conjunto) y/o ocnfigura el "
"identificador 'sr_matching_filter'."
-#: nova/virt/xenapi/vm_utils.py:1915
+#: nova/virt/xenapi/vm_utils.py:1943
msgid "Cannot find SR of content-type ISO"
msgstr "No se puede encontrar SR de content-type ISO"
-#: nova/virt/xenapi/vm_utils.py:1968
+#: nova/virt/xenapi/vm_utils.py:1996
#, python-format
msgid ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
@@ -9956,22 +9566,22 @@ msgstr ""
"No se ha podido obtener XML RRD para la máquina virtual %(vm_uuid)s con "
"los detalles de servidor: %(server)s."
-#: nova/virt/xenapi/vm_utils.py:2096
+#: nova/virt/xenapi/vm_utils.py:2124
#, python-format
msgid "VHD coalesce attempts exceeded (%d), giving up..."
msgstr "Intentos de incorporación de VHD excedidos (%d), dejando de intentar..."
-#: nova/virt/xenapi/vm_utils.py:2131
+#: nova/virt/xenapi/vm_utils.py:2159
#, python-format
msgid "Timeout waiting for device %s to be created"
msgstr "Se ha excedido el tiempo esperando a que se creara el dispositivo %s"
-#: nova/virt/xenapi/vm_utils.py:2151
+#: nova/virt/xenapi/vm_utils.py:2179
#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr "Desconectando VDI obsoleto %s de domU de cálculo "
-#: nova/virt/xenapi/vm_utils.py:2309
+#: nova/virt/xenapi/vm_utils.py:2337
msgid ""
"Shrinking the filesystem down with resize2fs has failed, please check if "
"you have enough free space on your disk."
@@ -9979,40 +9589,40 @@ msgstr ""
"La reducción del sistema de archivos con resize2fs ha fallado, por favor "
"verifica si tienes espacio libre suficiente en tu disco."
-#: nova/virt/xenapi/vm_utils.py:2444
+#: nova/virt/xenapi/vm_utils.py:2472
msgid "Manipulating interface files directly"
msgstr "Manipulando archivos de interfaz directamente "
-#: nova/virt/xenapi/vm_utils.py:2453
+#: nova/virt/xenapi/vm_utils.py:2481
#, python-format
msgid "Failed to mount filesystem (expected for non-linux instances): %s"
msgstr ""
"No se ha podido montar sistema de archivos (se espera para instancias no "
"Linux): %s "
-#: nova/virt/xenapi/vm_utils.py:2564
+#: nova/virt/xenapi/vm_utils.py:2496
msgid "This domU must be running on the host specified by connection_url"
msgstr ""
"Este domU debe estar en ejecución en el anfitrión especificado por "
"connection_url"
-#: nova/virt/xenapi/vm_utils.py:2633
+#: nova/virt/xenapi/vm_utils.py:2565
msgid "Failed to transfer vhd to new host"
msgstr "No se ha podido transferir vhd al nuevo host"
-#: nova/virt/xenapi/vm_utils.py:2659
+#: nova/virt/xenapi/vm_utils.py:2591
msgid "ipxe_boot_menu_url not set, user will have to enter URL manually..."
msgstr ""
"ipxe_boot_menu_url no establecido, el usuario debe ingresar la URL "
"manualmente..."
-#: nova/virt/xenapi/vm_utils.py:2665
+#: nova/virt/xenapi/vm_utils.py:2597
msgid "ipxe_network_name not set, user will have to enter IP manually..."
msgstr ""
"ipxe_network_name no establecido, el usuario debe ingresar la dirección "
"IP manualmente..."
-#: nova/virt/xenapi/vm_utils.py:2676
+#: nova/virt/xenapi/vm_utils.py:2608
#, python-format
msgid ""
"Unable to find network matching '%(network_name)s', user will have to "
@@ -10021,7 +9631,7 @@ msgstr ""
"Incapaz de encontrar red coincidente '%(network_name)s', el usuario "
"deberá introducir una dirección IP manualmente..."
-#: nova/virt/xenapi/vm_utils.py:2700
+#: nova/virt/xenapi/vm_utils.py:2632
#, python-format
msgid "ISO creation tool '%s' does not exist."
msgstr "La herramienta de creación de ISO '%s' no esiste."
@@ -10030,42 +9640,42 @@ msgstr "La herramienta de creación de ISO '%s' no esiste."
msgid "Error: Agent is disabled"
msgstr "Error: El agente está inhabilitado"
-#: nova/virt/xenapi/vmops.py:375
+#: nova/virt/xenapi/vmops.py:378
msgid "ipxe_boot is True but no ISO image found"
msgstr "ipxe_boot establecido en True pero no se ha encontrado imagen ISO"
-#: nova/virt/xenapi/vmops.py:518
+#: nova/virt/xenapi/vmops.py:521
msgid "Failed to spawn, rolling back"
msgstr "No se ha podido generar, retrotrayendo"
-#: nova/virt/xenapi/vmops.py:783
+#: nova/virt/xenapi/vmops.py:786
msgid "Unable to terminate instance."
msgstr "Incapaz de terminar instancia."
-#: nova/virt/xenapi/vmops.py:835
+#: nova/virt/xenapi/vmops.py:838
#, python-format
msgid "_migrate_disk_resizing_down failed. Restoring orig vm due_to: %s."
msgstr "_migrate_disk_resizing_down ha fallado. Restaurando vm original due_to: %s"
-#: nova/virt/xenapi/vmops.py:989
+#: nova/virt/xenapi/vmops.py:992
#, python-format
msgid "_migrate_disk_resizing_up failed. Restoring orig vm due_to: %s."
msgstr "_migrate_disk_resizing_up fallido. Restaurando vm original due_to: %s."
-#: nova/virt/xenapi/vmops.py:996
+#: nova/virt/xenapi/vmops.py:999
#, python-format
msgid "_migrate_disk_resizing_up failed to rollback: %s"
msgstr "_migrate_disk_rezising_up fallido al revertir: %s"
-#: nova/virt/xenapi/vmops.py:1013
+#: nova/virt/xenapi/vmops.py:1016
msgid "Can't resize down ephemeral disks."
msgstr "No se puede reducir el tamaño de los discos efímeros."
-#: nova/virt/xenapi/vmops.py:1124
+#: nova/virt/xenapi/vmops.py:1127
msgid "Starting halted instance found during reboot"
msgstr "Iniciando instancia detenida encontrada durante rearranque"
-#: nova/virt/xenapi/vmops.py:1130
+#: nova/virt/xenapi/vmops.py:1133
msgid ""
"Reboot failed due to bad volumes, detaching bad volumes and starting "
"halted instance"
@@ -10073,65 +9683,65 @@ msgstr ""
"Se ha encontrado un error en el rearranque debido a volúmenes erróneos; "
"se van a desconectar los volúmenes erróneos e iniciar la instancia parada"
-#: nova/virt/xenapi/vmops.py:1208
+#: nova/virt/xenapi/vmops.py:1211
msgid "Unable to update metadata, VM not found."
msgstr "Incapaz de actualizar metadatos, la VM no ha sido encontrada."
-#: nova/virt/xenapi/vmops.py:1254
+#: nova/virt/xenapi/vmops.py:1257
msgid "Unable to find root VBD/VDI for VM"
msgstr "No se puede encontrar VBD/VDI de raíz para VM"
-#: nova/virt/xenapi/vmops.py:1292
+#: nova/virt/xenapi/vmops.py:1295
msgid "instance has a kernel or ramdisk but not both"
msgstr "la instancia tiene un kernel o un disco RAM, pero no ambos"
-#: nova/virt/xenapi/vmops.py:1326
+#: nova/virt/xenapi/vmops.py:1329
msgid "Destroying VM"
msgstr "Destruyendo VM "
-#: nova/virt/xenapi/vmops.py:1355
+#: nova/virt/xenapi/vmops.py:1358
msgid "VM is not present, skipping destroy..."
msgstr "VM no está presente, omitiendo destrucción... "
-#: nova/virt/xenapi/vmops.py:1406
+#: nova/virt/xenapi/vmops.py:1409
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr "La instancia ya está en modalidad de rescate: %s "
-#: nova/virt/xenapi/vmops.py:1448
+#: nova/virt/xenapi/vmops.py:1451
msgid "VM is not present, skipping soft delete..."
msgstr "VM no está presente, omitiendo supresión no permanente... "
-#: nova/virt/xenapi/vmops.py:1834
+#: nova/virt/xenapi/vmops.py:1843
#, python-format
msgid "Destination host:%s must be in the same aggregate as the source server"
msgstr ""
"El anfitrión destino: %s debe estar en el mismo agregado que el servidor "
"fuente"
-#: nova/virt/xenapi/vmops.py:1855
+#: nova/virt/xenapi/vmops.py:1864
msgid "No suitable network for migrate"
msgstr "No hay red adecuada para migrar"
-#: nova/virt/xenapi/vmops.py:1861
+#: nova/virt/xenapi/vmops.py:1870
#, python-format
msgid "PIF %s does not contain IP address"
msgstr "PIC %s no contiene una dirección IP"
-#: nova/virt/xenapi/vmops.py:1874
+#: nova/virt/xenapi/vmops.py:1883
msgid "Migrate Receive failed"
msgstr "Ha fallado la recepción de migración"
-#: nova/virt/xenapi/vmops.py:1948
+#: nova/virt/xenapi/vmops.py:1957
msgid "XAPI supporting relax-xsm-sr-check=true required"
msgstr "Se requiere una XAPI que soporte relax-xsm-sr-check=true"
-#: nova/virt/xenapi/vmops.py:1959
+#: nova/virt/xenapi/vmops.py:1968
#, python-format
msgid "assert_can_migrate failed because: %s"
msgstr "assert_can_migrate ha fallado debido a: %s"
-#: nova/virt/xenapi/vmops.py:2019
+#: nova/virt/xenapi/vmops.py:2028
msgid "Migrate Send failed"
msgstr "Ha fallado el envío de migración"
@@ -10188,6 +9798,11 @@ msgstr "Punto de montaje no puede ser traducido: %s"
msgid "Unable to find SR from VBD %s"
msgstr "Imposible encontrar SR en VBD %s"
+#: nova/virt/xenapi/volume_utils.py:311
+#, python-format
+msgid "Unable to find SR from VDI %s"
+msgstr ""
+
#: nova/virt/xenapi/volumeops.py:63
#, python-format
msgid "Connected volume (vdi_uuid): %s"
@@ -10274,11 +9889,16 @@ msgstr "Error inesperado: %s "
msgid "Starting nova-xvpvncproxy node (version %s)"
msgstr "Iniciando el nodo nova-xvpvncproxy (versión %s)"
-#: nova/volume/cinder.py:236
+#: nova/volume/cinder.py:257
+#, python-format
+msgid "Invalid client version, must be one of: %s"
+msgstr ""
+
+#: nova/volume/cinder.py:281
msgid "status must be 'in-use'"
msgstr "el estado debe estar 'in-use'"
-#: nova/volume/cinder.py:242
+#: nova/volume/cinder.py:287
msgid "status must be 'available'"
msgstr "el estado debe ser 'available'"
diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-critical.po b/nova/locale/fr/LC_MESSAGES/nova-log-critical.po
index cf97a0c368..254ad5bfbb 100644
--- a/nova/locale/fr/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/fr/LC_MESSAGES/nova-log-critical.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-24 06:06+0000\n"
+"POT-Creation-Date: 2014-08-12 06:05+0000\n"
"PO-Revision-Date: 2014-05-30 06:26+0000\n"
"Last-Translator: FULL NAME \n"
"Language-Team: French (http://www.transifex.com/projects/p/nova/language/"
@@ -19,5 +19,18 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
-#~ msgid "Dummy message for transifex setup."
-#~ msgstr "message fictif pour la configuration transifex"
+#: nova/api/openstack/__init__.py:331
+#, python-format
+msgid "Missing core API extensions: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:658
+#, python-format
+msgid ""
+"Unable to connect to server at %(server)s, sleeping for %(seconds)s seconds"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:767
+#, python-format
+msgid "In vmwareapi: _call_method (session=%s)"
+msgstr ""
diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-error.po b/nova/locale/fr/LC_MESSAGES/nova-log-error.po
index dbe999d332..55a80387db 100644
--- a/nova/locale/fr/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/fr/LC_MESSAGES/nova-log-error.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
+"POT-Creation-Date: 2014-08-18 06:04+0000\n"
"PO-Revision-Date: 2014-06-14 19:30+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: French (http://www.transifex.com/projects/p/nova/language/"
@@ -39,11 +39,305 @@ msgstr ""
msgid "Exception running %(name)s post-hook: %(obj)s"
msgstr ""
-#: nova/api/ec2/__init__.py:243
+#: nova/api/ec2/__init__.py:244
#, python-format
msgid "Keystone failure: %s"
msgstr ""
+#: nova/api/ec2/__init__.py:493
+#, python-format
+msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:520
+#, python-format
+msgid "Environment: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:155
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:212
+#, python-format
+msgid "Failed to get metadata for instance id: %s"
+msgstr ""
+
+#: nova/api/openstack/common.py:134
+#, python-format
+msgid ""
+"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad "
+"upgrade or db corrupted?"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:684
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:68
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:90
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:112
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:134
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:160
+#, python-format
+msgid "Error in migrate %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:179
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:198
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:215
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:234
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:392
+#, python-format
+msgid "Compute.api::resetState %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/multinic.py:85
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85
+msgid "Failed to get default networks"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
+msgid "Failed to update usages deallocating network."
+msgstr ""
+
+#: nova/compute/api.py:561
+msgid "Failed to set instance name using multi_instance_display_name_template."
+msgstr ""
+
+#: nova/compute/api.py:1429
+msgid ""
+"Something wrong happened when trying to delete snapshot from shelved "
+"instance."
+msgstr ""
+
+#: nova/compute/api.py:3732
+msgid "Failed to update usages deallocating security group"
+msgstr ""
+
+#: nova/compute/flavors.py:167
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: nova/compute/flavors.py:178
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: nova/compute/manager.py:366
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr ""
+
+#: nova/compute/manager.py:755
+msgid "Failed to check if instance shared"
+msgstr ""
+
+#: nova/compute/manager.py:821 nova/compute/manager.py:872
+msgid "Failed to complete a deletion"
+msgstr ""
+
+#: nova/compute/manager.py:913
+msgid "Failed to stop instance"
+msgstr ""
+
+#: nova/compute/manager.py:925
+msgid "Failed to start instance"
+msgstr ""
+
+#: nova/compute/manager.py:950
+msgid "Failed to revert crashed migration"
+msgstr ""
+
+#: nova/compute/manager.py:1364
+msgid "Failed to dealloc network for deleted instance"
+msgstr ""
+
+#: nova/compute/manager.py:1385
+msgid "Failed to dealloc network for failed instance"
+msgstr ""
+
+#: nova/compute/manager.py:1458 nova/compute/manager.py:3527
+msgid "Error trying to reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:1567
+#, python-format
+msgid "Instance failed network setup after %(attempts)d attempt(s)"
+msgstr ""
+
+#: nova/compute/manager.py:1761
+msgid "Instance failed block device setup"
+msgstr ""
+
+#: nova/compute/manager.py:1781 nova/compute/manager.py:2123
+#: nova/compute/manager.py:4071
+msgid "Instance failed to spawn"
+msgstr ""
+
+#: nova/compute/manager.py:1964
+msgid "Unexpected build failure, not rescheduling build."
+msgstr ""
+
+#: nova/compute/manager.py:2033 nova/compute/manager.py:2085
+msgid "Failed to allocate network(s)"
+msgstr ""
+
+#: nova/compute/manager.py:2111
+msgid "Failure prepping block device"
+msgstr ""
+
+#: nova/compute/manager.py:2144
+msgid "Failed to deallocate networks"
+msgstr ""
+
+#: nova/compute/manager.py:2374 nova/compute/manager.py:3718
+#: nova/compute/manager.py:5822
+msgid "Setting instance vm_state to ERROR"
+msgstr ""
+
+#: nova/compute/manager.py:2586 nova/compute/manager.py:4933
+#, python-format
+msgid "Failed to get compute_info for %s"
+msgstr ""
+
+#: nova/compute/manager.py:3013
+#, python-format
+msgid "set_admin_password failed: %s"
+msgstr ""
+
+#: nova/compute/manager.py:3098
+msgid "Error trying to Rescue Instance"
+msgstr ""
+
+#: nova/compute/manager.py:3724
+#, python-format
+msgid "Failed to rollback quota for failed finish_resize: %s"
+msgstr ""
+
+#: nova/compute/manager.py:4323
+#, python-format
+msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4362
+#, python-format
+msgid "Failed to detach volume %(volume_id)s from %(mp)s"
+msgstr ""
+
+#: nova/compute/manager.py:4441
+#, python-format
+msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
+msgstr ""
+
+#: nova/compute/manager.py:4448
+#, python-format
+msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4735
+#, python-format
+msgid "Pre live migration failed at %s"
+msgstr ""
+
+#: nova/compute/manager.py:5235
+msgid "Periodic task failed to offload instance."
+msgstr ""
+
+#: nova/compute/manager.py:5275
+#, python-format
+msgid "Failed to generate usage audit for instance on host %s"
+msgstr ""
+
+#: nova/compute/manager.py:5465
+msgid ""
+"Periodic sync_power_state task had an error while processing an instance."
+msgstr ""
+
+#: nova/compute/manager.py:5568 nova/compute/manager.py:5577
+#: nova/compute/manager.py:5608 nova/compute/manager.py:5619
+msgid "error during stop() in sync_power_state."
+msgstr ""
+
+#: nova/network/neutronv2/api.py:234
+#, python-format
+msgid "Neutron error creating port on network %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:418
+#, python-format
+msgid "Failed to update port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:425
+#, python-format
+msgid "Failed to delete port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524
+#, python-format
+msgid "Failed to delete neutron port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:697
+#, python-format
+msgid "Failed to access port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:931
+#, python-format
+msgid "Unable to access floating IP %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1065
+#, python-format
+msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1124
+#, python-format
+msgid "Unable to update host of port %s"
+msgstr ""
+
#: nova/objects/instance_fault.py:87
msgid "Failed to notify cells of instance fault"
msgstr "Impossible d'avertir les cellules de l'erreur d'instance"
@@ -58,35 +352,35 @@ msgstr "Exception d'origine en cours de suppression : %s"
msgid "Unexpected exception occurred %d time(s)... retrying."
msgstr "Exception inattendue survenue %d fois... Nouvel essai."
-#: nova/openstack/common/lockutils.py:120
+#: nova/openstack/common/lockutils.py:119
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/loopingcall.py:89
+#: nova/openstack/common/loopingcall.py:95
msgid "in fixed duration looping call"
msgstr "dans l'appel en boucle de durée fixe"
-#: nova/openstack/common/loopingcall.py:136
+#: nova/openstack/common/loopingcall.py:138
msgid "in dynamic looping call"
msgstr "dans l'appel en boucle dynamique"
-#: nova/openstack/common/periodic_task.py:179
+#: nova/openstack/common/periodic_task.py:202
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr "Erreur pendant %(full_task_name)s : %(e)s"
-#: nova/openstack/common/policy.py:511
+#: nova/openstack/common/policy.py:507
#, python-format
msgid "Failed to understand rule %s"
msgstr "Règle %s incompréhensible"
-#: nova/openstack/common/policy.py:521
+#: nova/openstack/common/policy.py:517
#, python-format
msgid "No handler for matches of kind %s"
msgstr "Aucun gestionnaire pour les correspondances de type %s"
-#: nova/openstack/common/policy.py:791
+#: nova/openstack/common/policy.py:787
#, python-format
msgid "Failed to understand rule %r"
msgstr "Règle %r incompréhensible"
@@ -116,170 +410,184 @@ msgstr "Exception BD encapsulée."
msgid "Failed to migrate to version %s on engine %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:625
+#: nova/pci/pci_stats.py:119
+msgid ""
+"Failed to allocate PCI devices for instance. Unassigning devices back to "
+"pools. This should not happen, since the scheduler should have accurate "
+"information, and allocation during claims is controlled via a hold on the "
+"compute node semaphore"
+msgstr ""
+
+#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109
+#, python-format
+msgid "PCI device %s not found"
+msgstr ""
+
+#: nova/virt/disk/api.py:388
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:639
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:749
+#: nova/virt/libvirt/driver.py:764
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:873
+#: nova/virt/libvirt/driver.py:927
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:889
-msgid "During wait destroy, instance disappeared."
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:951
+#: nova/virt/libvirt/driver.py:1005
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:977
+#: nova/virt/libvirt/driver.py:1033
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1389
+#: nova/virt/libvirt/driver.py:1444
msgid "attaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1414
+#: nova/virt/libvirt/driver.py:1471
msgid "detaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1663
+#: nova/virt/libvirt/driver.py:1726
msgid "Failed to send updated snapshot status to volume service."
msgstr ""
-#: nova/virt/libvirt/driver.py:1749
+#: nova/virt/libvirt/driver.py:1834
msgid ""
"Unable to create quiesced VM snapshot, attempting again with quiescing "
"disabled."
msgstr ""
-#: nova/virt/libvirt/driver.py:1755
+#: nova/virt/libvirt/driver.py:1840
msgid "Unable to create VM snapshot, failing volume_snapshot operation."
msgstr ""
-#: nova/virt/libvirt/driver.py:1804
+#: nova/virt/libvirt/driver.py:1889
msgid ""
"Error occurred during volume_snapshot_create, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:1951
+#: nova/virt/libvirt/driver.py:2111
msgid ""
"Error occurred during volume_snapshot_delete, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421
+#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:2542
+#: nova/virt/libvirt/driver.py:2705
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2693
+#: nova/virt/libvirt/driver.py:2873
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2786
+#: nova/virt/libvirt/driver.py:2966
#, python-format
msgid "Attaching PCI devices %(dev)s to %(dom)s failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:3553
+#: nova/virt/libvirt/driver.py:3783
#, python-format
-msgid "An error occurred while trying to define a domain with xml: %s"
+msgid "Error defining a domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3562
+#: nova/virt/libvirt/driver.py:3787
#, python-format
-msgid "An error occurred while trying to launch a defined domain with xml: %s"
+msgid "Error launching a defined domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3571
+#: nova/virt/libvirt/driver.py:3792
#, python-format
-msgid "An error occurred while enabling hairpin mode on domain with xml: %s"
+msgid "Error enabling hairpin mode with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3589
+#: nova/virt/libvirt/driver.py:3806
#, python-format
msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3904
+#: nova/virt/libvirt/driver.py:4115
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to take "
"effect."
msgstr ""
-#: nova/virt/libvirt/driver.py:4481
+#: nova/virt/libvirt/driver.py:4794
#, python-format
msgid "Live Migration failure: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5231
+#: nova/virt/libvirt/driver.py:5596
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:202
+#: nova/virt/libvirt/imagebackend.py:200
#, python-format
msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:230
+#: nova/virt/libvirt/imagebackend.py:227
#, python-format
msgid ""
"%(base)s virtual size %(base_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:501
-#, python-format
-msgid "error opening rbd image %s"
-msgstr ""
-
-#: nova/virt/libvirt/imagecache.py:130
+#: nova/virt/libvirt/imagecache.py:129
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:391
+#: nova/virt/libvirt/imagecache.py:390
#, python-format
msgid "image %(id)s at (%(base_file)s): image verification failed"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:448
+#: nova/virt/libvirt/imagecache.py:447
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/lvm.py:201
+#: nova/virt/libvirt/lvm.py:200
#, python-format
msgid "ignoring unrecognized volume_clear='%s' value"
msgstr ""
-#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572
-#: nova/virt/libvirt/vif.py:596
+#: nova/virt/libvirt/rbd_utils.py:62
+#, python-format
+msgid "error opening rbd image %s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474
+#: nova/virt/libvirt/vif.py:496
msgid "Failed while plugging vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676
-#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717
-#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762
-#: nova/virt/libvirt/vif.py:784
+#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560
+#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598
+#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639
msgid "Failed while unplugging vif"
msgstr ""
@@ -288,12 +596,28 @@ msgstr ""
msgid "Unknown content in connection_info/access_mode: %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:666
+#: nova/virt/libvirt/volume.py:669
#, python-format
msgid "Couldn't unmount the NFS share %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:815
+#: nova/virt/libvirt/volume.py:818
#, python-format
msgid "Couldn't unmount the GlusterFS share %s"
msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:508
+#, python-format
+msgid ""
+"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1551
+#, python-format
+msgid "Attaching network adapter failed. Exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1591
+#, python-format
+msgid "Detaching network adapter failed. Exception: %s"
+msgstr ""
diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-info.po b/nova/locale/fr/LC_MESSAGES/nova-log-info.po
index c1685db4d1..c7475440ae 100644
--- a/nova/locale/fr/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/fr/LC_MESSAGES/nova-log-info.po
@@ -7,8 +7,8 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
-"PO-Revision-Date: 2014-06-30 05:01+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
+"PO-Revision-Date: 2014-08-15 05:00+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: French (http://www.transifex.com/projects/p/nova/language/"
"fr/)\n"
@@ -19,27 +19,77 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "%(url)s retourné avec HTTP %(status)d"
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr "Erreur générée : %s"
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr "Exception HTTP générée : %s"
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr "Suppression du réseau avec l'ID %s"
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr "Restauration de l'état original de la machine virtuelle : '%s'"
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr "Eventlet backdoor en écoute sur le port %(port)s for process %(pid)d"
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr "Chemin de verrou créé: %s"
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr "Echec de la suppression du fichier %(file)s"
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr "Tâche périodique %(task)s ignorée car son intervalle est négatif"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "Tâche périodique %(task)s car elle est désactivée"
@@ -84,7 +134,7 @@ msgstr "%s interceptée, arrêt de l'enfant"
#: nova/openstack/common/service.py:403
msgid "Wait called after thread killed. Cleaning up."
-msgstr ""
+msgstr "Pause demandée après suppression de thread. Nettoyage."
#: nova/openstack/common/service.py:414
#, python-format
@@ -101,88 +151,102 @@ msgstr "Suppression ligne en double avec l'ID : %(id)s de la table : %(table)s"
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr "Instance détruite avec succès."
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr "L'instance peut être redémarrée."
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr "Tentative de redestruction de l'instance."
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr "Démarrage du processus d'instantané en temps réel"
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr "Démarrage du processus d'instantané à froid"
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr "Instantané extrait, démarrage du téléchargement d'image"
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr "Téléchargement d'image instantanée terminé"
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr "Instance redémarrée par logiciel avec succès."
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr "L'instance s'est arrêtée avec succès."
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr "L'instance a sans doute été redémarrée par logiciel ; retour en cours."
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr "L'instance a redémarré avec succès."
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr "Instance générée avec succès."
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr "data: %(data)r, fpath: %(fpath)r"
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr "Journal de console tronqué retourné, %d octets ignorés"
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr "Création de l'image"
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr "Utilisation de l'unité de config"
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr "Création de l'unité de config à %(path)s"
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
@@ -192,7 +256,7 @@ msgstr ""
"être détaché. Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s "
"Erreur=%(e)s"
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
@@ -201,49 +265,49 @@ msgstr ""
"Domaine introuvable dans libvirt pour l'instance %s. Impossible d'obtenir "
"les stats de bloc pour l'unité"
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr "L'instance s'exécute avec succès."
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr "setup_basic_filtering appelé dans nwfilter"
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr "Garantie des filtres statiques"
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
"Vous avez essayé d'annuler le filtre d'une instance qui n'est pas filtrée"
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr "Ecriture d'informations stockées dans %s"
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
@@ -251,27 +315,27 @@ msgstr ""
"image %(id)s à (%(base_file)s) : vérification d'image ignorée, aucun hachage "
"stocké"
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr "%(id)s (%(base_file)s) : génération d'un total de contrôle"
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr "Fichier de base trop jeune pour un retrait : %s"
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr "Retrait du fichier de base : %s"
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr "image %(id)s à (%(base_file)s) : vérification"
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
@@ -281,26 +345,26 @@ msgstr ""
"%(local)d local, %(remote)d sur d'autres noeuds partageant ce stockage "
"d'instance"
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr "Fichiers de base actifs : %s"
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr "Fichiers de base endommagés : %s"
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr "Fichiers de base pouvant être retirés : %s"
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/it/LC_MESSAGES/nova-log-info.po b/nova/locale/it/LC_MESSAGES/nova-log-info.po
index 97d107968f..23c0c4f2e5 100644
--- a/nova/locale/it/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/it/LC_MESSAGES/nova-log-info.po
@@ -7,8 +7,8 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
-"PO-Revision-Date: 2014-06-18 19:31+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
+"PO-Revision-Date: 2014-08-07 07:51+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Italian (http://www.transifex.com/projects/p/nova/language/"
"it/)\n"
@@ -19,28 +19,78 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr "%(url)s restituito con HTTP %(status)d"
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr "Errore generato: %s"
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr "Generata eccezione HTTP: %s"
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr "Eliminazione della rete con id %s"
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr ""
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
-msgstr ""
+msgstr "Preato percorso di blocco : %s"
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
-msgstr ""
+msgstr "Tentativo fallito nella rimozione di %(file)s"
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr ""
"Abbadono dell'attività periodica %(task)s perché l'intervalo è negativo"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "Abbadono dell'attività periodica %(task)s perché è disabilitata"
@@ -85,7 +135,7 @@ msgstr "Intercettato %s, arresto in corso dei children"
#: nova/openstack/common/service.py:403
msgid "Wait called after thread killed. Cleaning up."
-msgstr ""
+msgstr "Attendere la chiamata dopo l'uccisione de filo. Bonifica."
#: nova/openstack/common/service.py:414
#, python-format
@@ -95,150 +145,168 @@ msgstr "In attesa %d degli elementi secondari per uscire"
#: nova/openstack/common/db/sqlalchemy/utils.py:387
#, python-format
msgid "Deleting duplicated row with id: %(id)s from table: %(table)s"
-msgstr ""
+msgstr "Cancellata riga duplicata con id: %(id)s dalla tablella: %(table)s"
#: nova/scheduler/filters/utils.py:50
#, python-format
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
-msgid "Instance destroyed successfully."
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:904
-msgid "Instance may be started again."
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
msgstr ""
-#: nova/virt/libvirt/driver.py:914
-msgid "Going to destroy instance again."
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
msgstr ""
-#: nova/virt/libvirt/driver.py:1518
-msgid "Beginning live snapshot process"
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:948
+msgid "Instance destroyed successfully."
+msgstr "Istanza distrutta correttamente."
+
+#: nova/virt/libvirt/driver.py:958
+msgid "Instance may be started again."
+msgstr "L'istanza può essere avviata di nuovo."
+
+#: nova/virt/libvirt/driver.py:968
+msgid "Going to destroy instance again."
+msgstr "L'istanza verrà nuovamente distrutta."
+
+#: nova/virt/libvirt/driver.py:1576
+msgid "Beginning live snapshot process"
+msgstr "Inizio processo attivo istantanea"
+
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
-msgstr ""
+msgstr "Inizio processo di istantanea a freddo"
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
-msgstr ""
+msgstr "Istantanea estratta, inizio caricamento immagine"
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
-msgstr ""
+msgstr "Caricamento immagine istantanea completato"
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
-msgstr ""
+msgstr "Avvio a caldo dell'istanza eseguito correttamente."
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
-msgstr ""
+msgstr "Chiusura dell'istanza eseguita correttamente."
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
+"L'istanza potrebbe essere stat riavviata durante l'avvio a caldo, quindi "
+"ritornare adesso."
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
-msgstr ""
+msgstr "Istanza riavviata correttamente."
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
-msgstr ""
+msgstr "Istanza generata correttamente."
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
-msgstr ""
+msgstr "dati: %(data)r, fpath: %(fpath)r"
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
-msgstr ""
+msgstr "Restituito log della console troncato, %d byte ignorati"
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
-msgstr ""
+msgstr "Creazione immagine"
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
-msgstr ""
+msgstr "Utilizzo unità di config"
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
-msgstr ""
+msgstr "Creazione unità config in %(path)s"
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
"device"
msgstr ""
+"Impossibile trovare il dominio in libvirt per l'istanza %s. Impossibile "
+"ottenere le statistiche del blocco per l'unità"
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
-msgstr ""
+msgstr "Istanza in esecuzione correttamente."
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
-msgstr ""
+msgstr "Chiamato setup_basic_filtering in nwfilter"
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
-msgstr ""
+msgstr "Controllo dei filtri statici"
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
-msgstr ""
+msgstr "Si è tentato di rimuovere il filtro da un'istanza senza filtro"
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr "Scrittura informazioni memorizzate in %s"
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
@@ -246,27 +314,27 @@ msgstr ""
"immagine %(id)s in (%(base_file)s): verifica dell'immagine ignorata, nessun "
"hash memorizzato"
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr "%(id)s (%(base_file)s): generazione checksum"
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr "File di base troppo recente per essere rimosso: %s"
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr "Rimozione del file di base: %s"
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr "immagine %(id)s in (%(base_file)s): verifica"
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
@@ -275,26 +343,26 @@ msgstr ""
"immagine %(id)s in (%(base_file)s): in uso: in questo nodo %(local)d locale, "
"%(remote)d in altri nodi che condividono questa archiviazione dell'istanza"
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr "File di base attivi: %s"
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr "File di base danneggiato: %s"
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr "File di base rimovibili: %s"
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/ja/LC_MESSAGES/nova-log-error.po b/nova/locale/ja/LC_MESSAGES/nova-log-error.po
index ec595ace0f..a30dee0f20 100644
--- a/nova/locale/ja/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/ja/LC_MESSAGES/nova-log-error.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
+"POT-Creation-Date: 2014-08-18 06:04+0000\n"
"PO-Revision-Date: 2014-06-20 16:41+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Japanese (http://www.transifex.com/projects/p/nova/language/"
@@ -39,11 +39,305 @@ msgstr ""
msgid "Exception running %(name)s post-hook: %(obj)s"
msgstr ""
-#: nova/api/ec2/__init__.py:243
+#: nova/api/ec2/__init__.py:244
#, python-format
msgid "Keystone failure: %s"
msgstr ""
+#: nova/api/ec2/__init__.py:493
+#, python-format
+msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:520
+#, python-format
+msgid "Environment: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:155
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:212
+#, python-format
+msgid "Failed to get metadata for instance id: %s"
+msgstr ""
+
+#: nova/api/openstack/common.py:134
+#, python-format
+msgid ""
+"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad "
+"upgrade or db corrupted?"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:684
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:68
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:90
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:112
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:134
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:160
+#, python-format
+msgid "Error in migrate %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:179
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:198
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:215
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:234
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:392
+#, python-format
+msgid "Compute.api::resetState %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/multinic.py:85
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85
+msgid "Failed to get default networks"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
+msgid "Failed to update usages deallocating network."
+msgstr ""
+
+#: nova/compute/api.py:561
+msgid "Failed to set instance name using multi_instance_display_name_template."
+msgstr ""
+
+#: nova/compute/api.py:1429
+msgid ""
+"Something wrong happened when trying to delete snapshot from shelved "
+"instance."
+msgstr ""
+
+#: nova/compute/api.py:3732
+msgid "Failed to update usages deallocating security group"
+msgstr ""
+
+#: nova/compute/flavors.py:167
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: nova/compute/flavors.py:178
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: nova/compute/manager.py:366
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr ""
+
+#: nova/compute/manager.py:755
+msgid "Failed to check if instance shared"
+msgstr ""
+
+#: nova/compute/manager.py:821 nova/compute/manager.py:872
+msgid "Failed to complete a deletion"
+msgstr ""
+
+#: nova/compute/manager.py:913
+msgid "Failed to stop instance"
+msgstr ""
+
+#: nova/compute/manager.py:925
+msgid "Failed to start instance"
+msgstr ""
+
+#: nova/compute/manager.py:950
+msgid "Failed to revert crashed migration"
+msgstr ""
+
+#: nova/compute/manager.py:1364
+msgid "Failed to dealloc network for deleted instance"
+msgstr ""
+
+#: nova/compute/manager.py:1385
+msgid "Failed to dealloc network for failed instance"
+msgstr ""
+
+#: nova/compute/manager.py:1458 nova/compute/manager.py:3527
+msgid "Error trying to reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:1567
+#, python-format
+msgid "Instance failed network setup after %(attempts)d attempt(s)"
+msgstr ""
+
+#: nova/compute/manager.py:1761
+msgid "Instance failed block device setup"
+msgstr ""
+
+#: nova/compute/manager.py:1781 nova/compute/manager.py:2123
+#: nova/compute/manager.py:4071
+msgid "Instance failed to spawn"
+msgstr ""
+
+#: nova/compute/manager.py:1964
+msgid "Unexpected build failure, not rescheduling build."
+msgstr ""
+
+#: nova/compute/manager.py:2033 nova/compute/manager.py:2085
+msgid "Failed to allocate network(s)"
+msgstr ""
+
+#: nova/compute/manager.py:2111
+msgid "Failure prepping block device"
+msgstr ""
+
+#: nova/compute/manager.py:2144
+msgid "Failed to deallocate networks"
+msgstr ""
+
+#: nova/compute/manager.py:2374 nova/compute/manager.py:3718
+#: nova/compute/manager.py:5822
+msgid "Setting instance vm_state to ERROR"
+msgstr ""
+
+#: nova/compute/manager.py:2586 nova/compute/manager.py:4933
+#, python-format
+msgid "Failed to get compute_info for %s"
+msgstr ""
+
+#: nova/compute/manager.py:3013
+#, python-format
+msgid "set_admin_password failed: %s"
+msgstr ""
+
+#: nova/compute/manager.py:3098
+msgid "Error trying to Rescue Instance"
+msgstr ""
+
+#: nova/compute/manager.py:3724
+#, python-format
+msgid "Failed to rollback quota for failed finish_resize: %s"
+msgstr ""
+
+#: nova/compute/manager.py:4323
+#, python-format
+msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4362
+#, python-format
+msgid "Failed to detach volume %(volume_id)s from %(mp)s"
+msgstr ""
+
+#: nova/compute/manager.py:4441
+#, python-format
+msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
+msgstr ""
+
+#: nova/compute/manager.py:4448
+#, python-format
+msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4735
+#, python-format
+msgid "Pre live migration failed at %s"
+msgstr ""
+
+#: nova/compute/manager.py:5235
+msgid "Periodic task failed to offload instance."
+msgstr ""
+
+#: nova/compute/manager.py:5275
+#, python-format
+msgid "Failed to generate usage audit for instance on host %s"
+msgstr ""
+
+#: nova/compute/manager.py:5465
+msgid ""
+"Periodic sync_power_state task had an error while processing an instance."
+msgstr ""
+
+#: nova/compute/manager.py:5568 nova/compute/manager.py:5577
+#: nova/compute/manager.py:5608 nova/compute/manager.py:5619
+msgid "error during stop() in sync_power_state."
+msgstr ""
+
+#: nova/network/neutronv2/api.py:234
+#, python-format
+msgid "Neutron error creating port on network %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:418
+#, python-format
+msgid "Failed to update port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:425
+#, python-format
+msgid "Failed to delete port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524
+#, python-format
+msgid "Failed to delete neutron port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:697
+#, python-format
+msgid "Failed to access port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:931
+#, python-format
+msgid "Unable to access floating IP %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1065
+#, python-format
+msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1124
+#, python-format
+msgid "Unable to update host of port %s"
+msgstr ""
+
#: nova/objects/instance_fault.py:87
msgid "Failed to notify cells of instance fault"
msgstr "インスタンスの障害をセルに通知できませんでした"
@@ -58,35 +352,35 @@ msgstr "除去される元の例外: %s"
msgid "Unexpected exception occurred %d time(s)... retrying."
msgstr "予期せぬ例外が、%d回()発生しました。再試行中。"
-#: nova/openstack/common/lockutils.py:120
+#: nova/openstack/common/lockutils.py:119
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/loopingcall.py:89
+#: nova/openstack/common/loopingcall.py:95
msgid "in fixed duration looping call"
msgstr "一定期間の呼び出しループ"
-#: nova/openstack/common/loopingcall.py:136
+#: nova/openstack/common/loopingcall.py:138
msgid "in dynamic looping call"
msgstr "動的呼び出しループ"
-#: nova/openstack/common/periodic_task.py:179
+#: nova/openstack/common/periodic_task.py:202
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr "%(full_task_name)s 中のエラー: %(e)s"
-#: nova/openstack/common/policy.py:511
+#: nova/openstack/common/policy.py:507
#, python-format
msgid "Failed to understand rule %s"
msgstr "ルール %s を解釈できませんでした"
-#: nova/openstack/common/policy.py:521
+#: nova/openstack/common/policy.py:517
#, python-format
msgid "No handler for matches of kind %s"
msgstr "種類 %s の一致向けのハンドラーがありません"
-#: nova/openstack/common/policy.py:791
+#: nova/openstack/common/policy.py:787
#, python-format
msgid "Failed to understand rule %r"
msgstr "ルール %r を解釈できませんでした"
@@ -116,172 +410,186 @@ msgstr "DB 例外がラップされました。"
msgid "Failed to migrate to version %s on engine %s"
msgstr "バージョン%sをエンジン%sへの移行が失敗しました。"
-#: nova/virt/libvirt/driver.py:625
+#: nova/pci/pci_stats.py:119
+msgid ""
+"Failed to allocate PCI devices for instance. Unassigning devices back to "
+"pools. This should not happen, since the scheduler should have accurate "
+"information, and allocation during claims is controlled via a hold on the "
+"compute node semaphore"
+msgstr ""
+
+#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109
+#, python-format
+msgid "PCI device %s not found"
+msgstr ""
+
+#: nova/virt/disk/api.py:388
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:639
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:749
+#: nova/virt/libvirt/driver.py:764
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:873
+#: nova/virt/libvirt/driver.py:927
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:889
-msgid "During wait destroy, instance disappeared."
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:951
+#: nova/virt/libvirt/driver.py:1005
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:977
+#: nova/virt/libvirt/driver.py:1033
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1389
+#: nova/virt/libvirt/driver.py:1444
msgid "attaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1414
+#: nova/virt/libvirt/driver.py:1471
msgid "detaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1663
+#: nova/virt/libvirt/driver.py:1726
msgid "Failed to send updated snapshot status to volume service."
msgstr ""
-#: nova/virt/libvirt/driver.py:1749
+#: nova/virt/libvirt/driver.py:1834
msgid ""
"Unable to create quiesced VM snapshot, attempting again with quiescing "
"disabled."
msgstr ""
-#: nova/virt/libvirt/driver.py:1755
+#: nova/virt/libvirt/driver.py:1840
msgid "Unable to create VM snapshot, failing volume_snapshot operation."
msgstr ""
-#: nova/virt/libvirt/driver.py:1804
+#: nova/virt/libvirt/driver.py:1889
msgid ""
"Error occurred during volume_snapshot_create, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:1951
+#: nova/virt/libvirt/driver.py:2111
msgid ""
"Error occurred during volume_snapshot_delete, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421
+#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:2542
+#: nova/virt/libvirt/driver.py:2705
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2693
+#: nova/virt/libvirt/driver.py:2873
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2786
+#: nova/virt/libvirt/driver.py:2966
#, python-format
msgid "Attaching PCI devices %(dev)s to %(dom)s failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:3553
+#: nova/virt/libvirt/driver.py:3783
#, python-format
-msgid "An error occurred while trying to define a domain with xml: %s"
+msgid "Error defining a domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3562
+#: nova/virt/libvirt/driver.py:3787
#, python-format
-msgid "An error occurred while trying to launch a defined domain with xml: %s"
+msgid "Error launching a defined domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3571
+#: nova/virt/libvirt/driver.py:3792
#, python-format
-msgid "An error occurred while enabling hairpin mode on domain with xml: %s"
+msgid "Error enabling hairpin mode with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3589
+#: nova/virt/libvirt/driver.py:3806
#, python-format
msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3904
+#: nova/virt/libvirt/driver.py:4115
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to take "
"effect."
msgstr ""
-#: nova/virt/libvirt/driver.py:4481
+#: nova/virt/libvirt/driver.py:4794
#, python-format
msgid "Live Migration failure: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5231
+#: nova/virt/libvirt/driver.py:5596
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:202
+#: nova/virt/libvirt/imagebackend.py:200
#, python-format
msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:230
+#: nova/virt/libvirt/imagebackend.py:227
#, python-format
msgid ""
"%(base)s virtual size %(base_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:501
-#, python-format
-msgid "error opening rbd image %s"
-msgstr ""
-
-#: nova/virt/libvirt/imagecache.py:130
+#: nova/virt/libvirt/imagecache.py:129
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
"イメージ情報ファイル %(filename)s の読み取り中にエラーが発生しました: "
"%(error)s"
-#: nova/virt/libvirt/imagecache.py:391
+#: nova/virt/libvirt/imagecache.py:390
#, python-format
msgid "image %(id)s at (%(base_file)s): image verification failed"
msgstr "(%(base_file)s) にあるイメージ %(id)s: イメージの検査が失敗しました"
-#: nova/virt/libvirt/imagecache.py:448
+#: nova/virt/libvirt/imagecache.py:447
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr "%(base_file)s の削除に失敗しました。エラーは %(error)s"
-#: nova/virt/libvirt/lvm.py:201
+#: nova/virt/libvirt/lvm.py:200
#, python-format
msgid "ignoring unrecognized volume_clear='%s' value"
msgstr ""
-#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572
-#: nova/virt/libvirt/vif.py:596
+#: nova/virt/libvirt/rbd_utils.py:62
+#, python-format
+msgid "error opening rbd image %s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474
+#: nova/virt/libvirt/vif.py:496
msgid "Failed while plugging vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676
-#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717
-#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762
-#: nova/virt/libvirt/vif.py:784
+#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560
+#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598
+#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639
msgid "Failed while unplugging vif"
msgstr ""
@@ -290,12 +598,28 @@ msgstr ""
msgid "Unknown content in connection_info/access_mode: %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:666
+#: nova/virt/libvirt/volume.py:669
#, python-format
msgid "Couldn't unmount the NFS share %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:815
+#: nova/virt/libvirt/volume.py:818
#, python-format
msgid "Couldn't unmount the GlusterFS share %s"
msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:508
+#, python-format
+msgid ""
+"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1551
+#, python-format
+msgid "Attaching network adapter failed. Exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1591
+#, python-format
+msgid "Detaching network adapter failed. Exception: %s"
+msgstr ""
diff --git a/nova/locale/ja/LC_MESSAGES/nova-log-info.po b/nova/locale/ja/LC_MESSAGES/nova-log-info.po
index 8dbd4c8f9d..7af3d0293b 100644
--- a/nova/locale/ja/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/ja/LC_MESSAGES/nova-log-info.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: 2014-06-30 04:40+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Japanese (http://www.transifex.com/projects/p/nova/language/"
@@ -19,27 +19,77 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=1; plural=0;\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr "Eventlet backdoorは、プロセス%(pid)dの%(port)sをリスニングしています。"
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr ""
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr ""
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr "タスクの間隔が負であるため、定期タスク %(task)s をスキップしています"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "タスクが使用不可であるため、定期タスク %(task)s をスキップしています"
@@ -101,99 +151,113 @@ msgstr ""
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr "インスタンスが正常に破棄されました。"
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr "インスタンスを再び開始できます。"
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr "インスタンスの破棄を再び行います。"
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr "ライブ・スナップショット・プロセスを開始しています"
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr "コールド・スナップショット・プロセスを開始しています"
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr ""
"スナップショットが抽出されました。イメージのアップロードを開始しています"
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr "スナップショット・イメージのアップロードが完了しました"
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr "インスタンスが正常にソフト・リブートされました。"
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr "インスタンスが正常にシャットダウンされました。"
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
"インスタンスはソフト・リブート時にリブートされた可能性があるため、ここで返し"
"ます。"
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr "インスタンスが正常にリブートされました。"
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr "インスタンスが正常に作成されました。"
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr "データ: %(data)r, ファイルパス: %(fpath)r"
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
"切り捨てられたコンソール・ログが返されました。%d バイトが無視されました"
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr "イメージの作成中"
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr "構成ドライブを使用中"
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr "構成ドライブを %(path)s に作成しています"
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
@@ -202,50 +266,50 @@ msgstr ""
"インスタンス %s 用のドメインが Libvirt 内で見つかりませんでした。デバイスのブ"
"ロックの統計を取得できません"
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr "インスタンスが正常に実行されています。"
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr "nwfilter で setup_basic_filtering を呼び出しました"
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr "静的フィルターの確認中"
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
"フィルター処理されていないインスタンスに対してフィルター処理の取り消しが試み"
"られました"
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr "保管された情報を %s に書き込んでいます"
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
@@ -253,27 +317,27 @@ msgstr ""
"(%(base_file)s) にあるイメージ %(id)s: イメージの検査がスキップされました。"
"ハッシュは保管されていません"
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr "%(id)s (%(base_file)s): チェックサムの生成中"
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr "基本ファイルは新しいため削除できません: %s"
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr "基本ファイルを削除しています: %s"
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr "(%(base_file)s) にあるイメージ %(id)s: 検査中"
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
@@ -282,26 +346,26 @@ msgstr ""
"(%(base_file)s) にあるイメージ %(id)s: 使用中: このノード上では %(local)d "
"ローカル、このインスタンスのストレージを共有する他のノード上では %(remote)d"
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr "アクティブな基本ファイル: %s"
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr "破損した基本ファイル: %s"
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr "削除可能な基本ファイル: %s"
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po b/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po
index 898ffd2a71..f4e330f39e 100644
--- a/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/ko_KR/LC_MESSAGES/nova-log-error.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
+"POT-Creation-Date: 2014-08-18 06:04+0000\n"
"PO-Revision-Date: 2014-06-16 04:10+0000\n"
"Last-Translator: jaekwon.park \n"
"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/nova/"
@@ -40,11 +40,305 @@ msgstr ""
msgid "Exception running %(name)s post-hook: %(obj)s"
msgstr ""
-#: nova/api/ec2/__init__.py:243
+#: nova/api/ec2/__init__.py:244
#, python-format
msgid "Keystone failure: %s"
msgstr ""
+#: nova/api/ec2/__init__.py:493
+#, python-format
+msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:520
+#, python-format
+msgid "Environment: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:155
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:212
+#, python-format
+msgid "Failed to get metadata for instance id: %s"
+msgstr ""
+
+#: nova/api/openstack/common.py:134
+#, python-format
+msgid ""
+"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad "
+"upgrade or db corrupted?"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:684
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:68
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:90
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:112
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:134
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:160
+#, python-format
+msgid "Error in migrate %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:179
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:198
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:215
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:234
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:392
+#, python-format
+msgid "Compute.api::resetState %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/multinic.py:85
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85
+msgid "Failed to get default networks"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
+msgid "Failed to update usages deallocating network."
+msgstr ""
+
+#: nova/compute/api.py:561
+msgid "Failed to set instance name using multi_instance_display_name_template."
+msgstr ""
+
+#: nova/compute/api.py:1429
+msgid ""
+"Something wrong happened when trying to delete snapshot from shelved "
+"instance."
+msgstr ""
+
+#: nova/compute/api.py:3732
+msgid "Failed to update usages deallocating security group"
+msgstr ""
+
+#: nova/compute/flavors.py:167
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: nova/compute/flavors.py:178
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: nova/compute/manager.py:366
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr ""
+
+#: nova/compute/manager.py:755
+msgid "Failed to check if instance shared"
+msgstr ""
+
+#: nova/compute/manager.py:821 nova/compute/manager.py:872
+msgid "Failed to complete a deletion"
+msgstr ""
+
+#: nova/compute/manager.py:913
+msgid "Failed to stop instance"
+msgstr ""
+
+#: nova/compute/manager.py:925
+msgid "Failed to start instance"
+msgstr ""
+
+#: nova/compute/manager.py:950
+msgid "Failed to revert crashed migration"
+msgstr ""
+
+#: nova/compute/manager.py:1364
+msgid "Failed to dealloc network for deleted instance"
+msgstr ""
+
+#: nova/compute/manager.py:1385
+msgid "Failed to dealloc network for failed instance"
+msgstr ""
+
+#: nova/compute/manager.py:1458 nova/compute/manager.py:3527
+msgid "Error trying to reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:1567
+#, python-format
+msgid "Instance failed network setup after %(attempts)d attempt(s)"
+msgstr ""
+
+#: nova/compute/manager.py:1761
+msgid "Instance failed block device setup"
+msgstr ""
+
+#: nova/compute/manager.py:1781 nova/compute/manager.py:2123
+#: nova/compute/manager.py:4071
+msgid "Instance failed to spawn"
+msgstr ""
+
+#: nova/compute/manager.py:1964
+msgid "Unexpected build failure, not rescheduling build."
+msgstr ""
+
+#: nova/compute/manager.py:2033 nova/compute/manager.py:2085
+msgid "Failed to allocate network(s)"
+msgstr ""
+
+#: nova/compute/manager.py:2111
+msgid "Failure prepping block device"
+msgstr ""
+
+#: nova/compute/manager.py:2144
+msgid "Failed to deallocate networks"
+msgstr ""
+
+#: nova/compute/manager.py:2374 nova/compute/manager.py:3718
+#: nova/compute/manager.py:5822
+msgid "Setting instance vm_state to ERROR"
+msgstr ""
+
+#: nova/compute/manager.py:2586 nova/compute/manager.py:4933
+#, python-format
+msgid "Failed to get compute_info for %s"
+msgstr ""
+
+#: nova/compute/manager.py:3013
+#, python-format
+msgid "set_admin_password failed: %s"
+msgstr ""
+
+#: nova/compute/manager.py:3098
+msgid "Error trying to Rescue Instance"
+msgstr ""
+
+#: nova/compute/manager.py:3724
+#, python-format
+msgid "Failed to rollback quota for failed finish_resize: %s"
+msgstr ""
+
+#: nova/compute/manager.py:4323
+#, python-format
+msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4362
+#, python-format
+msgid "Failed to detach volume %(volume_id)s from %(mp)s"
+msgstr ""
+
+#: nova/compute/manager.py:4441
+#, python-format
+msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
+msgstr ""
+
+#: nova/compute/manager.py:4448
+#, python-format
+msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4735
+#, python-format
+msgid "Pre live migration failed at %s"
+msgstr ""
+
+#: nova/compute/manager.py:5235
+msgid "Periodic task failed to offload instance."
+msgstr ""
+
+#: nova/compute/manager.py:5275
+#, python-format
+msgid "Failed to generate usage audit for instance on host %s"
+msgstr ""
+
+#: nova/compute/manager.py:5465
+msgid ""
+"Periodic sync_power_state task had an error while processing an instance."
+msgstr ""
+
+#: nova/compute/manager.py:5568 nova/compute/manager.py:5577
+#: nova/compute/manager.py:5608 nova/compute/manager.py:5619
+msgid "error during stop() in sync_power_state."
+msgstr ""
+
+#: nova/network/neutronv2/api.py:234
+#, python-format
+msgid "Neutron error creating port on network %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:418
+#, python-format
+msgid "Failed to update port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:425
+#, python-format
+msgid "Failed to delete port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524
+#, python-format
+msgid "Failed to delete neutron port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:697
+#, python-format
+msgid "Failed to access port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:931
+#, python-format
+msgid "Unable to access floating IP %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1065
+#, python-format
+msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1124
+#, python-format
+msgid "Unable to update host of port %s"
+msgstr ""
+
#: nova/objects/instance_fault.py:87
msgid "Failed to notify cells of instance fault"
msgstr "셀에 인스턴스 결함을 알리지 못했음"
@@ -59,35 +353,35 @@ msgstr "기존 예외가 삭제됨: %s"
msgid "Unexpected exception occurred %d time(s)... retrying."
msgstr "예기치 않은 예외 %d 번 발생하였습니다... 다시 시도중."
-#: nova/openstack/common/lockutils.py:120
+#: nova/openstack/common/lockutils.py:119
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/loopingcall.py:89
+#: nova/openstack/common/loopingcall.py:95
msgid "in fixed duration looping call"
msgstr "고정 기간 루프 호출에서"
-#: nova/openstack/common/loopingcall.py:136
+#: nova/openstack/common/loopingcall.py:138
msgid "in dynamic looping call"
msgstr "동적 루프 호출에서"
-#: nova/openstack/common/periodic_task.py:179
+#: nova/openstack/common/periodic_task.py:202
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr "%(full_task_name)s 중 오류: %(e)s"
-#: nova/openstack/common/policy.py:511
+#: nova/openstack/common/policy.py:507
#, python-format
msgid "Failed to understand rule %s"
msgstr "%s 규칙을 이해하는데 실패했습니다"
-#: nova/openstack/common/policy.py:521
+#: nova/openstack/common/policy.py:517
#, python-format
msgid "No handler for matches of kind %s"
msgstr "%s 유형의 일치에 대한 핸들러가 없음"
-#: nova/openstack/common/policy.py:791
+#: nova/openstack/common/policy.py:787
#, python-format
msgid "Failed to understand rule %r"
msgstr "룰 %r를 이해하지 못했습니다."
@@ -117,170 +411,184 @@ msgstr "DB 예외가 랩핑되었습니다."
msgid "Failed to migrate to version %s on engine %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:625
+#: nova/pci/pci_stats.py:119
+msgid ""
+"Failed to allocate PCI devices for instance. Unassigning devices back to "
+"pools. This should not happen, since the scheduler should have accurate "
+"information, and allocation during claims is controlled via a hold on the "
+"compute node semaphore"
+msgstr ""
+
+#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109
+#, python-format
+msgid "PCI device %s not found"
+msgstr ""
+
+#: nova/virt/disk/api.py:388
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:639
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:749
+#: nova/virt/libvirt/driver.py:764
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:873
+#: nova/virt/libvirt/driver.py:927
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:889
-msgid "During wait destroy, instance disappeared."
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:951
+#: nova/virt/libvirt/driver.py:1005
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:977
+#: nova/virt/libvirt/driver.py:1033
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1389
+#: nova/virt/libvirt/driver.py:1444
msgid "attaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1414
+#: nova/virt/libvirt/driver.py:1471
msgid "detaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1663
+#: nova/virt/libvirt/driver.py:1726
msgid "Failed to send updated snapshot status to volume service."
msgstr ""
-#: nova/virt/libvirt/driver.py:1749
+#: nova/virt/libvirt/driver.py:1834
msgid ""
"Unable to create quiesced VM snapshot, attempting again with quiescing "
"disabled."
msgstr ""
-#: nova/virt/libvirt/driver.py:1755
+#: nova/virt/libvirt/driver.py:1840
msgid "Unable to create VM snapshot, failing volume_snapshot operation."
msgstr ""
-#: nova/virt/libvirt/driver.py:1804
+#: nova/virt/libvirt/driver.py:1889
msgid ""
"Error occurred during volume_snapshot_create, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:1951
+#: nova/virt/libvirt/driver.py:2111
msgid ""
"Error occurred during volume_snapshot_delete, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421
+#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:2542
+#: nova/virt/libvirt/driver.py:2705
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2693
+#: nova/virt/libvirt/driver.py:2873
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2786
+#: nova/virt/libvirt/driver.py:2966
#, python-format
msgid "Attaching PCI devices %(dev)s to %(dom)s failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:3553
+#: nova/virt/libvirt/driver.py:3783
#, python-format
-msgid "An error occurred while trying to define a domain with xml: %s"
+msgid "Error defining a domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3562
+#: nova/virt/libvirt/driver.py:3787
#, python-format
-msgid "An error occurred while trying to launch a defined domain with xml: %s"
+msgid "Error launching a defined domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3571
+#: nova/virt/libvirt/driver.py:3792
#, python-format
-msgid "An error occurred while enabling hairpin mode on domain with xml: %s"
+msgid "Error enabling hairpin mode with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3589
+#: nova/virt/libvirt/driver.py:3806
#, python-format
msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3904
+#: nova/virt/libvirt/driver.py:4115
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to take "
"effect."
msgstr ""
-#: nova/virt/libvirt/driver.py:4481
+#: nova/virt/libvirt/driver.py:4794
#, python-format
msgid "Live Migration failure: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5231
+#: nova/virt/libvirt/driver.py:5596
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:202
+#: nova/virt/libvirt/imagebackend.py:200
#, python-format
msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:230
+#: nova/virt/libvirt/imagebackend.py:227
#, python-format
msgid ""
"%(base)s virtual size %(base_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:501
-#, python-format
-msgid "error opening rbd image %s"
-msgstr ""
-
-#: nova/virt/libvirt/imagecache.py:130
+#: nova/virt/libvirt/imagecache.py:129
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:391
+#: nova/virt/libvirt/imagecache.py:390
#, python-format
msgid "image %(id)s at (%(base_file)s): image verification failed"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:448
+#: nova/virt/libvirt/imagecache.py:447
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/lvm.py:201
+#: nova/virt/libvirt/lvm.py:200
#, python-format
msgid "ignoring unrecognized volume_clear='%s' value"
msgstr ""
-#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572
-#: nova/virt/libvirt/vif.py:596
+#: nova/virt/libvirt/rbd_utils.py:62
+#, python-format
+msgid "error opening rbd image %s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474
+#: nova/virt/libvirt/vif.py:496
msgid "Failed while plugging vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676
-#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717
-#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762
-#: nova/virt/libvirt/vif.py:784
+#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560
+#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598
+#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639
msgid "Failed while unplugging vif"
msgstr ""
@@ -289,12 +597,28 @@ msgstr ""
msgid "Unknown content in connection_info/access_mode: %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:666
+#: nova/virt/libvirt/volume.py:669
#, python-format
msgid "Couldn't unmount the NFS share %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:815
+#: nova/virt/libvirt/volume.py:818
#, python-format
msgid "Couldn't unmount the GlusterFS share %s"
msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:508
+#, python-format
+msgid ""
+"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1551
+#, python-format
+msgid "Attaching network adapter failed. Exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1591
+#, python-format
+msgid "Detaching network adapter failed. Exception: %s"
+msgstr ""
diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po b/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po
index 53e0163716..f206474a16 100644
--- a/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/ko_KR/LC_MESSAGES/nova-log-info.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: 2014-06-30 04:40+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/nova/"
@@ -19,27 +19,77 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=1; plural=0;\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr "Eventlet 백도어는 프로세스 %(pid)d 일 동안 %(port)s에서 수신"
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr ""
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr ""
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr "간격이 음수이기 때문에 주기적 태스크 %(task)s을(를) 건너뜀"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "사용 안하기 때문에 주기적 태스크 %(task)s을(를) 건너뜀"
@@ -101,96 +151,110 @@ msgstr ""
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr "인스턴스가 영구 삭제되었습니다. "
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr "인스턴스가 다시 시작됩니다."
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr "인스턴스를 다시 영구 삭제하려 합니다."
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr "라이브 스냅샷 프로세스 시작 중"
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr "콜드 스냅샷 프로세스 시작 중"
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr "스냅샷 추출, 이미지 업로드 시작 중"
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr "스냅샷 이미지 업로드 완료"
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr "인스턴스가 소프트 리부트되었습니다. "
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr "인스턴스가 시스템 종료되었습니다. "
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
"인스턴스가 소프트 리부트 중에 다시 부팅되었을 수 있으므로, 지금 리턴합니다. "
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr "인스턴스가 다시 부트되었습니다. "
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr "인스턴스가 파생되었습니다. "
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr "데이터: %(data)r, fpath: %(fpath)r"
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr "잘린 콘솔 로그가 리턴되었으며, %d 바이트는 무시됨"
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr "이미지 작성 중"
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr "구성 드라이브 사용 중"
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr "%(path)s에 구성 드라이브 작성 중"
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
@@ -199,75 +263,75 @@ msgstr ""
"%s 인스턴스에 대한 libvirt에서 도메인을 찾을 수 없습니다. 디바이스의 블록 통"
"계를 가져올 수 없음"
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr "인스턴스가 정상적으로 실행 중입니다. "
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr "nwfilter에서 setup_basic_filtering을 호출했음"
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr "정적 필터 확인 중"
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr "필터링되지 않는 인스턴스를 필터링 해제하려고 했음"
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr "%s에 저장된 정보 기록 중"
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
msgstr ""
"(%(base_file)s)의 이미지 %(id)s: 이미지 검증 건너뜀. 해시가 저장되지 않음"
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr "%(id)s (%(base_file)s): 체크섬 생성 중"
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr "기본 파일이 제거하기엔 너무 신생임: %s"
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr "기본 파일 제거 중: %s"
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr "(%(base_file)s)의 이미지 %(id)s: 검사 중"
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
@@ -276,26 +340,26 @@ msgstr ""
"(%(base_file)s)의 이미지 %(id)s: 사용 중. 이 노드의 %(local)d 로컬과 다른 노"
"드의 %(remote)d이(가) 이 인스턴스 스노리지를 공유함"
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr "활성 기본 파일: %s"
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr "손상된 기본 파일: %s"
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr "제거 가능한 기본 파일: %s"
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/nova-log-critical.pot b/nova/locale/nova-log-critical.pot
index 83a24b3e1b..6455c0c05d 100644
--- a/nova/locale/nova-log-critical.pot
+++ b/nova/locale/nova-log-critical.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 2014.2.dev88.ged965df\n"
+"Project-Id-Version: nova 2014.2.dev425.g05dbf0d\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-17 06:07+0000\n"
+"POT-Creation-Date: 2014-08-12 06:05+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
@@ -17,3 +17,20 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
+#: nova/api/openstack/__init__.py:331
+#, python-format
+msgid "Missing core API extensions: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:658
+#, python-format
+msgid ""
+"Unable to connect to server at %(server)s, sleeping for %(seconds)s "
+"seconds"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:767
+#, python-format
+msgid "In vmwareapi: _call_method (session=%s)"
+msgstr ""
+
diff --git a/nova/locale/nova-log-error.pot b/nova/locale/nova-log-error.pot
index 49ae51960d..862a79a349 100644
--- a/nova/locale/nova-log-error.pot
+++ b/nova/locale/nova-log-error.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 2014.2.dev374.g4e35f5f\n"
+"Project-Id-Version: nova 2014.2.dev566.gd156d7f\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
+"POT-Creation-Date: 2014-08-18 06:04+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
@@ -37,11 +37,304 @@ msgstr ""
msgid "Exception running %(name)s post-hook: %(obj)s"
msgstr ""
-#: nova/api/ec2/__init__.py:243
+#: nova/api/ec2/__init__.py:244
#, python-format
msgid "Keystone failure: %s"
msgstr ""
+#: nova/api/ec2/__init__.py:493
+#, python-format
+msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:520
+#, python-format
+msgid "Environment: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:155
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:212
+#, python-format
+msgid "Failed to get metadata for instance id: %s"
+msgstr ""
+
+#: nova/api/openstack/common.py:134
+#, python-format
+msgid ""
+"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. "
+"Bad upgrade or db corrupted?"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:684
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:68
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:90
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:112
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:134
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:160
+#, python-format
+msgid "Error in migrate %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:179
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:198
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:215
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:234
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:392
+#, python-format
+msgid "Compute.api::resetState %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/multinic.py:85
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85
+msgid "Failed to get default networks"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
+msgid "Failed to update usages deallocating network."
+msgstr ""
+
+#: nova/compute/api.py:561
+msgid "Failed to set instance name using multi_instance_display_name_template."
+msgstr ""
+
+#: nova/compute/api.py:1429
+msgid ""
+"Something wrong happened when trying to delete snapshot from shelved "
+"instance."
+msgstr ""
+
+#: nova/compute/api.py:3732
+msgid "Failed to update usages deallocating security group"
+msgstr ""
+
+#: nova/compute/flavors.py:167
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: nova/compute/flavors.py:178
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: nova/compute/manager.py:366
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr ""
+
+#: nova/compute/manager.py:755
+msgid "Failed to check if instance shared"
+msgstr ""
+
+#: nova/compute/manager.py:821 nova/compute/manager.py:872
+msgid "Failed to complete a deletion"
+msgstr ""
+
+#: nova/compute/manager.py:913
+msgid "Failed to stop instance"
+msgstr ""
+
+#: nova/compute/manager.py:925
+msgid "Failed to start instance"
+msgstr ""
+
+#: nova/compute/manager.py:950
+msgid "Failed to revert crashed migration"
+msgstr ""
+
+#: nova/compute/manager.py:1364
+msgid "Failed to dealloc network for deleted instance"
+msgstr ""
+
+#: nova/compute/manager.py:1385
+msgid "Failed to dealloc network for failed instance"
+msgstr ""
+
+#: nova/compute/manager.py:1458 nova/compute/manager.py:3527
+msgid "Error trying to reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:1567
+#, python-format
+msgid "Instance failed network setup after %(attempts)d attempt(s)"
+msgstr ""
+
+#: nova/compute/manager.py:1761
+msgid "Instance failed block device setup"
+msgstr ""
+
+#: nova/compute/manager.py:1781 nova/compute/manager.py:2123
+#: nova/compute/manager.py:4071
+msgid "Instance failed to spawn"
+msgstr ""
+
+#: nova/compute/manager.py:1964
+msgid "Unexpected build failure, not rescheduling build."
+msgstr ""
+
+#: nova/compute/manager.py:2033 nova/compute/manager.py:2085
+msgid "Failed to allocate network(s)"
+msgstr ""
+
+#: nova/compute/manager.py:2111
+msgid "Failure prepping block device"
+msgstr ""
+
+#: nova/compute/manager.py:2144
+msgid "Failed to deallocate networks"
+msgstr ""
+
+#: nova/compute/manager.py:2374 nova/compute/manager.py:3718
+#: nova/compute/manager.py:5822
+msgid "Setting instance vm_state to ERROR"
+msgstr ""
+
+#: nova/compute/manager.py:2586 nova/compute/manager.py:4933
+#, python-format
+msgid "Failed to get compute_info for %s"
+msgstr ""
+
+#: nova/compute/manager.py:3013
+#, python-format
+msgid "set_admin_password failed: %s"
+msgstr ""
+
+#: nova/compute/manager.py:3098
+msgid "Error trying to Rescue Instance"
+msgstr ""
+
+#: nova/compute/manager.py:3724
+#, python-format
+msgid "Failed to rollback quota for failed finish_resize: %s"
+msgstr ""
+
+#: nova/compute/manager.py:4323
+#, python-format
+msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4362
+#, python-format
+msgid "Failed to detach volume %(volume_id)s from %(mp)s"
+msgstr ""
+
+#: nova/compute/manager.py:4441
+#, python-format
+msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
+msgstr ""
+
+#: nova/compute/manager.py:4448
+#, python-format
+msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4735
+#, python-format
+msgid "Pre live migration failed at %s"
+msgstr ""
+
+#: nova/compute/manager.py:5235
+msgid "Periodic task failed to offload instance."
+msgstr ""
+
+#: nova/compute/manager.py:5275
+#, python-format
+msgid "Failed to generate usage audit for instance on host %s"
+msgstr ""
+
+#: nova/compute/manager.py:5465
+msgid "Periodic sync_power_state task had an error while processing an instance."
+msgstr ""
+
+#: nova/compute/manager.py:5568 nova/compute/manager.py:5577
+#: nova/compute/manager.py:5608 nova/compute/manager.py:5619
+msgid "error during stop() in sync_power_state."
+msgstr ""
+
+#: nova/network/neutronv2/api.py:234
+#, python-format
+msgid "Neutron error creating port on network %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:418
+#, python-format
+msgid "Failed to update port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:425
+#, python-format
+msgid "Failed to delete port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524
+#, python-format
+msgid "Failed to delete neutron port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:697
+#, python-format
+msgid "Failed to access port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:931
+#, python-format
+msgid "Unable to access floating IP %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1065
+#, python-format
+msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1124
+#, python-format
+msgid "Unable to update host of port %s"
+msgstr ""
+
#: nova/objects/instance_fault.py:87
msgid "Failed to notify cells of instance fault"
msgstr ""
@@ -56,35 +349,35 @@ msgstr ""
msgid "Unexpected exception occurred %d time(s)... retrying."
msgstr ""
-#: nova/openstack/common/lockutils.py:120
+#: nova/openstack/common/lockutils.py:119
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/loopingcall.py:89
+#: nova/openstack/common/loopingcall.py:95
msgid "in fixed duration looping call"
msgstr ""
-#: nova/openstack/common/loopingcall.py:136
+#: nova/openstack/common/loopingcall.py:138
msgid "in dynamic looping call"
msgstr ""
-#: nova/openstack/common/periodic_task.py:179
+#: nova/openstack/common/periodic_task.py:202
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr ""
-#: nova/openstack/common/policy.py:511
+#: nova/openstack/common/policy.py:507
#, python-format
msgid "Failed to understand rule %s"
msgstr ""
-#: nova/openstack/common/policy.py:521
+#: nova/openstack/common/policy.py:517
#, python-format
msgid "No handler for matches of kind %s"
msgstr ""
-#: nova/openstack/common/policy.py:791
+#: nova/openstack/common/policy.py:787
#, python-format
msgid "Failed to understand rule %r"
msgstr ""
@@ -114,172 +407,187 @@ msgstr ""
msgid "Failed to migrate to version %s on engine %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:625
+#: nova/pci/pci_stats.py:119
+msgid ""
+"Failed to allocate PCI devices for instance. Unassigning devices back to "
+"pools. This should not happen, since the scheduler should have accurate "
+"information, and allocation during claims is controlled via a hold on the"
+" compute node semaphore"
+msgstr ""
+
+#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109
+#, python-format
+msgid "PCI device %s not found"
+msgstr ""
+
+#: nova/virt/disk/api.py:388
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': "
+"%(errors)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:639
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:749
+#: nova/virt/libvirt/driver.py:764
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:873
+#: nova/virt/libvirt/driver.py:927
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:889
-msgid "During wait destroy, instance disappeared."
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:951
+#: nova/virt/libvirt/driver.py:1005
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:977
+#: nova/virt/libvirt/driver.py:1033
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1389
+#: nova/virt/libvirt/driver.py:1444
msgid "attaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1414
+#: nova/virt/libvirt/driver.py:1471
msgid "detaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1663
+#: nova/virt/libvirt/driver.py:1726
msgid "Failed to send updated snapshot status to volume service."
msgstr ""
-#: nova/virt/libvirt/driver.py:1749
+#: nova/virt/libvirt/driver.py:1834
msgid ""
"Unable to create quiesced VM snapshot, attempting again with quiescing "
"disabled."
msgstr ""
-#: nova/virt/libvirt/driver.py:1755
+#: nova/virt/libvirt/driver.py:1840
msgid "Unable to create VM snapshot, failing volume_snapshot operation."
msgstr ""
-#: nova/virt/libvirt/driver.py:1804
+#: nova/virt/libvirt/driver.py:1889
msgid ""
"Error occurred during volume_snapshot_create, sending error status to "
"Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:1951
+#: nova/virt/libvirt/driver.py:2111
msgid ""
"Error occurred during volume_snapshot_delete, sending error status to "
"Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421
+#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:2542
+#: nova/virt/libvirt/driver.py:2705
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2693
+#: nova/virt/libvirt/driver.py:2873
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2786
+#: nova/virt/libvirt/driver.py:2966
#, python-format
msgid "Attaching PCI devices %(dev)s to %(dom)s failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:3553
+#: nova/virt/libvirt/driver.py:3783
#, python-format
-msgid "An error occurred while trying to define a domain with xml: %s"
+msgid "Error defining a domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3562
+#: nova/virt/libvirt/driver.py:3787
#, python-format
-msgid "An error occurred while trying to launch a defined domain with xml: %s"
+msgid "Error launching a defined domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3571
+#: nova/virt/libvirt/driver.py:3792
#, python-format
-msgid "An error occurred while enabling hairpin mode on domain with xml: %s"
+msgid "Error enabling hairpin mode with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3589
+#: nova/virt/libvirt/driver.py:3806
#, python-format
msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3904
+#: nova/virt/libvirt/driver.py:4115
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to "
"take effect."
msgstr ""
-#: nova/virt/libvirt/driver.py:4481
+#: nova/virt/libvirt/driver.py:4794
#, python-format
msgid "Live Migration failure: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5231
+#: nova/virt/libvirt/driver.py:5596
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:202
+#: nova/virt/libvirt/imagebackend.py:200
#, python-format
msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:230
+#: nova/virt/libvirt/imagebackend.py:227
#, python-format
msgid ""
"%(base)s virtual size %(base_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:501
-#, python-format
-msgid "error opening rbd image %s"
-msgstr ""
-
-#: nova/virt/libvirt/imagecache.py:130
+#: nova/virt/libvirt/imagecache.py:129
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:391
+#: nova/virt/libvirt/imagecache.py:390
#, python-format
msgid "image %(id)s at (%(base_file)s): image verification failed"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:448
+#: nova/virt/libvirt/imagecache.py:447
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/lvm.py:201
+#: nova/virt/libvirt/lvm.py:200
#, python-format
msgid "ignoring unrecognized volume_clear='%s' value"
msgstr ""
-#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572
-#: nova/virt/libvirt/vif.py:596
+#: nova/virt/libvirt/rbd_utils.py:62
+#, python-format
+msgid "error opening rbd image %s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474
+#: nova/virt/libvirt/vif.py:496
msgid "Failed while plugging vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676
-#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717
-#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762
-#: nova/virt/libvirt/vif.py:784
+#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560
+#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598
+#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639
msgid "Failed while unplugging vif"
msgstr ""
@@ -288,13 +596,28 @@ msgstr ""
msgid "Unknown content in connection_info/access_mode: %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:666
+#: nova/virt/libvirt/volume.py:669
#, python-format
msgid "Couldn't unmount the NFS share %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:815
+#: nova/virt/libvirt/volume.py:818
#, python-format
msgid "Couldn't unmount the GlusterFS share %s"
msgstr ""
+#: nova/virt/vmwareapi/vmops.py:508
+#, python-format
+msgid "Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1551
+#, python-format
+msgid "Attaching network adapter failed. Exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1591
+#, python-format
+msgid "Detaching network adapter failed. Exception: %s"
+msgstr ""
+
diff --git a/nova/locale/nova-log-info.pot b/nova/locale/nova-log-info.pot
index 0ada885bf3..adeb552085 100644
--- a/nova/locale/nova-log-info.pot
+++ b/nova/locale/nova-log-info.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 2014.2.dev374.g4e35f5f\n"
+"Project-Id-Version: nova 2014.2.dev566.gd156d7f\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
@@ -17,27 +17,76 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid "During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent "
+"index already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr ""
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr ""
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr ""
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr ""
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr ""
@@ -99,196 +148,211 @@ msgstr ""
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: "
+"%(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr ""
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr ""
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr ""
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr ""
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. "
"Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats "
"for device"
msgstr ""
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr ""
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr ""
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash "
"stored"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
"%(remote)d on other nodes sharing this instance storage"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/nova-log-warning.pot b/nova/locale/nova-log-warning.pot
index 5dfa8f908c..95d18da37a 100644
--- a/nova/locale/nova-log-warning.pot
+++ b/nova/locale/nova-log-warning.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 2014.2.dev374.g4e35f5f\n"
+"Project-Id-Version: nova 2014.2.dev566.gd156d7f\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
@@ -17,10 +17,142 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
-#: nova/compute/manager.py:1998
+#: nova/api/auth.py:73
+msgid "ratelimit_v3 is removed from v3 api."
+msgstr ""
+
+#: nova/api/auth.py:160
+msgid "Sourcing roles from deprecated X-Role HTTP header"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:169
+#, python-format
+msgid ""
+"Access key %(access_key)s has had %(failures)d failed authentications and"
+" will be locked out for %(lock_mins)d minutes."
+msgstr ""
+
+#: nova/api/ec2/cloud.py:1290
+#: nova/api/openstack/compute/contrib/floating_ips.py:254
+#, python-format
+msgid "multiple fixed_ips exist, using the first: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:119
+msgid ""
+"X-Instance-ID present in request headers. The 'service_metadata_proxy' "
+"option must be enabled to process this header."
+msgstr ""
+
+#: nova/api/metadata/handler.py:189
+#, python-format
+msgid ""
+"X-Instance-ID-Signature: %(signature)s does not match the expected value:"
+" %(expected_signature)s for id: %(instance_id)s. Request From: "
+"%(remote_address)s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:215
+#, python-format
+msgid ""
+"Tenant_id %(tenant_id)s does not match tenant_id of instance "
+"%(instance_id)s."
+msgstr ""
+
+#: nova/api/metadata/vendordata_json.py:47
+msgid "file does not exist"
+msgstr ""
+
+#: nova/api/metadata/vendordata_json.py:49
+msgid "Unexpected IOError when reading"
+msgstr ""
+
+#: nova/api/metadata/vendordata_json.py:53
+msgid "failed to load json"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:235 nova/api/openstack/__init__.py:409
+#, python-format
+msgid ""
+"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
+"resource"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:282
+#: nova/api/openstack/compute/plugins/v3/servers.py:104
+#, python-format
+msgid "Not loading %s because it is in the blacklist"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:287
+#: nova/api/openstack/compute/plugins/v3/servers.py:109
+#, python-format
+msgid "Not loading %s because it is not in the whitelist"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:307
+#, python-format
+msgid "Extensions in both blacklist and whitelist: %s"
+msgstr ""
+
+#: nova/api/openstack/common.py:456
+msgid "Rejecting snapshot request, snapshots currently disabled"
+msgstr ""
+
+#: nova/api/openstack/extensions.py:279
+#, python-format
+msgid "Failed to load extension %(ext_factory)s: %(exc)s"
+msgstr ""
+
+#: nova/api/openstack/compute/servers.py:82
+msgid ""
+"XML support has been deprecated and may be removed as early as the Juno "
+"release."
+msgstr ""
+
+#: nova/api/openstack/compute/views/servers.py:197
+msgid "Instance has had its instance_type removed from the DB"
+msgstr ""
+
+#: nova/compute/manager.py:2023
msgid "No more network or fixed IP to be allocated"
msgstr ""
+#: nova/compute/manager.py:2263
+#, python-format
+msgid "Ignoring EndpointNotFound: %s"
+msgstr ""
+
+#: nova/compute/manager.py:2281
+#, python-format
+msgid "Failed to delete volume: %(volume_id)s due to %(exc)s"
+msgstr ""
+
+#: nova/compute/utils.py:204
+#, python-format
+msgid "Can't access image %(image_id)s: %(error)s"
+msgstr ""
+
+#: nova/compute/utils.py:328
+#, python-format
+msgid ""
+"No host name specified for the notification of HostAPI.%s and it will be "
+"ignored"
+msgstr ""
+
+#: nova/compute/utils.py:456
+#, python-format
+msgid ""
+"Value of 0 or None specified for %s. This behaviour will change in "
+"meaning in the K release, to mean 'call at the default rate' rather than "
+"'do not call'. To keep the 'do not call' behaviour, use a negative value."
+msgstr ""
+
+#: nova/compute/resources/__init__.py:31
+#, python-format
+msgid "Compute resource plugin %s was not loaded"
+msgstr ""
+
#: nova/consoleauth/manager.py:84
#, python-format
msgid "Token: %(token)s failed to save into memcached."
@@ -31,20 +163,53 @@ msgstr ""
msgid "Instance: %(instance_uuid)s failed to save into memcached"
msgstr ""
-#: nova/openstack/common/loopingcall.py:82
+#: nova/network/neutronv2/api.py:218
+#, python-format
+msgid "Neutron error: Port quota exceeded in tenant: %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:223
+#, python-format
+msgid "Neutron error: No more fixed IPs in network: %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:227
+#, python-format
+msgid ""
+"Neutron error: MAC address %(mac)s is already in use on network "
+"%(network)s."
+msgstr ""
+
+#: nova/network/neutronv2/api.py:302
+msgid "No network configured!"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:497
+#, python-format
+msgid "Port %s does not exist"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1160
#, python-format
-msgid "task run outlasted interval by %s sec"
+msgid ""
+"Network %(id)s not matched with the tenants network! The ports tenant "
+"%(tenant_id)s will be used."
msgstr ""
-#: nova/openstack/common/network_utils.py:146
+#: nova/openstack/common/loopingcall.py:87
+#, python-format
+msgid "task %(func_name)s run outlasted interval by %(delay).2f sec"
+msgstr ""
+
+#: nova/openstack/common/network_utils.py:145
msgid "tcp_keepidle not available on your system"
msgstr ""
-#: nova/openstack/common/network_utils.py:153
+#: nova/openstack/common/network_utils.py:152
msgid "tcp_keepintvl not available on your system"
msgstr ""
-#: nova/openstack/common/network_utils.py:160
+#: nova/openstack/common/network_utils.py:159
msgid "tcp_keepknt not available on your system"
msgstr ""
@@ -72,7 +237,7 @@ msgstr ""
msgid "SQL connection failed. %s attempts left."
msgstr ""
-#: nova/openstack/common/db/sqlalchemy/utils.py:97
+#: nova/openstack/common/db/sqlalchemy/utils.py:96
msgid "Id not in sort_keys; is sort_keys unique?"
msgstr ""
@@ -80,7 +245,7 @@ msgstr ""
msgid "VCPUs not set; assuming CPU collection broken"
msgstr ""
-#: nova/scheduler/filters/core_filter.py:92
+#: nova/scheduler/filters/core_filter.py:102
#, python-format
msgid "Could not decode cpu_allocation_ratio: '%s'"
msgstr ""
@@ -90,12 +255,41 @@ msgstr ""
msgid "Could not decode ram_allocation_ratio: '%s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:368
+#: nova/storage/linuxscsi.py:100
+#, python-format
+msgid "Multipath call failed exit (%(code)s)"
+msgstr ""
+
+#: nova/storage/linuxscsi.py:121
+#, python-format
+msgid "Couldn't find multipath device %s"
+msgstr ""
+
+#: nova/storage/linuxscsi.py:130
+#, python-format
+msgid "Skip faulty line \"%(dev_line)s\" of multipath device %(mdev)s"
+msgstr ""
+
+#: nova/virt/disk/api.py:366
+#, python-format
+msgid "Ignoring error injecting data into image %(image)s (%(e)s)"
+msgstr ""
+
+#: nova/virt/disk/api.py:456
+#, python-format
+msgid "Ignoring error injecting %(inject)s into image (%(e)s)"
+msgstr ""
+
+#: nova/virt/disk/vfs/api.py:44
+msgid "Unable to import guestfs, falling back to VFSLocalFS"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:376
#, python-format
msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:606
+#: nova/virt/libvirt/driver.py:614
#, python-format
msgid ""
"The libvirt driver is not tested on %(type)s/%(arch)s by the OpenStack "
@@ -103,108 +297,122 @@ msgid ""
"see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix"
msgstr ""
-#: nova/virt/libvirt/driver.py:656
+#: nova/virt/libvirt/driver.py:671
#, python-format
msgid "URI %(uri)s does not support events: %(error)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:672
+#: nova/virt/libvirt/driver.py:687
#, python-format
msgid "URI %(uri)s does not support connection events: %(error)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:865
+#: nova/virt/libvirt/driver.py:919
msgid "Cannot destroy instance, operation time out"
msgstr ""
-#: nova/virt/libvirt/driver.py:971
+#: nova/virt/libvirt/driver.py:943
+msgid "During wait destroy, instance disappeared."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1027
msgid "Instance may be still running, destroy it again."
msgstr ""
-#: nova/virt/libvirt/driver.py:1026
+#: nova/virt/libvirt/driver.py:1080
#, python-format
msgid "Ignoring Volume Error on vol %(vol_id)s during delete %(exc)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1076
+#: nova/virt/libvirt/driver.py:1130
#, python-format
msgid "Volume %(disk)s possibly unsafe to remove, please clean up manually"
msgstr ""
-#: nova/virt/libvirt/driver.py:1357 nova/virt/libvirt/driver.py:1365
+#: nova/virt/libvirt/driver.py:1414 nova/virt/libvirt/driver.py:1422
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:1410
+#: nova/virt/libvirt/driver.py:1467
msgid "During detach_interface, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:1976
+#: nova/virt/libvirt/driver.py:2136
msgid "Failed to soft reboot instance. Trying hard reboot."
msgstr ""
-#: nova/virt/libvirt/driver.py:2537
+#: nova/virt/libvirt/driver.py:2693
#, python-format
msgid "Image %s not found on disk storage. Continue without injecting data"
msgstr ""
-#: nova/virt/libvirt/driver.py:2700
+#: nova/virt/libvirt/driver.py:2880
msgid "File injection into a boot from volume instance is not supported"
msgstr ""
-#: nova/virt/libvirt/driver.py:2775
+#: nova/virt/libvirt/driver.py:2955
msgid "Instance disappeared while detaching a PCI device from it."
msgstr ""
-#: nova/virt/libvirt/driver.py:2830
+#: nova/virt/libvirt/driver.py:3010
#, python-format
msgid "Cannot update service status on host: %s,since it is not registered."
msgstr ""
-#: nova/virt/libvirt/driver.py:2833
+#: nova/virt/libvirt/driver.py:3013
#, python-format
msgid "Cannot update service status on host: %s,due to an unexpected exception."
msgstr ""
-#: nova/virt/libvirt/driver.py:2861
+#: nova/virt/libvirt/driver.py:3041
#, python-format
msgid "URI %(uri)s does not support full set of host capabilities: %(error)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3672
+#: nova/virt/libvirt/driver.py:3888
#, python-format
msgid "Timeout waiting for vif plugging callback for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3750
+#: nova/virt/libvirt/driver.py:3909
+#, python-format
+msgid "couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:3966
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. "
msgstr ""
-#: nova/virt/libvirt/driver.py:3813
+#: nova/virt/libvirt/driver.py:4028
#, python-format
-msgid "couldn't obtain the vpu count from domain id: %(id)s, exception: %(ex)s"
+msgid "couldn't obtain the vpu count from domain id: %(uuid)s, exception: %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4050
+#: nova/virt/libvirt/driver.py:4059
+#, python-format
+msgid "couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:4261
#, python-format
msgid "URI %(uri)s does not support listDevices: %(error)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4594
+#: nova/virt/libvirt/driver.py:4916
#, python-format
msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d."
msgstr ""
-#: nova/virt/libvirt/driver.py:4727
+#: nova/virt/libvirt/driver.py:5126
#, python-format
msgid ""
"Error from libvirt while getting description of %(instance_name)s: [Error"
" Code %(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4805
+#: nova/virt/libvirt/driver.py:5134
#, python-format
msgid ""
"Periodic task is updating the host stat, it is trying to get disk "
@@ -212,7 +420,7 @@ msgid ""
"resize."
msgstr ""
-#: nova/virt/libvirt/driver.py:4811
+#: nova/virt/libvirt/driver.py:5140
#, python-format
msgid ""
"Periodic task is updating the host stat, it is trying to get disk "
@@ -220,32 +428,32 @@ msgid ""
"exists on the compute node but is not managed by Nova."
msgstr ""
-#: nova/virt/libvirt/firewall.py:49
+#: nova/virt/libvirt/firewall.py:50
msgid ""
"Libvirt module could not be loaded. NWFilterFirewall will not work "
"correctly."
msgstr ""
-#: nova/virt/libvirt/imagecache.py:318
+#: nova/virt/libvirt/imagecache.py:317
#, python-format
msgid ""
"Instance %(instance)s is using a backing file %(backing)s which does not "
"appear in the image service"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:495
+#: nova/virt/libvirt/imagecache.py:494
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): warning -- an absent base file is in "
"use! instances: %(instance_list)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:545
+#: nova/virt/libvirt/imagecache.py:544
#, python-format
msgid "Unknown base file: %s"
msgstr ""
-#: nova/virt/libvirt/lvm.py:68
+#: nova/virt/libvirt/lvm.py:67
#, python-format
msgid ""
"Volume group %(vg)s will not be able to hold sparse volume %(lv)s. "
@@ -253,20 +461,13 @@ msgid ""
"%(free_space)db."
msgstr ""
-#: nova/virt/libvirt/utils.py:69 nova/virt/libvirt/utils.py:75
-msgid "systool is not installed"
-msgstr ""
-
-#: nova/virt/libvirt/utils.py:242
+#: nova/virt/libvirt/rbd_utils.py:268
#, python-format
-msgid "rbd remove %(name)s in pool %(pool)s failed"
+msgid "rbd remove %(volume)s in pool %(pool)s failed"
msgstr ""
-#: nova/virt/libvirt/vif.py:827
-#, python-format
-msgid ""
-"VIF driver \"%s\" is marked as deprecated and will be removed in the Juno"
-" release."
+#: nova/virt/libvirt/utils.py:69 nova/virt/libvirt/utils.py:75
+msgid "systool is not installed"
msgstr ""
#: nova/virt/libvirt/volume.py:132
@@ -274,56 +475,83 @@ msgstr ""
msgid "Unknown content in connection_info/qos_specs: %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:294
+#: nova/virt/libvirt/volume.py:297
#, python-format
msgid ""
"ISCSI volume not yet found at: %(disk_dev)s. Will rescan & retry. Try "
"number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:361
+#: nova/virt/libvirt/volume.py:364
#, python-format
msgid "Unable to delete volume device %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:372
+#: nova/virt/libvirt/volume.py:375
#, python-format
msgid ""
"Failed to remove multipath device descriptor %(dev_mapper)s. Exception "
"message: %(msg)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:694 nova/virt/libvirt/volume.py:843
+#: nova/virt/libvirt/volume.py:697 nova/virt/libvirt/volume.py:846
#, python-format
msgid "%s is already mounted"
msgstr ""
-#: nova/virt/libvirt/volume.py:739
+#: nova/virt/libvirt/volume.py:742
#, python-format
msgid "AoE volume not yet found at: %(aoedevpath)s. Try number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:931
+#: nova/virt/libvirt/volume.py:934
#, python-format
msgid ""
"Fibre volume not yet found at: %(mount_device)s. Will rescan & retry. "
"Try number: %(tries)s"
msgstr ""
-#: nova/virt/libvirt/volume.py:1033
+#: nova/virt/libvirt/volume.py:995
+#, python-format
+msgid "multipath-tools probably work improperly. devices to remove = %s."
+msgstr ""
+
+#: nova/virt/libvirt/volume.py:1040
msgid "Value required for 'scality_sofs_config'"
msgstr ""
-#: nova/virt/libvirt/volume.py:1044
+#: nova/virt/libvirt/volume.py:1051
#, python-format
msgid "Cannot access 'scality_sofs_config': %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:1050
+#: nova/virt/libvirt/volume.py:1057
msgid "Cannot execute /sbin/mount.sofs"
msgstr ""
-#: nova/virt/libvirt/volume.py:1065
+#: nova/virt/libvirt/volume.py:1072
msgid "Cannot mount Scality SOFS, check syslog for errors"
msgstr ""
+#: nova/virt/vmwareapi/driver.py:96
+msgid ""
+"The VMware ESX driver is now deprecated and has been removed in the Juno "
+"release. The VC driver will remain and continue to be supported."
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:157
+#, python-format
+msgid "The following clusters could not be found in the vCenter %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:202
+msgid "Instance cannot be found in host, or in an unknownstate."
+msgstr ""
+
+#: nova/volume/cinder.py:249
+msgid ""
+"Cinder V1 API is deprecated as of the Juno release, and Nova is still "
+"configured to use it. Enable the V2 API in Cinder and set "
+"cinder_catalog_info in nova.conf to use it."
+msgstr ""
+
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index b66ba059c1..085cecbd31 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 2014.2.dev374.g4e35f5f\n"
+"Project-Id-Version: nova 2014.2.dev566.gd156d7f\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
@@ -17,39 +17,43 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 1.3\n"
-#: nova/block_device.py:99
+#: nova/block_device.py:102
msgid "Some fields are invalid."
msgstr ""
-#: nova/block_device.py:109
+#: nova/block_device.py:112
msgid "Some required fields are missing"
msgstr ""
-#: nova/block_device.py:125
+#: nova/block_device.py:128
msgid "Boot index is invalid."
msgstr ""
-#: nova/block_device.py:168
+#: nova/block_device.py:171
msgid "Unrecognized legacy format."
msgstr ""
-#: nova/block_device.py:185
+#: nova/block_device.py:188
msgid "Invalid source_type field."
msgstr ""
-#: nova/block_device.py:189
+#: nova/block_device.py:191
+msgid "Invalid device UUID."
+msgstr ""
+
+#: nova/block_device.py:195
msgid "Missing device UUID."
msgstr ""
-#: nova/block_device.py:368
+#: nova/block_device.py:374
msgid "Device name empty or too long."
msgstr ""
-#: nova/block_device.py:372
+#: nova/block_device.py:378
msgid "Device name contains spaces."
msgstr ""
-#: nova/block_device.py:382
+#: nova/block_device.py:388
msgid "Invalid volume_size."
msgstr ""
@@ -328,7 +332,7 @@ msgstr ""
msgid "Group not valid. Reason: %(reason)s"
msgstr ""
-#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:58
+#: nova/exception.py:345 nova/openstack/common/db/sqlalchemy/utils.py:57
msgid "Sort key supplied was not valid."
msgstr ""
@@ -399,88 +403,88 @@ msgstr ""
msgid "Failed to deploy instance: %(reason)s"
msgstr ""
-#: nova/exception.py:402
+#: nova/exception.py:402 nova/exception.py:406
#, python-format
msgid "Failed to launch instances: %(reason)s"
msgstr ""
-#: nova/exception.py:406
+#: nova/exception.py:410
msgid "Service is unavailable at this time."
msgstr ""
-#: nova/exception.py:410
+#: nova/exception.py:414
#, python-format
msgid "Insufficient compute resources: %(reason)s."
msgstr ""
-#: nova/exception.py:414
+#: nova/exception.py:418
#, python-format
msgid "Connection to the hypervisor is broken on host: %(host)s"
msgstr ""
-#: nova/exception.py:418
+#: nova/exception.py:422
#, python-format
msgid "Compute service of %(host)s is unavailable at this time."
msgstr ""
-#: nova/exception.py:422
+#: nova/exception.py:426
#, python-format
msgid "Compute service of %(host)s is still in use."
msgstr ""
-#: nova/exception.py:426
+#: nova/exception.py:430
#, python-format
msgid "Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr ""
-#: nova/exception.py:431
+#: nova/exception.py:435
msgid "The supplied hypervisor type of is invalid."
msgstr ""
-#: nova/exception.py:435
+#: nova/exception.py:439
msgid "The instance requires a newer hypervisor version than has been provided."
msgstr ""
-#: nova/exception.py:440
+#: nova/exception.py:444
#, python-format
msgid ""
"The supplied disk path (%(path)s) already exists, it is expected not to "
"exist."
msgstr ""
-#: nova/exception.py:445
+#: nova/exception.py:449
#, python-format
msgid "The supplied device path (%(path)s) is invalid."
msgstr ""
-#: nova/exception.py:449
+#: nova/exception.py:453
#, python-format
msgid "The supplied device path (%(path)s) is in use."
msgstr ""
-#: nova/exception.py:454
+#: nova/exception.py:458
#, python-format
msgid "The supplied device (%(device)s) is busy."
msgstr ""
-#: nova/exception.py:458
+#: nova/exception.py:462
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr ""
-#: nova/exception.py:462
+#: nova/exception.py:466
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr ""
-#: nova/exception.py:466
+#: nova/exception.py:470
#, python-format
msgid ""
"VLAN tag is not appropriate for the port group %(bridge)s. Expected VLAN "
"tag is %(tag)s, but the one associated with the port group is %(pgroup)s."
msgstr ""
-#: nova/exception.py:472
+#: nova/exception.py:476
#, python-format
msgid ""
"vSwitch which contains the port group %(bridge)s is not associated with "
@@ -488,111 +492,111 @@ msgid ""
"one associated is %(actual)s."
msgstr ""
-#: nova/exception.py:479
+#: nova/exception.py:483
#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr ""
-#: nova/exception.py:483
+#: nova/exception.py:487
#, python-format
msgid "Disk info file is invalid: %(reason)s"
msgstr ""
-#: nova/exception.py:487
+#: nova/exception.py:491
#, python-format
msgid "Failed to read or write disk info file: %(reason)s"
msgstr ""
-#: nova/exception.py:491
+#: nova/exception.py:495
#, python-format
msgid "Image %(image_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:495
+#: nova/exception.py:499
#, python-format
msgid "Instance %(instance_id)s is unacceptable: %(reason)s"
msgstr ""
-#: nova/exception.py:499
+#: nova/exception.py:503
#, python-format
msgid "Ec2 id %(ec2_id)s is unacceptable."
msgstr ""
-#: nova/exception.py:503
+#: nova/exception.py:507
#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr ""
-#: nova/exception.py:507
+#: nova/exception.py:511
#, python-format
msgid "Invalid ID received %(id)s."
msgstr ""
-#: nova/exception.py:511
+#: nova/exception.py:515
msgid "Constraint not met."
msgstr ""
-#: nova/exception.py:516
+#: nova/exception.py:520
msgid "Resource could not be found."
msgstr ""
-#: nova/exception.py:521
+#: nova/exception.py:525
#, python-format
msgid "No agent-build associated with id %(id)s."
msgstr ""
-#: nova/exception.py:525
+#: nova/exception.py:529
#, python-format
msgid ""
"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
"%(architecture)s exists."
msgstr ""
-#: nova/exception.py:531
+#: nova/exception.py:535
#, python-format
msgid "Volume %(volume_id)s could not be found."
msgstr ""
-#: nova/exception.py:535
+#: nova/exception.py:539
#, python-format
msgid "No volume Block Device Mapping with id %(volume_id)s."
msgstr ""
-#: nova/exception.py:540
+#: nova/exception.py:544
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr ""
-#: nova/exception.py:544
+#: nova/exception.py:548
#, python-format
msgid "No disk at %(location)s"
msgstr ""
-#: nova/exception.py:548
+#: nova/exception.py:552
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr ""
-#: nova/exception.py:552
+#: nova/exception.py:556
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr ""
-#: nova/exception.py:556
+#: nova/exception.py:560
#, python-format
msgid "Requested image %(image)s has automatic disk resize disabled."
msgstr ""
-#: nova/exception.py:561
+#: nova/exception.py:565
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr ""
-#: nova/exception.py:565
+#: nova/exception.py:569
msgid "The current driver does not support preserving ephemeral partitions."
msgstr ""
-#: nova/exception.py:571
+#: nova/exception.py:575
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -600,1169 +604,1271 @@ msgid ""
"image ids since adding this image?"
msgstr ""
-#: nova/exception.py:578
+#: nova/exception.py:582
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:582
+#: nova/exception.py:586
msgid "Cannot find SR to read/write VDI."
msgstr ""
-#: nova/exception.py:586
+#: nova/exception.py:590
#, python-format
msgid "Network %(network_id)s is duplicated."
msgstr ""
-#: nova/exception.py:590
+#: nova/exception.py:594
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr ""
-#: nova/exception.py:594
+#: nova/exception.py:598
#, python-format
msgid "%(req)s is required to create a network."
msgstr ""
-#: nova/exception.py:598
+#: nova/exception.py:602
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr ""
-#: nova/exception.py:602
+#: nova/exception.py:606
#, python-format
msgid "Port id %(port_id)s could not be found."
msgstr ""
-#: nova/exception.py:606
+#: nova/exception.py:610
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr ""
-#: nova/exception.py:610
+#: nova/exception.py:614
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr ""
-#: nova/exception.py:614
+#: nova/exception.py:618
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:618
+#: nova/exception.py:622
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr ""
-#: nova/exception.py:622
+#: nova/exception.py:626
msgid "No networks defined."
msgstr ""
-#: nova/exception.py:626
+#: nova/exception.py:630
msgid "No more available networks."
msgstr ""
-#: nova/exception.py:630
+#: nova/exception.py:634
#, python-format
msgid ""
"Either network uuid %(network_uuid)s is not present or is not assigned to"
" the project %(project_id)s."
msgstr ""
-#: nova/exception.py:635
+#: nova/exception.py:639
msgid ""
"More than one possible network found. Specify network ID(s) to select "
"which one(s) to connect to,"
msgstr ""
-#: nova/exception.py:640
+#: nova/exception.py:644
#, python-format
msgid "Network %(network_uuid)s requires a subnet in order to boot instances on."
msgstr ""
-#: nova/exception.py:645
+#: nova/exception.py:649
#, python-format
msgid ""
"It is not allowed to create an interface on external network "
"%(network_uuid)s"
msgstr ""
-#: nova/exception.py:650
+#: nova/exception.py:654
+#, python-format
+msgid "Physical network is missing for network %(network_uuid)s"
+msgstr ""
+
+#: nova/exception.py:658
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
-#: nova/exception.py:654
+#: nova/exception.py:662
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr ""
-#: nova/exception.py:658
+#: nova/exception.py:666
#, python-format
msgid "Port %(port_id)s requires a FixedIP in order to be used."
msgstr ""
-#: nova/exception.py:662
+#: nova/exception.py:670
#, python-format
msgid "Port %(port_id)s not usable for instance %(instance)s."
msgstr ""
-#: nova/exception.py:666
+#: nova/exception.py:674
#, python-format
msgid "No free port available for instance %(instance)s."
msgstr ""
-#: nova/exception.py:670
+#: nova/exception.py:678
#, python-format
msgid "Fixed ip %(address)s already exists."
msgstr ""
-#: nova/exception.py:674
+#: nova/exception.py:682
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr ""
-#: nova/exception.py:678
+#: nova/exception.py:686
#, python-format
msgid "Fixed ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:682
+#: nova/exception.py:690
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed ips."
msgstr ""
-#: nova/exception.py:686
+#: nova/exception.py:694
#, python-format
msgid "Network host %(host)s has zero fixed ips in network %(network_id)s."
msgstr ""
-#: nova/exception.py:691
+#: nova/exception.py:699
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'."
msgstr ""
-#: nova/exception.py:695
+#: nova/exception.py:703
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
msgstr ""
-#: nova/exception.py:700
+#: nova/exception.py:708
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s."
msgstr ""
-#: nova/exception.py:705
+#: nova/exception.py:713
#, python-format
msgid "More than one instance is associated with fixed ip address '%(address)s'."
msgstr ""
-#: nova/exception.py:710
+#: nova/exception.py:718
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr ""
-#: nova/exception.py:715
+#: nova/exception.py:723
msgid "Zero fixed ips available."
msgstr ""
-#: nova/exception.py:719
+#: nova/exception.py:727
msgid "Zero fixed ips could be found."
msgstr ""
-#: nova/exception.py:723
+#: nova/exception.py:731
#, python-format
msgid "Floating ip %(address)s already exists."
msgstr ""
-#: nova/exception.py:728
+#: nova/exception.py:736
#, python-format
msgid "Floating ip not found for id %(id)s."
msgstr ""
-#: nova/exception.py:732
+#: nova/exception.py:740
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr ""
-#: nova/exception.py:736
+#: nova/exception.py:744
#, python-format
msgid "Floating ip not found for address %(address)s."
msgstr ""
-#: nova/exception.py:740
+#: nova/exception.py:748
#, python-format
msgid "Floating ip not found for host %(host)s."
msgstr ""
-#: nova/exception.py:744
+#: nova/exception.py:752
#, python-format
msgid "Multiple floating ips are found for address %(address)s."
msgstr ""
-#: nova/exception.py:748
+#: nova/exception.py:756
msgid "Floating ip pool not found."
msgstr ""
-#: nova/exception.py:753
+#: nova/exception.py:761
msgid "Zero floating ips available."
msgstr ""
-#: nova/exception.py:759
+#: nova/exception.py:767
#, python-format
msgid "Floating ip %(address)s is associated."
msgstr ""
-#: nova/exception.py:763
+#: nova/exception.py:771
#, python-format
msgid "Floating ip %(address)s is not associated."
msgstr ""
-#: nova/exception.py:767
+#: nova/exception.py:775
msgid "Zero floating ips exist."
msgstr ""
-#: nova/exception.py:772
+#: nova/exception.py:780
#, python-format
msgid "Interface %(interface)s not found."
msgstr ""
-#: nova/exception.py:777 nova/api/openstack/compute/contrib/floating_ips.py:97
+#: nova/exception.py:785 nova/api/openstack/compute/contrib/floating_ips.py:98
msgid "Cannot disassociate auto assigned floating ip"
msgstr ""
-#: nova/exception.py:782
+#: nova/exception.py:790
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr ""
-#: nova/exception.py:786
+#: nova/exception.py:794
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr ""
-#: nova/exception.py:790
+#: nova/exception.py:798
#, python-format
msgid "Service with host %(host)s binary %(binary)s exists."
msgstr ""
-#: nova/exception.py:794
+#: nova/exception.py:802
#, python-format
msgid "Service with host %(host)s topic %(topic)s exists."
msgstr ""
-#: nova/exception.py:798
+#: nova/exception.py:806
#, python-format
msgid "Host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:802
+#: nova/exception.py:810
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:806
+#: nova/exception.py:814
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr ""
-#: nova/exception.py:810
+#: nova/exception.py:818
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr ""
-#: nova/exception.py:814
+#: nova/exception.py:822
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
"%(unders)s"
msgstr ""
-#: nova/exception.py:819
+#: nova/exception.py:827
+#, python-format
+msgid "Wrong quota method %(method)s used on resource %(res)s"
+msgstr ""
+
+#: nova/exception.py:831
msgid "Quota could not be found"
msgstr ""
-#: nova/exception.py:823
+#: nova/exception.py:835
#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr ""
-#: nova/exception.py:828
+#: nova/exception.py:840
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr ""
-#: nova/exception.py:832
+#: nova/exception.py:844
#, python-format
msgid "Quota for user %(user_id)s in project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:837
+#: nova/exception.py:849
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:841
+#: nova/exception.py:853
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr ""
-#: nova/exception.py:845
+#: nova/exception.py:857
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:849
+#: nova/exception.py:861
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr ""
-#: nova/exception.py:853
+#: nova/exception.py:865
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr ""
-#: nova/exception.py:857
+#: nova/exception.py:869
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr ""
-#: nova/exception.py:861
+#: nova/exception.py:873
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr ""
-#: nova/exception.py:866
+#: nova/exception.py:878
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr ""
-#: nova/exception.py:871
+#: nova/exception.py:883
#, python-format
msgid ""
"Security group %(security_group_name)s already exists for project "
"%(project_id)s."
msgstr ""
-#: nova/exception.py:876
+#: nova/exception.py:888
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
"instance %(instance_id)s"
msgstr ""
-#: nova/exception.py:881
+#: nova/exception.py:893
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
"%(instance_id)s"
msgstr ""
-#: nova/exception.py:886
+#: nova/exception.py:898
#, python-format
msgid "Security group default rule (%rule_id)s not found."
msgstr ""
-#: nova/exception.py:890
+#: nova/exception.py:902
msgid ""
"Network requires port_security_enabled and subnet associated in order to "
"apply security groups."
msgstr ""
-#: nova/exception.py:896
+#: nova/exception.py:908
#, python-format
msgid "Rule already exists in group: %(rule)s"
msgstr ""
-#: nova/exception.py:900
+#: nova/exception.py:912
msgid "No Unique Match Found."
msgstr ""
-#: nova/exception.py:905
+#: nova/exception.py:917
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr ""
-#: nova/exception.py:909
+#: nova/exception.py:921
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr ""
-#: nova/exception.py:914
+#: nova/exception.py:926
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr ""
-#: nova/exception.py:918
+#: nova/exception.py:930
#, python-format
msgid ""
"Console pool with host %(host)s, console_type %(console_type)s and "
"compute_host %(compute_host)s already exists."
msgstr ""
-#: nova/exception.py:924
+#: nova/exception.py:936
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
"on proxy host %(host)s not found."
msgstr ""
-#: nova/exception.py:930
+#: nova/exception.py:942
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr ""
-#: nova/exception.py:934
+#: nova/exception.py:946
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:938
+#: nova/exception.py:950
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
"found."
msgstr ""
-#: nova/exception.py:943
+#: nova/exception.py:955
#, python-format
msgid "Invalid console type %(console_type)s"
msgstr ""
-#: nova/exception.py:947
+#: nova/exception.py:959
#, python-format
msgid "Unavailable console type %(console_type)s."
msgstr ""
-#: nova/exception.py:951
+#: nova/exception.py:963
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr ""
-#: nova/exception.py:956
+#: nova/exception.py:968
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr ""
-#: nova/exception.py:960
+#: nova/exception.py:972
#, python-format
msgid "Flavor with name %(flavor_name)s could not be found."
msgstr ""
-#: nova/exception.py:964
+#: nova/exception.py:976
#, python-format
msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination."
msgstr ""
-#: nova/exception.py:969
+#: nova/exception.py:981
+#, python-format
+msgid ""
+"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
+"retries."
+msgstr ""
+
+#: nova/exception.py:986
#, python-format
msgid "Cell %(cell_name)s doesn't exist."
msgstr ""
-#: nova/exception.py:973
+#: nova/exception.py:990
#, python-format
msgid "Cell with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:977
+#: nova/exception.py:994
#, python-format
msgid "Inconsistency in cell routing: %(reason)s"
msgstr ""
-#: nova/exception.py:981
+#: nova/exception.py:998
#, python-format
msgid "Service API method not found: %(detail)s"
msgstr ""
-#: nova/exception.py:985
+#: nova/exception.py:1002
msgid "Timeout waiting for response from cell"
msgstr ""
-#: nova/exception.py:989
+#: nova/exception.py:1006
#, python-format
msgid "Cell message has reached maximum hop count: %(hop_count)s"
msgstr ""
-#: nova/exception.py:993
+#: nova/exception.py:1010
msgid "No cells available matching scheduling criteria."
msgstr ""
-#: nova/exception.py:997
+#: nova/exception.py:1014
msgid "Cannot update cells configuration file."
msgstr ""
-#: nova/exception.py:1001
+#: nova/exception.py:1018
#, python-format
msgid "Cell is not known for instance %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:1005
+#: nova/exception.py:1022
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr ""
-#: nova/exception.py:1009
+#: nova/exception.py:1026
#, python-format
msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:1014
+#: nova/exception.py:1031
#, python-format
msgid ""
"Metric %(name)s could not be found on the compute host node "
"%(host)s.%(node)s."
msgstr ""
-#: nova/exception.py:1019
+#: nova/exception.py:1036
#, python-format
msgid "File %(file_path)s could not be found."
msgstr ""
-#: nova/exception.py:1023
+#: nova/exception.py:1040
msgid "Zero files could be found."
msgstr ""
-#: nova/exception.py:1027
+#: nova/exception.py:1044
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
-#: nova/exception.py:1032
+#: nova/exception.py:1049
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr ""
-#: nova/exception.py:1036
+#: nova/exception.py:1053
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr ""
-#: nova/exception.py:1040
+#: nova/exception.py:1057
msgid "Action not allowed."
msgstr ""
-#: nova/exception.py:1044
+#: nova/exception.py:1061
msgid "Rotation is not allowed for snapshots"
msgstr ""
-#: nova/exception.py:1048
+#: nova/exception.py:1065
msgid "Rotation param is required for backup image_type"
msgstr ""
-#: nova/exception.py:1053 nova/tests/compute/test_keypairs.py:144
+#: nova/exception.py:1070 nova/tests/compute/test_keypairs.py:146
#, python-format
msgid "Key pair '%(key_name)s' already exists."
msgstr ""
-#: nova/exception.py:1057
+#: nova/exception.py:1074
#, python-format
msgid "Instance %(name)s already exists."
msgstr ""
-#: nova/exception.py:1061
+#: nova/exception.py:1078
#, python-format
msgid "Flavor with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:1065
+#: nova/exception.py:1082
#, python-format
msgid "Flavor with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:1069
+#: nova/exception.py:1086
#, python-format
msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:1074
+#: nova/exception.py:1091
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr ""
-#: nova/exception.py:1078
+#: nova/exception.py:1095
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr ""
-#: nova/exception.py:1082
+#: nova/exception.py:1099
#, python-format
msgid "Storage error: %(reason)s"
msgstr ""
-#: nova/exception.py:1086
+#: nova/exception.py:1103
#, python-format
msgid "Migration error: %(reason)s"
msgstr ""
-#: nova/exception.py:1090
+#: nova/exception.py:1107
#, python-format
msgid "Migration pre-check error: %(reason)s"
msgstr ""
-#: nova/exception.py:1094
+#: nova/exception.py:1111
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr ""
-#: nova/exception.py:1100
+#: nova/exception.py:1117
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: nova/exception.py:1104
+#: nova/exception.py:1121
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr ""
-#: nova/exception.py:1108
+#: nova/exception.py:1125
msgid "When resizing, instances must change flavor!"
msgstr ""
-#: nova/exception.py:1112
+#: nova/exception.py:1129
#, python-format
msgid "Resize error: %(reason)s"
msgstr ""
-#: nova/exception.py:1116
+#: nova/exception.py:1133
#, python-format
msgid "Server disk was unable to be resized because: %(reason)s"
msgstr ""
-#: nova/exception.py:1120
+#: nova/exception.py:1137
msgid "Flavor's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:1124
+#: nova/exception.py:1141
msgid "Flavor's disk is too small for requested image."
msgstr ""
-#: nova/exception.py:1128
+#: nova/exception.py:1145
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
-#: nova/exception.py:1132
+#: nova/exception.py:1149
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr ""
-#: nova/exception.py:1137
+#: nova/exception.py:1154
#, python-format
msgid "Quota exceeded: code=%(code)s"
msgstr ""
-#: nova/exception.py:1144
+#: nova/exception.py:1161
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)d of %(allowed)d %(resource)s"
msgstr ""
-#: nova/exception.py:1149
+#: nova/exception.py:1166
msgid "Maximum number of floating ips exceeded"
msgstr ""
-#: nova/exception.py:1153
+#: nova/exception.py:1170
msgid "Maximum number of fixed ips exceeded"
msgstr ""
-#: nova/exception.py:1157
+#: nova/exception.py:1174
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr ""
-#: nova/exception.py:1161
+#: nova/exception.py:1178
msgid "Personality file limit exceeded"
msgstr ""
-#: nova/exception.py:1165
+#: nova/exception.py:1182
msgid "Personality file path too long"
msgstr ""
-#: nova/exception.py:1169
+#: nova/exception.py:1186
msgid "Personality file content too long"
msgstr ""
-#: nova/exception.py:1173 nova/tests/compute/test_keypairs.py:155
+#: nova/exception.py:1190 nova/tests/compute/test_keypairs.py:157
msgid "Maximum number of key pairs exceeded"
msgstr ""
-#: nova/exception.py:1178
+#: nova/exception.py:1195
msgid "Maximum number of security groups or rules exceeded"
msgstr ""
-#: nova/exception.py:1182
+#: nova/exception.py:1199
msgid "Maximum number of ports exceeded"
msgstr ""
-#: nova/exception.py:1186
+#: nova/exception.py:1203
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
msgstr ""
-#: nova/exception.py:1191
+#: nova/exception.py:1208
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr ""
-#: nova/exception.py:1195
+#: nova/exception.py:1212
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr ""
-#: nova/exception.py:1199
+#: nova/exception.py:1216
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr ""
-#: nova/exception.py:1203
+#: nova/exception.py:1220
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1208
+#: nova/exception.py:1225
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr ""
-#: nova/exception.py:1212
+#: nova/exception.py:1229
msgid "Unable to create flavor"
msgstr ""
-#: nova/exception.py:1216
+#: nova/exception.py:1233
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
-#: nova/exception.py:1222
+#: nova/exception.py:1239
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr ""
-#: nova/exception.py:1226
+#: nova/exception.py:1243
msgid "There was a conflict when trying to complete your request."
msgstr ""
-#: nova/exception.py:1232
+#: nova/exception.py:1249
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr ""
-#: nova/exception.py:1236
+#: nova/exception.py:1253
#, python-format
msgid "Info cache for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1241
+#: nova/exception.py:1258
#, python-format
msgid "Node %(node_id)s could not be found."
msgstr ""
-#: nova/exception.py:1245
+#: nova/exception.py:1262
#, python-format
msgid "Node with UUID %(node_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1249
+#: nova/exception.py:1266
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr ""
-#: nova/exception.py:1254
+#: nova/exception.py:1271
#, python-format
msgid "Invalid id: %(val)s (expecting \"i-...\")."
msgstr ""
-#: nova/exception.py:1258
+#: nova/exception.py:1275
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr ""
-#: nova/exception.py:1262
+#: nova/exception.py:1279
#, python-format
msgid "Could not upload image %(image_id)s"
msgstr ""
-#: nova/exception.py:1266
+#: nova/exception.py:1283
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr ""
-#: nova/exception.py:1270
+#: nova/exception.py:1287
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr ""
-#: nova/exception.py:1274
+#: nova/exception.py:1291
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr ""
-#: nova/exception.py:1278
+#: nova/exception.py:1295
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr ""
-#: nova/exception.py:1282
+#: nova/exception.py:1299
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
-#: nova/exception.py:1287
+#: nova/exception.py:1304
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
-#: nova/exception.py:1292
+#: nova/exception.py:1309
#, python-format
-msgid "Failed to attach network adapter device to %(instance)s"
+msgid "Failed to attach network adapter device to %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:1296
+#: nova/exception.py:1314
#, python-format
-msgid "Failed to detach network adapter device from %(instance)s"
+msgid "Failed to detach network adapter device from %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:1300
+#: nova/exception.py:1319
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
msgstr ""
-#: nova/exception.py:1306
+#: nova/exception.py:1325
msgid "User data needs to be valid base 64."
msgstr ""
-#: nova/exception.py:1310
+#: nova/exception.py:1329
#, python-format
msgid ""
"Unexpected task state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1319
+#: nova/exception.py:1338
#, python-format
msgid ""
"Action for request_id %(request_id)s on instance %(instance_uuid)s not "
"found"
msgstr ""
-#: nova/exception.py:1324
+#: nova/exception.py:1343
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr ""
-#: nova/exception.py:1328
+#: nova/exception.py:1347
#, python-format
msgid ""
"Unexpected VM state: expecting %(expected)s but the actual state is "
"%(actual)s"
msgstr ""
-#: nova/exception.py:1333
+#: nova/exception.py:1352
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1337
+#: nova/exception.py:1356
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1341
+#: nova/exception.py:1360
msgid "Instance recreate is not supported."
msgstr ""
-#: nova/exception.py:1345
+#: nova/exception.py:1364
#, python-format
msgid ""
"The service from servicegroup driver %(driver)s is temporarily "
"unavailable."
msgstr ""
-#: nova/exception.py:1350
+#: nova/exception.py:1369
#, python-format
msgid "%(binary)s attempted direct database access which is not allowed by policy"
msgstr ""
-#: nova/exception.py:1355
+#: nova/exception.py:1374
#, python-format
msgid "Virtualization type '%(virt)s' is not supported by this compute driver"
msgstr ""
-#: nova/exception.py:1360
+#: nova/exception.py:1379
#, python-format
msgid ""
"Requested hardware '%(model)s' is not supported by the '%(virt)s' virt "
"driver"
msgstr ""
-#: nova/exception.py:1365
+#: nova/exception.py:1384
#, python-format
msgid "Invalid Base 64 data for file %(path)s"
msgstr ""
-#: nova/exception.py:1369
+#: nova/exception.py:1388
#, python-format
msgid "Build of instance %(instance_uuid)s aborted: %(reason)s"
msgstr ""
-#: nova/exception.py:1373
+#: nova/exception.py:1392
#, python-format
msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s"
msgstr ""
-#: nova/exception.py:1378
+#: nova/exception.py:1397
#, python-format
msgid "Shadow table with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:1383
+#: nova/exception.py:1402
#, python-format
msgid "Instance rollback performed due to: %s"
msgstr ""
-#: nova/exception.py:1389
+#: nova/exception.py:1408
#, python-format
msgid "Unsupported object type %(objtype)s"
msgstr ""
-#: nova/exception.py:1393
+#: nova/exception.py:1412
#, python-format
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr ""
-#: nova/exception.py:1397
+#: nova/exception.py:1416
#, python-format
msgid "Version %(objver)s of %(objname)s is not supported"
msgstr ""
-#: nova/exception.py:1401
+#: nova/exception.py:1420
#, python-format
msgid "Cannot modify readonly field %(field)s"
msgstr ""
-#: nova/exception.py:1405
+#: nova/exception.py:1424
#, python-format
msgid "Object action %(action)s failed because: %(reason)s"
msgstr ""
-#: nova/exception.py:1409
+#: nova/exception.py:1428
#, python-format
msgid "Field %(field)s of %(objname)s is not an instance of Field"
msgstr ""
-#: nova/exception.py:1413
+#: nova/exception.py:1432
#, python-format
msgid "Core API extensions are missing: %(missing_apis)s"
msgstr ""
-#: nova/exception.py:1417
+#: nova/exception.py:1436
#, python-format
msgid "Error during following call to agent: %(method)s"
msgstr ""
-#: nova/exception.py:1421
+#: nova/exception.py:1440
#, python-format
msgid "Unable to contact guest agent. The following call timed out: %(method)s"
msgstr ""
-#: nova/exception.py:1426
+#: nova/exception.py:1445
#, python-format
msgid "Agent does not support the call: %(method)s"
msgstr ""
-#: nova/exception.py:1430
+#: nova/exception.py:1449
#, python-format
msgid "Instance group %(group_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1434
+#: nova/exception.py:1453
#, python-format
msgid "Instance group %(group_uuid)s already exists."
msgstr ""
-#: nova/exception.py:1438
+#: nova/exception.py:1457
#, python-format
msgid "Instance group %(group_uuid)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1443
+#: nova/exception.py:1462
#, python-format
msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s."
msgstr ""
-#: nova/exception.py:1448
+#: nova/exception.py:1467
#, python-format
msgid "Instance group %(group_uuid)s has no policy %(policy)s."
msgstr ""
-#: nova/exception.py:1452
+#: nova/exception.py:1471
#, python-format
msgid "Number of retries to plugin (%(num_retries)d) exceeded."
msgstr ""
-#: nova/exception.py:1456
+#: nova/exception.py:1475
#, python-format
msgid "There was an error with the download module %(module)s. %(reason)s"
msgstr ""
-#: nova/exception.py:1461
+#: nova/exception.py:1480
#, python-format
msgid ""
"The metadata for this location will not work with this module %(module)s."
" %(reason)s."
msgstr ""
-#: nova/exception.py:1466
+#: nova/exception.py:1485
#, python-format
msgid "The method %(method_name)s is not implemented."
msgstr ""
-#: nova/exception.py:1470
+#: nova/exception.py:1489
#, python-format
msgid "The module %(module)s is misconfigured: %(reason)s."
msgstr ""
-#: nova/exception.py:1474
+#: nova/exception.py:1493
#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr ""
-#: nova/exception.py:1478
+#: nova/exception.py:1497
#, python-format
msgid "The PCI address %(address)s has an incorrect format."
msgstr ""
-#: nova/exception.py:1482
+#: nova/exception.py:1501
+#, python-format
+msgid ""
+"Invalid PCI Whitelist: The PCI address %(address)s has an invalid "
+"%(field)s."
+msgstr ""
+
+#: nova/exception.py:1506
+msgid ""
+"Invalid PCI Whitelist: The PCI whitelist can specify devname or address, "
+"but not both"
+msgstr ""
+
+#: nova/exception.py:1512
#, python-format
msgid "PCI device %(id)s not found"
msgstr ""
-#: nova/exception.py:1486
+#: nova/exception.py:1516
#, python-format
msgid "PCI Device %(node_id)s:%(address)s not found."
msgstr ""
-#: nova/exception.py:1490
+#: nova/exception.py:1520
#, python-format
msgid ""
"PCI device %(compute_node_id)s:%(address)s is %(status)s instead of "
"%(hopestatus)s"
msgstr ""
-#: nova/exception.py:1496
+#: nova/exception.py:1526
#, python-format
msgid ""
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead "
"of %(hopeowner)s"
msgstr ""
-#: nova/exception.py:1502
+#: nova/exception.py:1532
#, python-format
msgid "PCI device request (%requests)s failed"
msgstr ""
-#: nova/exception.py:1507
+#: nova/exception.py:1537
#, python-format
msgid ""
"Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty "
"pool"
msgstr ""
-#: nova/exception.py:1513
+#: nova/exception.py:1543
#, python-format
msgid "Invalid PCI alias definition: %(reason)s"
msgstr ""
-#: nova/exception.py:1517
+#: nova/exception.py:1547
#, python-format
msgid "PCI alias %(alias)s is not defined"
msgstr ""
-#: nova/exception.py:1522
+#: nova/exception.py:1552
#, python-format
msgid "Not enough parameters: %(reason)s"
msgstr ""
-#: nova/exception.py:1527
+#: nova/exception.py:1557
#, python-format
msgid "Invalid PCI devices Whitelist config %(reason)s"
msgstr ""
-#: nova/exception.py:1531
+#: nova/exception.py:1561
#, python-format
msgid "Cannot change %(node_id)s to %(new_node_id)s"
msgstr ""
-#: nova/exception.py:1541
+#: nova/exception.py:1571
#, python-format
msgid ""
"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
"%(reason)s"
msgstr ""
-#: nova/exception.py:1546
+#: nova/exception.py:1576
#, python-format
msgid "Failed to detach PCI device %(dev)s: %(reason)s"
msgstr ""
-#: nova/exception.py:1550
+#: nova/exception.py:1580
#, python-format
msgid "%(type)s hypervisor does not support PCI devices"
msgstr ""
-#: nova/exception.py:1554
+#: nova/exception.py:1584
#, python-format
msgid "Key manager error: %(reason)s"
msgstr ""
-#: nova/exception.py:1558
+#: nova/exception.py:1588
#, python-format
msgid "Failed to remove volume(s): (%(reason)s)"
msgstr ""
-#: nova/exception.py:1562
+#: nova/exception.py:1592
#, python-format
msgid "Provided video model (%(model)s) is not supported."
msgstr ""
-#: nova/exception.py:1566
+#: nova/exception.py:1596
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr ""
-#: nova/exception.py:1571
+#: nova/exception.py:1601
#, python-format
msgid ""
"The requested amount of video memory %(req_vram)d is higher than the "
"maximum allowed by flavor %(max_vram)d."
msgstr ""
-#: nova/exception.py:1576
+#: nova/exception.py:1606
#, python-format
msgid "Provided watchdog action (%(action)s) is not supported."
msgstr ""
-#: nova/exception.py:1580
+#: nova/exception.py:1610
+msgid ""
+"Live migration of instances with config drives is not supported in "
+"libvirt unless libvirt instance path and drive data is shared across "
+"compute nodes."
+msgstr ""
+
+#: nova/exception.py:1616
+#, python-format
msgid ""
-"Block migration of instances with config drives is not supported in "
-"libvirt."
+"Host %(server)s is running an old version of Nova, live migrations "
+"involving that version may cause data loss. Upgrade Nova on %(server)s "
+"and try again."
msgstr ""
-#: nova/exception.py:1585
+#: nova/exception.py:1622
#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr ""
+#: nova/exception.py:1626
+#, python-format
+msgid ""
+"Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted "
+"%(maxsockets)d:%(maxcores)d:%(maxthreads)d"
+msgstr ""
+
+#: nova/exception.py:1631
+#, python-format
+msgid ""
+"Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted "
+"%(maxsockets)d:%(maxcores)d:%(maxthreads)d"
+msgstr ""
+
+#: nova/exception.py:1636
+#, python-format
+msgid ""
+"Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to"
+" satisfy for vcpus count %(vcpus)d"
+msgstr ""
+
+#: nova/exception.py:1641
+#, python-format
+msgid "Architecture name '%(arch)s' is not recognised"
+msgstr ""
+
+#: nova/exception.py:1645
+msgid "CPU and memory allocation must be provided for all NUMA nodes"
+msgstr ""
+
+#: nova/exception.py:1650
+#, python-format
+msgid ""
+"Image property '%(name)s' is not permitted to override NUMA configuration"
+" set against the flavor"
+msgstr ""
+
+#: nova/exception.py:1655
+msgid ""
+"Asymmetric NUMA topologies require explicit assignment of CPUs and memory"
+" to nodes in image or flavor"
+msgstr ""
+
+#: nova/exception.py:1660
+#, python-format
+msgid "CPU number %(cpunum)d is larger than max %(cpumax)d"
+msgstr ""
+
+#: nova/exception.py:1664
+#, python-format
+msgid "CPU number %(cpunum)d is assigned to two nodes"
+msgstr ""
+
+#: nova/exception.py:1668
+#, python-format
+msgid "CPU number %(cpuset)s is not assigned to any node"
+msgstr ""
+
+#: nova/exception.py:1672
+#, python-format
+msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB"
+msgstr ""
+
#: nova/filters.py:84
#, python-format
msgid "Filter %s returned 0 hosts"
@@ -1776,124 +1882,128 @@ msgstr ""
msgid "Failed to get nw_info"
msgstr ""
-#: nova/quota.py:1326
+#: nova/quota.py:1332
#, python-format
msgid "Failed to commit reservations %s"
msgstr ""
-#: nova/quota.py:1349
+#: nova/quota.py:1355
#, python-format
msgid "Failed to roll back reservations %s"
msgstr ""
-#: nova/service.py:160
+#: nova/service.py:161
#, python-format
msgid "Starting %(topic)s node (version %(version)s)"
msgstr ""
-#: nova/service.py:285
+#: nova/service.py:286
msgid "Service killed that has no database entry"
msgstr ""
-#: nova/service.py:297
+#: nova/service.py:298
msgid "Service error occurred during cleanup_host"
msgstr ""
-#: nova/service.py:314
+#: nova/service.py:315
#, python-format
msgid "Temporary directory is invalid: %s"
msgstr ""
-#: nova/service.py:339
+#: nova/service.py:340
#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
-#: nova/service.py:424
+#: nova/service.py:433
msgid "serve() can only be called once"
msgstr ""
-#: nova/utils.py:148
+#: nova/utils.py:147
#, python-format
msgid "Expected to receive %(exp)s bytes, but actually %(act)s"
msgstr ""
-#: nova/utils.py:354
+#: nova/utils.py:353
#, python-format
msgid "Couldn't get IPv4 : %(ex)s"
msgstr ""
-#: nova/utils.py:370
+#: nova/utils.py:369
#, python-format
msgid "IPv4 address is not found.: %s"
msgstr ""
-#: nova/utils.py:373
+#: nova/utils.py:372
#, python-format
msgid "Couldn't get IPv4 of %(interface)s : %(ex)s"
msgstr ""
-#: nova/utils.py:388
+#: nova/utils.py:387
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
-#: nova/utils.py:391
+#: nova/utils.py:390
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
-#: nova/utils.py:412
+#: nova/utils.py:411
#, python-format
msgid "Invalid backend: %s"
msgstr ""
-#: nova/utils.py:457
+#: nova/utils.py:454
#, python-format
msgid "Expected object of type: %s"
msgstr ""
-#: nova/utils.py:485
+#: nova/utils.py:482
#, python-format
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:776 nova/virt/configdrive.py:177
+#: nova/utils.py:773
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
+#: nova/utils.py:964
+msgid "The input is not a string or unicode"
+msgstr ""
+
#: nova/utils.py:966
#, python-format
msgid "%s is not a string or unicode"
msgstr ""
-#: nova/utils.py:970
+#: nova/utils.py:973
#, python-format
msgid "%(name)s has a minimum character requirement of %(min_length)s."
msgstr ""
-#: nova/utils.py:975
+#: nova/utils.py:978
#, python-format
msgid "%(name)s has more than %(max_length)s characters."
msgstr ""
-#: nova/utils.py:985
+#: nova/utils.py:988
#, python-format
msgid "%(value_name)s must be an integer"
msgstr ""
-#: nova/utils.py:991
+#: nova/utils.py:994
#, python-format
msgid "%(value_name)s must be >= %(min_value)d"
msgstr ""
-#: nova/utils.py:997
+#: nova/utils.py:1000
#, python-format
msgid "%(value_name)s must be <= %(max_value)d"
msgstr ""
-#: nova/utils.py:1031
+#: nova/utils.py:1034
#, python-format
msgid "Hypervisor version %s is invalid."
msgstr ""
@@ -1903,571 +2013,441 @@ msgstr ""
msgid "Failed to load %(cfgfile)s: %(ex)s"
msgstr ""
-#: nova/wsgi.py:132
+#: nova/wsgi.py:133
#, python-format
msgid "Could not bind to %(host)s:%(port)s"
msgstr ""
-#: nova/wsgi.py:137
+#: nova/wsgi.py:138
#, python-format
msgid "%(name)s listening on %(host)s:%(port)s"
msgstr ""
-#: nova/wsgi.py:152 nova/openstack/common/sslutils.py:50
+#: nova/wsgi.py:159 nova/openstack/common/sslutils.py:47
#, python-format
msgid "Unable to find cert_file : %s"
msgstr ""
-#: nova/wsgi.py:156 nova/openstack/common/sslutils.py:53
+#: nova/wsgi.py:163 nova/openstack/common/sslutils.py:50
#, python-format
msgid "Unable to find ca_file : %s"
msgstr ""
-#: nova/wsgi.py:160 nova/openstack/common/sslutils.py:56
+#: nova/wsgi.py:167 nova/openstack/common/sslutils.py:53
#, python-format
msgid "Unable to find key_file : %s"
msgstr ""
-#: nova/wsgi.py:164 nova/openstack/common/sslutils.py:59
+#: nova/wsgi.py:171 nova/openstack/common/sslutils.py:56
msgid ""
"When running server in SSL mode, you must specify both a cert_file and "
"key_file option value in your configuration file"
msgstr ""
-#: nova/wsgi.py:195
+#: nova/wsgi.py:202
#, python-format
msgid "Failed to start %(name)s on %(host)s:%(port)s with SSL support"
msgstr ""
-#: nova/wsgi.py:223
+#: nova/wsgi.py:238
msgid "Stopping WSGI server."
msgstr ""
-#: nova/wsgi.py:242
+#: nova/wsgi.py:258
msgid "WSGI server has stopped."
msgstr ""
-#: nova/wsgi.py:311
+#: nova/wsgi.py:327
msgid "You must implement __call__"
msgstr ""
-#: nova/api/auth.py:72
-msgid "ratelimit_v3 is removed from v3 api."
-msgstr ""
-
-#: nova/api/auth.py:135
+#: nova/api/auth.py:136
msgid "Invalid service catalog json."
msgstr ""
-#: nova/api/auth.py:159
-msgid "Sourcing roles from deprecated X-Role HTTP header"
-msgstr ""
-
#: nova/api/sizelimit.py:53 nova/api/sizelimit.py:62 nova/api/sizelimit.py:76
#: nova/api/metadata/password.py:62
msgid "Request is too large."
msgstr ""
-#: nova/api/ec2/__init__.py:88
+#: nova/api/ec2/__init__.py:89
#, python-format
msgid "FaultWrapper: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:159
+#: nova/api/ec2/__init__.py:160
msgid "Too many failed authentications."
msgstr ""
-#: nova/api/ec2/__init__.py:168
-#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and"
-" will be locked out for %(lock_mins)d minutes."
-msgstr ""
-
-#: nova/api/ec2/__init__.py:187
+#: nova/api/ec2/__init__.py:188
msgid "Signature not provided"
msgstr ""
-#: nova/api/ec2/__init__.py:192
+#: nova/api/ec2/__init__.py:193
msgid "Access key not provided"
msgstr ""
-#: nova/api/ec2/__init__.py:228 nova/api/ec2/__init__.py:244
+#: nova/api/ec2/__init__.py:229 nova/api/ec2/__init__.py:245
msgid "Failure communicating with keystone"
msgstr ""
-#: nova/api/ec2/__init__.py:304
+#: nova/api/ec2/__init__.py:305
msgid "Timestamp failed validation."
msgstr ""
-#: nova/api/ec2/__init__.py:402
+#: nova/api/ec2/__init__.py:403
#, python-format
msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
msgstr ""
-#: nova/api/ec2/__init__.py:492
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:495
-#, python-format
-msgid "%(ex_name)s raised: %(ex_str)s"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:519
-#, python-format
-msgid "Environment: %s"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:521
+#: nova/api/ec2/__init__.py:522
msgid "Unknown error occurred."
msgstr ""
-#: nova/api/ec2/cloud.py:395
+#: nova/api/ec2/cloud.py:391
#, python-format
msgid "Create snapshot of volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:420
+#: nova/api/ec2/cloud.py:418
#, python-format
msgid "Could not find key pair(s): %s"
msgstr ""
-#: nova/api/ec2/cloud.py:436
+#: nova/api/ec2/cloud.py:434
#, python-format
msgid "Create key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:448
+#: nova/api/ec2/cloud.py:446
#, python-format
msgid "Import key %s"
msgstr ""
-#: nova/api/ec2/cloud.py:461
+#: nova/api/ec2/cloud.py:459
#, python-format
msgid "Delete key pair %s"
msgstr ""
-#: nova/api/ec2/cloud.py:603 nova/api/ec2/cloud.py:733
+#: nova/api/ec2/cloud.py:601 nova/api/ec2/cloud.py:731
msgid "need group_name or group_id"
msgstr ""
-#: nova/api/ec2/cloud.py:608
+#: nova/api/ec2/cloud.py:606
msgid "can't build a valid rule"
msgstr ""
-#: nova/api/ec2/cloud.py:616
+#: nova/api/ec2/cloud.py:614
#, python-format
msgid "Invalid IP protocol %(protocol)s"
msgstr ""
-#: nova/api/ec2/cloud.py:650 nova/api/ec2/cloud.py:686
+#: nova/api/ec2/cloud.py:648 nova/api/ec2/cloud.py:684
msgid "No rule for the specified parameters."
msgstr ""
-#: nova/api/ec2/cloud.py:764
+#: nova/api/ec2/cloud.py:762
#, python-format
msgid "Get console output for instance %s"
msgstr ""
-#: nova/api/ec2/cloud.py:836
+#: nova/api/ec2/cloud.py:834
#, python-format
msgid "Create volume from snapshot %s"
msgstr ""
-#: nova/api/ec2/cloud.py:840 nova/api/openstack/compute/contrib/volumes.py:243
+#: nova/api/ec2/cloud.py:838 nova/api/openstack/compute/contrib/volumes.py:243
#, python-format
msgid "Create volume of %s GB"
msgstr ""
-#: nova/api/ec2/cloud.py:880
+#: nova/api/ec2/cloud.py:878
#, python-format
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr ""
-#: nova/api/ec2/cloud.py:910 nova/api/openstack/compute/contrib/volumes.py:506
+#: nova/api/ec2/cloud.py:908 nova/api/openstack/compute/contrib/volumes.py:506
#, python-format
msgid "Detach volume %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1242
+#: nova/api/ec2/cloud.py:1262
msgid "Allocate address"
msgstr ""
-#: nova/api/ec2/cloud.py:1247
+#: nova/api/ec2/cloud.py:1267
#, python-format
msgid "Release address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1252
+#: nova/api/ec2/cloud.py:1272
#, python-format
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1262
+#: nova/api/ec2/cloud.py:1282
msgid "Unable to associate IP Address, no fixed_ips."
msgstr ""
-#: nova/api/ec2/cloud.py:1270
-#: nova/api/openstack/compute/contrib/floating_ips.py:249
-#, python-format
-msgid "multiple fixed_ips exist, using the first: %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1283
+#: nova/api/ec2/cloud.py:1303
#, python-format
msgid "Disassociate address %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1300 nova/api/openstack/compute/servers.py:918
+#: nova/api/ec2/cloud.py:1320 nova/api/openstack/compute/servers.py:920
#: nova/api/openstack/compute/plugins/v3/multiple_create.py:64
msgid "min_count must be <= max_count"
msgstr ""
-#: nova/api/ec2/cloud.py:1332
+#: nova/api/ec2/cloud.py:1352
msgid "Image must be available"
msgstr ""
-#: nova/api/ec2/cloud.py:1429
+#: nova/api/ec2/cloud.py:1452
#, python-format
msgid "Reboot instance %r"
msgstr ""
-#: nova/api/ec2/cloud.py:1542
+#: nova/api/ec2/cloud.py:1567
#, python-format
msgid "De-registering image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1558
+#: nova/api/ec2/cloud.py:1583
msgid "imageLocation is required"
msgstr ""
-#: nova/api/ec2/cloud.py:1578
+#: nova/api/ec2/cloud.py:1603
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1639
+#: nova/api/ec2/cloud.py:1664
msgid "user or group not specified"
msgstr ""
-#: nova/api/ec2/cloud.py:1642
+#: nova/api/ec2/cloud.py:1667
msgid "only group \"all\" is supported"
msgstr ""
-#: nova/api/ec2/cloud.py:1645
+#: nova/api/ec2/cloud.py:1670
msgid "operation_type must be add or remove"
msgstr ""
-#: nova/api/ec2/cloud.py:1647
+#: nova/api/ec2/cloud.py:1672
#, python-format
msgid "Updating image %s publicity"
msgstr ""
-#: nova/api/ec2/cloud.py:1660
+#: nova/api/ec2/cloud.py:1685
#, python-format
msgid "Not allowed to modify attributes for image %s"
msgstr ""
-#: nova/api/ec2/cloud.py:1686
+#: nova/api/ec2/cloud.py:1715
#, python-format
msgid ""
"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
"have a volume attached at root (%(root)s)"
msgstr ""
-#: nova/api/ec2/cloud.py:1717
+#: nova/api/ec2/cloud.py:1748
#, python-format
-msgid "Couldn't stop instance within %d sec"
+msgid ""
+"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
+"%(vm_state)s, current task_state: %(task_state)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1736
+#: nova/api/ec2/cloud.py:1772
#, python-format
msgid "image of %(instance)s at %(now)s"
msgstr ""
-#: nova/api/ec2/cloud.py:1761 nova/api/ec2/cloud.py:1811
+#: nova/api/ec2/cloud.py:1797 nova/api/ec2/cloud.py:1847
msgid "resource_id and tag are required"
msgstr ""
-#: nova/api/ec2/cloud.py:1765 nova/api/ec2/cloud.py:1815
+#: nova/api/ec2/cloud.py:1801 nova/api/ec2/cloud.py:1851
msgid "Expecting a list of resources"
msgstr ""
-#: nova/api/ec2/cloud.py:1770 nova/api/ec2/cloud.py:1820
-#: nova/api/ec2/cloud.py:1878
+#: nova/api/ec2/cloud.py:1806 nova/api/ec2/cloud.py:1856
+#: nova/api/ec2/cloud.py:1914
msgid "Only instances implemented"
msgstr ""
-#: nova/api/ec2/cloud.py:1774 nova/api/ec2/cloud.py:1824
+#: nova/api/ec2/cloud.py:1810 nova/api/ec2/cloud.py:1860
msgid "Expecting a list of tagSets"
msgstr ""
-#: nova/api/ec2/cloud.py:1780 nova/api/ec2/cloud.py:1833
+#: nova/api/ec2/cloud.py:1816 nova/api/ec2/cloud.py:1869
msgid "Expecting tagSet to be key/value pairs"
msgstr ""
-#: nova/api/ec2/cloud.py:1787
+#: nova/api/ec2/cloud.py:1823
msgid "Expecting both key and value to be set"
msgstr ""
-#: nova/api/ec2/cloud.py:1838
+#: nova/api/ec2/cloud.py:1874
msgid "Expecting key to be set"
msgstr ""
-#: nova/api/ec2/cloud.py:1912
+#: nova/api/ec2/cloud.py:1948
msgid "Invalid CIDR"
msgstr ""
-#: nova/api/ec2/ec2utils.py:254
+#: nova/api/ec2/ec2utils.py:255
#, python-format
msgid "Unacceptable attach status:%s for ec2 API."
msgstr ""
-#: nova/api/ec2/ec2utils.py:277
+#: nova/api/ec2/ec2utils.py:278
msgid "Request must include either Timestamp or Expires, but cannot contain both"
msgstr ""
-#: nova/api/ec2/ec2utils.py:295
+#: nova/api/ec2/ec2utils.py:296
msgid "Timestamp is invalid."
msgstr ""
-#: nova/api/metadata/handler.py:111
-msgid ""
-"X-Instance-ID present in request headers. The "
-"'service_neutron_metadata_proxy' option must be enabled to process this "
-"header."
-msgstr ""
-
-#: nova/api/metadata/handler.py:140 nova/api/metadata/handler.py:147
+#: nova/api/metadata/handler.py:148
#, python-format
msgid "Failed to get metadata for ip: %s"
msgstr ""
-#: nova/api/metadata/handler.py:142 nova/api/metadata/handler.py:198
+#: nova/api/metadata/handler.py:150 nova/api/metadata/handler.py:207
msgid "An unknown error has occurred. Please try your request again."
msgstr ""
-#: nova/api/metadata/handler.py:160
+#: nova/api/metadata/handler.py:169
msgid "X-Instance-ID header is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:162
+#: nova/api/metadata/handler.py:171
msgid "X-Tenant-ID header is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:164
+#: nova/api/metadata/handler.py:173
msgid "Multiple X-Instance-ID headers found within request."
msgstr ""
-#: nova/api/metadata/handler.py:166
+#: nova/api/metadata/handler.py:175
msgid "Multiple X-Tenant-ID headers found within request."
msgstr ""
-#: nova/api/metadata/handler.py:180
-#, python-format
-msgid ""
-"X-Instance-ID-Signature: %(signature)s does not match the expected value:"
-" %(expected_signature)s for id: %(instance_id)s. Request From: "
-"%(remote_address)s"
-msgstr ""
-
-#: nova/api/metadata/handler.py:189
+#: nova/api/metadata/handler.py:198
msgid "Invalid proxy request signature."
msgstr ""
-#: nova/api/metadata/handler.py:196 nova/api/metadata/handler.py:203
+#: nova/api/metadata/handler.py:205
#, python-format
msgid "Failed to get metadata for instance id: %s"
msgstr ""
-#: nova/api/metadata/handler.py:207
-#, python-format
-msgid ""
-"Tenant_id %(tenant_id)s does not match tenant_id of instance "
-"%(instance_id)s."
-msgstr ""
-
-#: nova/api/metadata/vendordata_json.py:47
-msgid "file does not exist"
-msgstr ""
-
-#: nova/api/metadata/vendordata_json.py:49
-msgid "Unexpected IOError when reading"
-msgstr ""
-
-#: nova/api/metadata/vendordata_json.py:52
-msgid "failed to load json"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:89
+#: nova/api/openstack/__init__.py:92
#, python-format
msgid "Caught error: %s"
msgstr ""
-#: nova/api/openstack/__init__.py:98
-#, python-format
-msgid "%(url)s returned with HTTP %(status)d"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:190
+#: nova/api/openstack/__init__.py:189
msgid "Must specify an ExtensionManager class"
msgstr ""
-#: nova/api/openstack/__init__.py:236 nova/api/openstack/__init__.py:410
-#, python-format
-msgid ""
-"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
-"resource"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:283
-#: nova/api/openstack/compute/plugins/v3/servers.py:99
-#, python-format
-msgid "Not loading %s because it is in the blacklist"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:288
-#: nova/api/openstack/compute/plugins/v3/servers.py:104
-#, python-format
-msgid "Not loading %s because it is not in the whitelist"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:295
-msgid "V3 API has been disabled by configuration"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:308
-#, python-format
-msgid "Extensions in both blacklist and whitelist: %s"
-msgstr ""
-
-#: nova/api/openstack/__init__.py:332
-#, python-format
-msgid "Missing core API extensions: %s"
-msgstr ""
-
-#: nova/api/openstack/common.py:132
-#, python-format
-msgid ""
-"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. "
-"Bad upgrade or db corrupted?"
-msgstr ""
-
-#: nova/api/openstack/common.py:182
+#: nova/api/openstack/common.py:185
#, python-format
msgid "%s param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:185
+#: nova/api/openstack/common.py:188
#, python-format
msgid "%s param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:210
+#: nova/api/openstack/common.py:213
msgid "offset param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:216
+#: nova/api/openstack/common.py:219
msgid "limit param must be an integer"
msgstr ""
-#: nova/api/openstack/common.py:220
+#: nova/api/openstack/common.py:223
msgid "limit param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:224
+#: nova/api/openstack/common.py:227
msgid "offset param must be positive"
msgstr ""
-#: nova/api/openstack/common.py:259 nova/api/openstack/compute/flavors.py:146
-#: nova/api/openstack/compute/servers.py:603
-#: nova/api/openstack/compute/plugins/v3/flavors.py:110
-#: nova/api/openstack/compute/plugins/v3/servers.py:280
-#, python-format
-msgid "marker [%s] not found"
-msgstr ""
-
-#: nova/api/openstack/common.py:299
+#: nova/api/openstack/common.py:280
#, python-format
msgid "href %s does not contain version"
msgstr ""
-#: nova/api/openstack/common.py:314
+#: nova/api/openstack/common.py:293
msgid "Image metadata limit exceeded"
msgstr ""
-#: nova/api/openstack/common.py:322
+#: nova/api/openstack/common.py:301
msgid "Image metadata key cannot be blank"
msgstr ""
-#: nova/api/openstack/common.py:325
+#: nova/api/openstack/common.py:304
msgid "Image metadata key too long"
msgstr ""
-#: nova/api/openstack/common.py:328
+#: nova/api/openstack/common.py:307
msgid "Invalid image metadata"
msgstr ""
-#: nova/api/openstack/common.py:391
+#: nova/api/openstack/common.py:370
#, python-format
msgid "Cannot '%(action)s' while instance is in %(attr)s %(state)s"
msgstr ""
-#: nova/api/openstack/common.py:394
+#: nova/api/openstack/common.py:373
#, python-format
msgid "Cannot '%s' an instance which has never been active"
msgstr ""
-#: nova/api/openstack/common.py:397
+#: nova/api/openstack/common.py:376
#, python-format
msgid "Instance is in an invalid state for '%s'"
msgstr ""
-#: nova/api/openstack/common.py:477
-msgid "Rejecting snapshot request, snapshots currently disabled"
-msgstr ""
-
-#: nova/api/openstack/common.py:479
+#: nova/api/openstack/common.py:458
msgid "Instance snapshots are not permitted at this time."
msgstr ""
-#: nova/api/openstack/common.py:600
+#: nova/api/openstack/common.py:579
msgid "Cells is not enabled."
msgstr ""
-#: nova/api/openstack/extensions.py:197
+#: nova/api/openstack/extensions.py:198
#, python-format
msgid "Loaded extension: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:243
+#: nova/api/openstack/extensions.py:244
#: nova/api/openstack/compute/plugins/__init__.py:51
#, python-format
msgid "Exception loading extension: %s"
msgstr ""
-#: nova/api/openstack/extensions.py:278
-#, python-format
-msgid "Failed to load extension %(ext_factory)s: %(exc)s"
-msgstr ""
-
-#: nova/api/openstack/extensions.py:349
+#: nova/api/openstack/extensions.py:350
#, python-format
msgid "Failed to load extension %(classpath)s: %(exc)s"
msgstr ""
-#: nova/api/openstack/extensions.py:372
+#: nova/api/openstack/extensions.py:373
#, python-format
msgid "Failed to load extension %(ext_name)s:%(exc)s"
msgstr ""
-#: nova/api/openstack/extensions.py:494
+#: nova/api/openstack/extensions.py:495
msgid "Unexpected exception in API method"
msgstr ""
-#: nova/api/openstack/extensions.py:495
+#: nova/api/openstack/extensions.py:496
#, python-format
msgid ""
"Unexpected API Error. Please report this at "
@@ -2476,56 +2456,41 @@ msgid ""
"%s"
msgstr ""
-#: nova/api/openstack/wsgi.py:228 nova/api/openstack/wsgi.py:633
+#: nova/api/openstack/wsgi.py:230 nova/api/openstack/wsgi.py:635
msgid "cannot understand JSON"
msgstr ""
-#: nova/api/openstack/wsgi.py:638
+#: nova/api/openstack/wsgi.py:640
msgid "too many body keys"
msgstr ""
-#: nova/api/openstack/wsgi.py:682
-#, python-format
-msgid "Exception handling resource: %s"
-msgstr ""
-
-#: nova/api/openstack/wsgi.py:686
-#, python-format
-msgid "Fault thrown: %s"
-msgstr ""
-
-#: nova/api/openstack/wsgi.py:689
-#, python-format
-msgid "HTTP exception thrown: %s"
-msgstr ""
-
-#: nova/api/openstack/wsgi.py:919
+#: nova/api/openstack/wsgi.py:921
#, python-format
msgid "There is no such action: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:922 nova/api/openstack/wsgi.py:949
+#: nova/api/openstack/wsgi.py:924 nova/api/openstack/wsgi.py:951
#: nova/api/openstack/compute/server_metadata.py:57
#: nova/api/openstack/compute/server_metadata.py:75
#: nova/api/openstack/compute/server_metadata.py:100
#: nova/api/openstack/compute/server_metadata.py:126
-#: nova/api/openstack/compute/contrib/evacuate.py:45
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:58
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:73
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:95
+#: nova/api/openstack/compute/contrib/evacuate.py:47
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:60
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:75
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:97
msgid "Malformed request body"
msgstr ""
-#: nova/api/openstack/wsgi.py:926
+#: nova/api/openstack/wsgi.py:928
#, python-format
msgid "Action: '%(action)s', body: %(body)s"
msgstr ""
-#: nova/api/openstack/wsgi.py:946
+#: nova/api/openstack/wsgi.py:948
msgid "Unsupported Content-Type"
msgstr ""
-#: nova/api/openstack/wsgi.py:958
+#: nova/api/openstack/wsgi.py:960
#, python-format
msgid ""
"Malformed request URL: URL's project_id '%(project_id)s' doesn't match "
@@ -2554,7 +2519,7 @@ msgid "Initializing extension manager."
msgstr ""
#: nova/api/openstack/compute/flavors.py:107
-#: nova/api/openstack/compute/plugins/v3/flavors.py:70
+#: nova/api/openstack/compute/plugins/v3/flavors.py:72
#, python-format
msgid "Invalid is_public filter [%s]"
msgstr ""
@@ -2569,490 +2534,424 @@ msgstr ""
msgid "Invalid minDisk filter [%s]"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:35
-#: nova/api/openstack/compute/images.py:141
-#: nova/api/openstack/compute/images.py:157
+#: nova/api/openstack/compute/flavors.py:146
+#: nova/api/openstack/compute/servers.py:606
+#: nova/api/openstack/compute/plugins/v3/flavors.py:112
+#: nova/api/openstack/compute/plugins/v3/servers.py:303
+#, python-format
+msgid "marker [%s] not found"
+msgstr ""
+
+#: nova/api/openstack/compute/image_metadata.py:37
+#: nova/api/openstack/compute/images.py:135
+#: nova/api/openstack/compute/images.py:151
msgid "Image not found."
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:78
+#: nova/api/openstack/compute/image_metadata.py:81
msgid "Incorrect request body format"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:82
+#: nova/api/openstack/compute/image_metadata.py:85
#: nova/api/openstack/compute/server_metadata.py:79
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:108
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:85
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:77
+#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:72
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:79
msgid "Request body and URI mismatch"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:85
+#: nova/api/openstack/compute/image_metadata.py:88
#: nova/api/openstack/compute/server_metadata.py:83
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:111
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:88
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:81
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:83
msgid "Request body contains too many items"
msgstr ""
-#: nova/api/openstack/compute/image_metadata.py:117
+#: nova/api/openstack/compute/image_metadata.py:122
msgid "Invalid metadata key"
msgstr ""
-#: nova/api/openstack/compute/images.py:162
+#: nova/api/openstack/compute/images.py:156
msgid "You are not allowed to delete the image."
msgstr ""
#: nova/api/openstack/compute/ips.py:67
-#: nova/api/openstack/compute/plugins/v3/ips.py:39
+#: nova/api/openstack/compute/plugins/v3/ips.py:41
msgid "Instance does not exist"
msgstr ""
-#: nova/api/openstack/compute/ips.py:90
-#: nova/api/openstack/compute/plugins/v3/ips.py:60
+#: nova/api/openstack/compute/ips.py:84
+#: nova/api/openstack/compute/plugins/v3/ips.py:56
msgid "Instance is not a member of specified network"
msgstr ""
-#: nova/api/openstack/compute/limits.py:161
+#: nova/api/openstack/compute/limits.py:162
#, python-format
msgid ""
"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
"%(unit_string)s."
msgstr ""
-#: nova/api/openstack/compute/limits.py:287
+#: nova/api/openstack/compute/limits.py:288
msgid "This request was rate-limited."
msgstr ""
#: nova/api/openstack/compute/server_metadata.py:37
#: nova/api/openstack/compute/server_metadata.py:122
#: nova/api/openstack/compute/server_metadata.py:177
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:41
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:43
msgid "Server does not exist"
msgstr ""
#: nova/api/openstack/compute/server_metadata.py:157
#: nova/api/openstack/compute/server_metadata.py:168
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:144
-#: nova/api/openstack/compute/plugins/v3/server_metadata.py:156
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:146
+#: nova/api/openstack/compute/plugins/v3/server_metadata.py:158
msgid "Metadata item was not found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:81
-msgid ""
-"XML support has been deprecated and may be removed as early as the Juno "
-"release."
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:551
-#: nova/api/openstack/compute/contrib/cells.py:423
-#: nova/api/openstack/compute/plugins/v3/cells.py:331
+#: nova/api/openstack/compute/servers.py:554
+#: nova/api/openstack/compute/contrib/cells.py:427
msgid "Invalid changes-since value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:570
-#: nova/api/openstack/compute/plugins/v3/servers.py:234
+#: nova/api/openstack/compute/servers.py:573
+#: nova/api/openstack/compute/plugins/v3/servers.py:257
msgid "Only administrators may list deleted instances"
msgstr ""
-#: nova/api/openstack/compute/servers.py:606
-#: nova/api/openstack/compute/plugins/v3/servers.py:283
-#, python-format
-msgid "Flavor '%s' could not be found "
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:625
-#: nova/api/openstack/compute/servers.py:772
-#: nova/api/openstack/compute/servers.py:1079
-#: nova/api/openstack/compute/servers.py:1199
-#: nova/api/openstack/compute/servers.py:1384
-#: nova/api/openstack/compute/plugins/v3/servers.py:615
-#: nova/api/openstack/compute/plugins/v3/servers.py:727
-#: nova/api/openstack/compute/plugins/v3/servers.py:846
+#: nova/api/openstack/compute/servers.py:627
+#: nova/api/openstack/compute/servers.py:774
+#: nova/api/openstack/compute/servers.py:1078
+#: nova/api/openstack/compute/servers.py:1203
+#: nova/api/openstack/compute/servers.py:1388
+#: nova/api/openstack/compute/plugins/v3/servers.py:650
+#: nova/api/openstack/compute/plugins/v3/servers.py:768
+#: nova/api/openstack/compute/plugins/v3/servers.py:889
msgid "Instance could not be found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:656
+#: nova/api/openstack/compute/servers.py:658
#, python-format
msgid "Bad personality format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:659
+#: nova/api/openstack/compute/servers.py:661
msgid "Bad personality format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:662
+#: nova/api/openstack/compute/servers.py:664
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:677
+#: nova/api/openstack/compute/servers.py:679
msgid "Unknown argument : port"
msgstr ""
-#: nova/api/openstack/compute/servers.py:680
-#: nova/api/openstack/compute/plugins/v3/servers.py:338
+#: nova/api/openstack/compute/servers.py:682
+#: nova/api/openstack/compute/plugins/v3/servers.py:361
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:690
-#: nova/api/openstack/compute/plugins/v3/servers.py:354
+#: nova/api/openstack/compute/servers.py:692
+#: nova/api/openstack/compute/plugins/v3/servers.py:377
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:701
-#: nova/api/openstack/compute/plugins/v3/servers.py:327
+#: nova/api/openstack/compute/servers.py:703
+#: nova/api/openstack/compute/plugins/v3/servers.py:350
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:714
-#: nova/api/openstack/compute/plugins/v3/servers.py:369
+#: nova/api/openstack/compute/servers.py:716
+#: nova/api/openstack/compute/plugins/v3/servers.py:392
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr ""
-#: nova/api/openstack/compute/servers.py:720
-#: nova/api/openstack/compute/plugins/v3/servers.py:375
+#: nova/api/openstack/compute/servers.py:722
+#: nova/api/openstack/compute/plugins/v3/servers.py:398
#, python-format
msgid "Bad network format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:723
-#: nova/api/openstack/compute/servers.py:824
-#: nova/api/openstack/compute/plugins/v3/servers.py:378
+#: nova/api/openstack/compute/servers.py:725
+#: nova/api/openstack/compute/servers.py:826
+#: nova/api/openstack/compute/plugins/v3/servers.py:401
msgid "Bad networks format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:749
+#: nova/api/openstack/compute/servers.py:751
msgid "Userdata content cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/servers.py:754
+#: nova/api/openstack/compute/servers.py:756
msgid "accessIPv4 is not proper IPv4 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:759
+#: nova/api/openstack/compute/servers.py:761
msgid "accessIPv6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:788
-#: nova/api/openstack/compute/plugins/v3/servers.py:419
+#: nova/api/openstack/compute/servers.py:790
+#: nova/api/openstack/compute/plugins/v3/servers.py:443
msgid "Server name is not defined"
msgstr ""
-#: nova/api/openstack/compute/servers.py:840
-#: nova/api/openstack/compute/servers.py:968
+#: nova/api/openstack/compute/servers.py:842
+#: nova/api/openstack/compute/servers.py:970
msgid "Invalid flavorRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:880
+#: nova/api/openstack/compute/servers.py:882
msgid ""
"Using different block_device_mapping syntaxes is not allowed in the same "
"request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:965
-#: nova/api/openstack/compute/plugins/v3/servers.py:495
+#: nova/api/openstack/compute/servers.py:967
+#: nova/api/openstack/compute/plugins/v3/servers.py:519
msgid "Can not find requested image"
msgstr ""
-#: nova/api/openstack/compute/servers.py:971
-#: nova/api/openstack/compute/plugins/v3/servers.py:501
+#: nova/api/openstack/compute/servers.py:973
+#: nova/api/openstack/compute/plugins/v3/servers.py:525
msgid "Invalid key_name provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:974
-#: nova/api/openstack/compute/plugins/v3/servers.py:504
+#: nova/api/openstack/compute/servers.py:976
+#: nova/api/openstack/compute/plugins/v3/servers.py:528
msgid "Invalid config_drive provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1064
+#: nova/api/openstack/compute/servers.py:1063
msgid "HostId cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1068
+#: nova/api/openstack/compute/servers.py:1067
msgid "Personality cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1094
-#: nova/api/openstack/compute/servers.py:1113
-#: nova/api/openstack/compute/plugins/v3/servers.py:626
-#: nova/api/openstack/compute/plugins/v3/servers.py:642
+#: nova/api/openstack/compute/servers.py:1093
+#: nova/api/openstack/compute/servers.py:1112
+#: nova/api/openstack/compute/plugins/v3/servers.py:662
+#: nova/api/openstack/compute/plugins/v3/servers.py:679
msgid "Instance has not been resized."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1116
-#: nova/api/openstack/compute/plugins/v3/servers.py:645
+#: nova/api/openstack/compute/servers.py:1115
+#: nova/api/openstack/compute/plugins/v3/servers.py:682
msgid "Flavor used by the instance could not be found."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1132
-#: nova/api/openstack/compute/plugins/v3/servers.py:659
+#: nova/api/openstack/compute/servers.py:1131
+#: nova/api/openstack/compute/plugins/v3/servers.py:697
msgid "Argument 'type' for reboot must be a string"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1138
-#: nova/api/openstack/compute/plugins/v3/servers.py:665
+#: nova/api/openstack/compute/servers.py:1137
+#: nova/api/openstack/compute/plugins/v3/servers.py:703
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1142
-#: nova/api/openstack/compute/plugins/v3/servers.py:669
+#: nova/api/openstack/compute/servers.py:1141
+#: nova/api/openstack/compute/plugins/v3/servers.py:707
msgid "Missing argument 'type' for reboot"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1169
-#: nova/api/openstack/compute/plugins/v3/servers.py:697
+#: nova/api/openstack/compute/servers.py:1168
+#: nova/api/openstack/compute/plugins/v3/servers.py:735
msgid "Unable to locate requested flavor."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1172
-#: nova/api/openstack/compute/plugins/v3/servers.py:700
+#: nova/api/openstack/compute/servers.py:1171
+#: nova/api/openstack/compute/plugins/v3/servers.py:738
msgid "Resize requires a flavor change."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1180
-#: nova/api/openstack/compute/plugins/v3/servers.py:708
+#: nova/api/openstack/compute/servers.py:1181
+#: nova/api/openstack/compute/plugins/v3/servers.py:748
msgid "You are not authorized to access the image the instance was started with."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1184
-#: nova/api/openstack/compute/plugins/v3/servers.py:712
+#: nova/api/openstack/compute/servers.py:1185
+#: nova/api/openstack/compute/plugins/v3/servers.py:752
msgid "Image that the instance was started with could not be found."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1188
-#: nova/api/openstack/compute/plugins/v3/servers.py:716
+#: nova/api/openstack/compute/servers.py:1189
+#: nova/api/openstack/compute/plugins/v3/servers.py:756
msgid "Invalid instance image."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1211
+#: nova/api/openstack/compute/servers.py:1215
msgid "Missing imageRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1216
-#: nova/api/openstack/compute/servers.py:1224
+#: nova/api/openstack/compute/servers.py:1220
+#: nova/api/openstack/compute/servers.py:1228
msgid "Invalid imageRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1254
+#: nova/api/openstack/compute/servers.py:1258
msgid "Missing flavorRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1267
+#: nova/api/openstack/compute/servers.py:1271
msgid "No adminPass was specified"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1275
+#: nova/api/openstack/compute/servers.py:1279
#: nova/api/openstack/compute/plugins/v3/admin_password.py:56
msgid "Unable to set password on instance"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1284
+#: nova/api/openstack/compute/servers.py:1288
msgid "Unable to parse metadata key/value pairs."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1297
+#: nova/api/openstack/compute/servers.py:1301
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1300
+#: nova/api/openstack/compute/servers.py:1304
msgid "Resize requests require 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1320
+#: nova/api/openstack/compute/servers.py:1324
msgid "Could not parse imageRef from request."
msgstr ""
-#: nova/api/openstack/compute/servers.py:1390
-#: nova/api/openstack/compute/plugins/v3/servers.py:852
+#: nova/api/openstack/compute/servers.py:1394
+#: nova/api/openstack/compute/plugins/v3/servers.py:895
msgid "Cannot find image for rebuild"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1423
+#: nova/api/openstack/compute/servers.py:1428
msgid "createImage entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1432
-#: nova/api/openstack/compute/contrib/admin_actions.py:286
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:85
-#: nova/api/openstack/compute/plugins/v3/servers.py:892
-msgid "Invalid metadata"
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:1490
-msgid "Invalid adminPass"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:63
-#: nova/api/openstack/compute/contrib/admin_actions.py:88
-#: nova/api/openstack/compute/contrib/admin_actions.py:113
-#: nova/api/openstack/compute/contrib/admin_actions.py:135
-#: nova/api/openstack/compute/contrib/admin_actions.py:176
-#: nova/api/openstack/compute/contrib/admin_actions.py:195
-#: nova/api/openstack/compute/contrib/admin_actions.py:214
-#: nova/api/openstack/compute/contrib/admin_actions.py:233
-#: nova/api/openstack/compute/contrib/admin_actions.py:391
-#: nova/api/openstack/compute/contrib/multinic.py:43
-#: nova/api/openstack/compute/contrib/rescue.py:45
-#: nova/api/openstack/compute/contrib/shelve.py:43
-msgid "Server not found"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:66
-msgid "Virt driver does not implement pause function."
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:70
-#, python-format
-msgid "Compute.api::pause %s"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:91
-msgid "Virt driver does not implement unpause function."
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:95
-#, python-format
-msgid "Compute.api::unpause %s"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:117
-#, python-format
-msgid "compute.api::suspend %s"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:139
-#, python-format
-msgid "compute.api::resume %s"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:163
-#, python-format
-msgid "Error in migrate %s"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:182
-#, python-format
-msgid "Compute.api::reset_network %s"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/admin_actions.py:201
-#, python-format
-msgid "Compute.api::inject_network_info %s"
+#: nova/api/openstack/compute/servers.py:1437
+#: nova/api/openstack/compute/contrib/admin_actions.py:283
+#: nova/api/openstack/compute/plugins/v3/servers.py:936
+msgid "Invalid metadata"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:218
-#, python-format
-msgid "Compute.api::lock %s"
+#: nova/api/openstack/compute/servers.py:1495
+msgid "Invalid adminPass"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:237
-#, python-format
-msgid "Compute.api::unlock %s"
+#: nova/api/openstack/compute/contrib/admin_actions.py:64
+#: nova/api/openstack/compute/contrib/admin_actions.py:86
+#: nova/api/openstack/compute/contrib/admin_actions.py:108
+#: nova/api/openstack/compute/contrib/admin_actions.py:130
+#: nova/api/openstack/compute/contrib/admin_actions.py:173
+#: nova/api/openstack/compute/contrib/admin_actions.py:192
+#: nova/api/openstack/compute/contrib/admin_actions.py:211
+#: nova/api/openstack/compute/contrib/admin_actions.py:230
+#: nova/api/openstack/compute/contrib/admin_actions.py:388
+#: nova/api/openstack/compute/contrib/multinic.py:44
+#: nova/api/openstack/compute/contrib/rescue.py:45
+#: nova/api/openstack/compute/contrib/shelve.py:43
+msgid "Server not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:263
+#: nova/api/openstack/compute/contrib/admin_actions.py:260
#, python-format
msgid "createBackup entity requires %s attribute"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:267
+#: nova/api/openstack/compute/contrib/admin_actions.py:264
msgid "Malformed createBackup entity"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:273
+#: nova/api/openstack/compute/contrib/admin_actions.py:270
msgid "createBackup attribute 'rotation' must be an integer"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:276
+#: nova/api/openstack/compute/contrib/admin_actions.py:273
msgid "createBackup attribute 'rotation' must be greater than or equal to zero"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:292
-#: nova/api/openstack/compute/contrib/console_output.py:45
+#: nova/api/openstack/compute/contrib/admin_actions.py:289
+#: nova/api/openstack/compute/contrib/console_output.py:46
#: nova/api/openstack/compute/contrib/server_start_stop.py:40
msgid "Instance not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:323
-#: nova/api/openstack/compute/plugins/v3/migrate_server.py:80
+#: nova/api/openstack/compute/contrib/admin_actions.py:320
msgid ""
"host, block_migration and disk_over_commit must be specified for live "
"migration."
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:360
+#: nova/api/openstack/compute/contrib/admin_actions.py:357
#, python-format
msgid "Live migration of instance %s to another host failed"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:363
+#: nova/api/openstack/compute/contrib/admin_actions.py:360
#, python-format
msgid "Live migration of instance %(id)s to host %(host)s failed"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:381
-#: nova/api/openstack/compute/plugins/v3/admin_actions.py:83
+#: nova/api/openstack/compute/contrib/admin_actions.py:378
#, python-format
msgid "Desired state must be specified. Valid states are: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/admin_actions.py:395
+#: nova/api/openstack/compute/contrib/agents.py:100
+#: nova/api/openstack/compute/contrib/agents.py:118
+#: nova/api/openstack/compute/contrib/agents.py:156
+#: nova/api/openstack/compute/contrib/cloudpipe_update.py:55
#, python-format
-msgid "Compute.api::resetState %s"
+msgid "Invalid request body: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:99
-#, python-format
-msgid "Cannot show aggregate: %s"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/aggregates.py:137
-#, python-format
-msgid "Cannot update aggregate: %s"
+#: nova/api/openstack/compute/contrib/aggregates.py:39
+msgid "Only host parameter can be specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:151
-#, python-format
-msgid "Cannot delete aggregate: %s"
+#: nova/api/openstack/compute/contrib/aggregates.py:42
+msgid "Host parameter must be specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:162
+#: nova/api/openstack/compute/contrib/aggregates.py:168
#, python-format
msgid "Aggregates does not have %s action"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:166
+#: nova/api/openstack/compute/contrib/aggregates.py:172
#: nova/api/openstack/compute/contrib/flavormanage.py:55
#: nova/api/openstack/compute/contrib/keypairs.py:86
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:167
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:169
msgid "Invalid request body"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:176
-#: nova/api/openstack/compute/contrib/aggregates.py:181
+#: nova/api/openstack/compute/contrib/aggregates.py:182
+#: nova/api/openstack/compute/contrib/aggregates.py:187
#, python-format
msgid "Cannot add host %(host)s in aggregate %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:195
-#: nova/api/openstack/compute/contrib/aggregates.py:199
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:151
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:155
+#: nova/api/openstack/compute/contrib/aggregates.py:201
+#: nova/api/openstack/compute/contrib/aggregates.py:205
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:153
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:157
#, python-format
msgid "Cannot remove host %(host)s in aggregate %(id)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:218
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:175
+#: nova/api/openstack/compute/contrib/aggregates.py:224
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:177
msgid "The value of metadata must be a dict"
msgstr ""
-#: nova/api/openstack/compute/contrib/aggregates.py:230
+#: nova/api/openstack/compute/contrib/aggregates.py:237
#, python-format
msgid "Cannot set metadata %(metadata)s in aggregate %(id)s"
msgstr ""
@@ -3068,28 +2967,28 @@ msgstr ""
msgid "Delete snapshot with id: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:104
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:103
msgid "Attach interface"
msgstr ""
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:119
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:154
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:177
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:166
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:116
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:145
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:166
+#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:174
+#: nova/network/security_group/neutron_driver.py:510
+#: nova/network/security_group/neutron_driver.py:514
+#: nova/network/security_group/neutron_driver.py:518
+#: nova/network/security_group/neutron_driver.py:522
+#: nova/network/security_group/neutron_driver.py:526
msgid "Network driver does not support this function."
msgstr ""
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:123
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:120
msgid "Failed to attach interface"
msgstr ""
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:130
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:128
-msgid "Attachments update is not supported"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/attach_interfaces.py:142
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:139
+#: nova/api/openstack/compute/contrib/attach_interfaces.py:136
+#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:144
#, python-format
msgid "Detach interface %s"
msgstr ""
@@ -3103,40 +3002,33 @@ msgstr ""
msgid "Must specify id or address"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:252
+#: nova/api/openstack/compute/contrib/cells.py:250
#, python-format
msgid "Cell %(id)s not found."
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:285
-#: nova/api/openstack/compute/plugins/v3/cells.py:192
+#: nova/api/openstack/compute/contrib/cells.py:286
msgid "Cell name cannot be empty"
msgstr ""
#: nova/api/openstack/compute/contrib/cells.py:289
-#: nova/api/openstack/compute/plugins/v3/cells.py:196
msgid "Cell name cannot contain '!' or '.'"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:296
-#: nova/api/openstack/compute/plugins/v3/cells.py:203
+#: nova/api/openstack/compute/contrib/cells.py:295
msgid "Cell type must be 'parent' or 'child'"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:352
-#: nova/api/openstack/compute/contrib/cells.py:376
-#: nova/api/openstack/compute/plugins/v3/cells.py:259
-#: nova/api/openstack/compute/plugins/v3/cells.py:282
+#: nova/api/openstack/compute/contrib/cells.py:353
+#: nova/api/openstack/compute/contrib/cells.py:378
msgid "No cell information in request"
msgstr ""
#: nova/api/openstack/compute/contrib/cells.py:357
-#: nova/api/openstack/compute/plugins/v3/cells.py:264
msgid "No cell name in request"
msgstr ""
-#: nova/api/openstack/compute/contrib/cells.py:411
-#: nova/api/openstack/compute/plugins/v3/cells.py:319
+#: nova/api/openstack/compute/contrib/cells.py:415
msgid "Only 'updated_since', 'project_id' and 'deleted' are understood."
msgstr ""
@@ -3166,19 +3058,19 @@ msgstr ""
msgid "The requested console type details are not accessible"
msgstr ""
-#: nova/api/openstack/compute/contrib/console_output.py:51
+#: nova/api/openstack/compute/contrib/console_output.py:52
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr ""
-#: nova/api/openstack/compute/contrib/console_output.py:62
+#: nova/api/openstack/compute/contrib/console_output.py:63
msgid "Length in request body must be an integer value"
msgstr ""
-#: nova/api/openstack/compute/contrib/console_output.py:70
+#: nova/api/openstack/compute/contrib/console_output.py:71
msgid "Unable to get console"
msgstr ""
-#: nova/api/openstack/compute/contrib/console_output.py:75
+#: nova/api/openstack/compute/contrib/console_output.py:76
#: nova/api/openstack/compute/plugins/v3/console_output.py:60
msgid "Unable to get console log, functionality not implemented"
msgstr ""
@@ -3188,17 +3080,17 @@ msgid "Instance not yet ready"
msgstr ""
#: nova/api/openstack/compute/contrib/consoles.py:52
-#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:62
+#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:60
msgid "Unable to get vnc console, functionality not implemented"
msgstr ""
#: nova/api/openstack/compute/contrib/consoles.py:76
-#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:93
+#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:89
msgid "Unable to get spice console, functionality not implemented"
msgstr ""
#: nova/api/openstack/compute/contrib/consoles.py:101
-#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:127
+#: nova/api/openstack/compute/plugins/v3/remote_consoles.py:121
msgid "Unable to get rdp console, functionality not implemented"
msgstr ""
@@ -3207,23 +3099,27 @@ msgstr ""
msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr ""
-#: nova/api/openstack/compute/contrib/evacuate.py:53
-msgid "host and onSharedStorage must be specified."
+#: nova/api/openstack/compute/contrib/evacuate.py:54
+msgid "host must be specified."
msgstr ""
#: nova/api/openstack/compute/contrib/evacuate.py:61
+msgid "onSharedStorage must be specified."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/evacuate.py:69
#: nova/api/openstack/compute/plugins/v3/evacuate.py:67
msgid "admin password can't be changed on existing disk"
msgstr ""
-#: nova/api/openstack/compute/contrib/evacuate.py:71
-#: nova/api/openstack/compute/plugins/v3/evacuate.py:77
+#: nova/api/openstack/compute/contrib/evacuate.py:80
+#: nova/api/openstack/compute/plugins/v3/evacuate.py:78
#, python-format
msgid "Compute host %s not found."
msgstr ""
-#: nova/api/openstack/compute/contrib/evacuate.py:77
-#: nova/api/openstack/compute/plugins/v3/evacuate.py:83
+#: nova/api/openstack/compute/contrib/evacuate.py:86
+#: nova/api/openstack/compute/plugins/v3/evacuate.py:84
msgid "The target host can't be the same one."
msgstr ""
@@ -3250,8 +3146,12 @@ msgstr ""
msgid "No request body"
msgstr ""
+#: nova/api/openstack/compute/contrib/flavor_access.py:170
+#: nova/api/openstack/compute/contrib/flavor_access.py:194
+msgid "Missing tenant parameter"
+msgstr ""
+
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:56
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:42
msgid "No Request Body"
msgstr ""
@@ -3261,8 +3161,8 @@ msgstr ""
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:134
#: nova/api/openstack/compute/contrib/flavorextraspecs.py:150
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:113
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:132
+#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:96
+#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:115
#, python-format
msgid "Flavor %(flavor_id)s has no extra specs with key %(key)s."
msgstr ""
@@ -3271,77 +3171,86 @@ msgstr ""
msgid "DNS entries not found."
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:129
-#: nova/api/openstack/compute/contrib/floating_ips.py:177
+#: nova/api/openstack/compute/contrib/floating_ips.py:130
+#: nova/api/openstack/compute/contrib/floating_ips.py:186
#, python-format
msgid "Floating ip not found for id %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:162
+#: nova/api/openstack/compute/contrib/floating_ips.py:163
#, python-format
msgid "No more floating ips in pool %s."
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:164
+#: nova/api/openstack/compute/contrib/floating_ips.py:165
msgid "No more floating ips available."
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:218
-#: nova/api/openstack/compute/contrib/floating_ips.py:283
-#: nova/api/openstack/compute/contrib/security_groups.py:481
+#: nova/api/openstack/compute/contrib/floating_ips.py:169
+#, python-format
+msgid "IP allocation over quota in pool %s."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:171
+msgid "IP allocation over quota."
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/floating_ips.py:223
+#: nova/api/openstack/compute/contrib/floating_ips.py:288
+#: nova/api/openstack/compute/contrib/security_groups.py:488
msgid "Missing parameter dict"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:221
-#: nova/api/openstack/compute/contrib/floating_ips.py:286
+#: nova/api/openstack/compute/contrib/floating_ips.py:226
+#: nova/api/openstack/compute/contrib/floating_ips.py:291
msgid "Address not specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:227
+#: nova/api/openstack/compute/contrib/floating_ips.py:232
msgid "No nw_info cache associated with instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:232
+#: nova/api/openstack/compute/contrib/floating_ips.py:237
msgid "No fixed ips associated to instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:243
+#: nova/api/openstack/compute/contrib/floating_ips.py:248
msgid "Specified fixed address not assigned to instance"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:257
+#: nova/api/openstack/compute/contrib/floating_ips.py:262
msgid "floating ip is already associated"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:260
+#: nova/api/openstack/compute/contrib/floating_ips.py:265
msgid "l3driver call to add floating ip failed"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:263
-#: nova/api/openstack/compute/contrib/floating_ips.py:294
+#: nova/api/openstack/compute/contrib/floating_ips.py:268
+#: nova/api/openstack/compute/contrib/floating_ips.py:299
msgid "floating ip not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:268
+#: nova/api/openstack/compute/contrib/floating_ips.py:273
msgid "Error. Unable to associate floating ip"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:309
+#: nova/api/openstack/compute/contrib/floating_ips.py:314
msgid "Floating ip is not associated"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips.py:313
+#: nova/api/openstack/compute/contrib/floating_ips.py:318
#, python-format
msgid "Floating ip %(address)s is not associated with instance %(id)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:118
+#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:116
#: nova/api/openstack/compute/contrib/services.py:173
#: nova/api/openstack/compute/plugins/v3/services.py:124
msgid "Unknown action"
msgstr ""
-#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:146
+#: nova/api/openstack/compute/contrib/floating_ips_bulk.py:144
#: nova/cmd/manage.py:417
#, python-format
msgid "/%s should be specified as single address(es) not in cidr format"
@@ -3351,83 +3260,79 @@ msgstr ""
msgid "fping utility is not found."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:183
-#: nova/api/openstack/compute/plugins/v3/hosts.py:128
+#: nova/api/openstack/compute/contrib/hosts.py:185
#, python-format
msgid "Invalid update setting: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:186
-#: nova/api/openstack/compute/plugins/v3/hosts.py:131
+#: nova/api/openstack/compute/contrib/hosts.py:188
#, python-format
msgid "Invalid status: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:188
-#: nova/api/openstack/compute/plugins/v3/hosts.py:133
+#: nova/api/openstack/compute/contrib/hosts.py:190
#, python-format
msgid "Invalid mode: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:190
-#: nova/api/openstack/compute/plugins/v3/hosts.py:135
+#: nova/api/openstack/compute/contrib/hosts.py:192
msgid "'status' or 'maintenance_mode' needed for host update"
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:206
-#: nova/api/openstack/compute/plugins/v3/hosts.py:152
+#: nova/api/openstack/compute/contrib/hosts.py:208
+#: nova/api/openstack/compute/plugins/v3/hosts.py:135
#, python-format
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:212
-#: nova/api/openstack/compute/plugins/v3/hosts.py:158
+#: nova/api/openstack/compute/contrib/hosts.py:214
+#: nova/api/openstack/compute/plugins/v3/hosts.py:141
msgid "Virt driver does not implement host maintenance mode."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:227
-#: nova/api/openstack/compute/plugins/v3/hosts.py:174
+#: nova/api/openstack/compute/contrib/hosts.py:229
+#: nova/api/openstack/compute/plugins/v3/hosts.py:157
#, python-format
msgid "Enabling host %s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:229
-#: nova/api/openstack/compute/plugins/v3/hosts.py:176
+#: nova/api/openstack/compute/contrib/hosts.py:231
+#: nova/api/openstack/compute/plugins/v3/hosts.py:159
#, python-format
msgid "Disabling host %s."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:234
-#: nova/api/openstack/compute/plugins/v3/hosts.py:181
+#: nova/api/openstack/compute/contrib/hosts.py:236
+#: nova/api/openstack/compute/plugins/v3/hosts.py:164
msgid "Virt driver does not implement host disabled status."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:250
-#: nova/api/openstack/compute/plugins/v3/hosts.py:199
+#: nova/api/openstack/compute/contrib/hosts.py:252
+#: nova/api/openstack/compute/plugins/v3/hosts.py:182
msgid "Virt driver does not implement host power management."
msgstr ""
-#: nova/api/openstack/compute/contrib/hosts.py:336
-#: nova/api/openstack/compute/plugins/v3/hosts.py:292
+#: nova/api/openstack/compute/contrib/hosts.py:338
+#: nova/api/openstack/compute/plugins/v3/hosts.py:275
msgid "Describe-resource is admin only functionality"
msgstr ""
-#: nova/api/openstack/compute/contrib/hypervisors.py:193
-#: nova/api/openstack/compute/contrib/hypervisors.py:205
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:93
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:105
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:140
+#: nova/api/openstack/compute/contrib/hypervisors.py:208
+#: nova/api/openstack/compute/contrib/hypervisors.py:220
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:100
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:112
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:147
#, python-format
msgid "Hypervisor with ID '%s' could not be found."
msgstr ""
-#: nova/api/openstack/compute/contrib/hypervisors.py:213
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:113
+#: nova/api/openstack/compute/contrib/hypervisors.py:228
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:120
msgid "Virt driver does not implement uptime function."
msgstr ""
-#: nova/api/openstack/compute/contrib/hypervisors.py:229
-#: nova/api/openstack/compute/contrib/hypervisors.py:239
+#: nova/api/openstack/compute/contrib/hypervisors.py:244
+#: nova/api/openstack/compute/contrib/hypervisors.py:254
#, python-format
msgid "No hypervisor matching '%s' could be found."
msgstr ""
@@ -3442,27 +3347,22 @@ msgstr ""
msgid "Quota exceeded, too many key pairs."
msgstr ""
-#: nova/api/openstack/compute/contrib/multinic.py:54
+#: nova/api/openstack/compute/contrib/multinic.py:55
msgid "Missing 'networkId' argument for addFixedIp"
msgstr ""
-#: nova/api/openstack/compute/contrib/multinic.py:70
+#: nova/api/openstack/compute/contrib/multinic.py:75
msgid "Missing 'address' argument for removeFixedIp"
msgstr ""
-#: nova/api/openstack/compute/contrib/multinic.py:80
-#, python-format
-msgid "Unable to find address %r"
-msgstr ""
-
#: nova/api/openstack/compute/contrib/networks_associate.py:40
#: nova/api/openstack/compute/contrib/networks_associate.py:56
#: nova/api/openstack/compute/contrib/networks_associate.py:74
-#: nova/api/openstack/compute/contrib/os_networks.py:78
-#: nova/api/openstack/compute/contrib/os_networks.py:93
-#: nova/api/openstack/compute/contrib/os_networks.py:106
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:110
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:137
+#: nova/api/openstack/compute/contrib/os_networks.py:79
+#: nova/api/openstack/compute/contrib/os_networks.py:94
+#: nova/api/openstack/compute/contrib/os_networks.py:107
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:112
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:144
msgid "Network not found"
msgstr ""
@@ -3478,66 +3378,52 @@ msgstr ""
msgid "Associate host is not implemented by the configured Network API"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_networks.py:81
+#: nova/api/openstack/compute/contrib/os_networks.py:82
msgid "Disassociate network is not implemented by the configured Network API"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_networks.py:100
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
-#, python-format
-msgid "Deleting network with id %s"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/os_networks.py:118
+#: nova/api/openstack/compute/contrib/os_networks.py:119
msgid "Missing network in body"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_networks.py:122
+#: nova/api/openstack/compute/contrib/os_networks.py:123
msgid "Network label is required"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_networks.py:126
+#: nova/api/openstack/compute/contrib/os_networks.py:127
msgid "Network cidr or cidr_v6 is required"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_networks.py:152
+#: nova/api/openstack/compute/contrib/os_networks.py:153
msgid "VLAN support must be enabled"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_networks.py:155
+#: nova/api/openstack/compute/contrib/os_networks.py:156
#, python-format
msgid "Cannot associate network %(network)s with project %(project)s: %(message)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:83
-msgid "Failed to get default networks"
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:122
-msgid "Failed to update usages deallocating network."
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:157
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:168
msgid "No CIDR requested"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:163
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:174
msgid "Requested network does not contain enough (2+) usable hosts"
msgstr ""
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:167
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178
msgid "CIDR is malformed."
msgstr ""
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:170
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:181
msgid "Address could not be converted."
msgstr ""
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:178
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:189
msgid "Quota exceeded, too many networks."
msgstr ""
-#: nova/api/openstack/compute/contrib/os_tenant_networks.py:191
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:202
msgid "Create networks failed"
msgstr ""
@@ -3580,23 +3466,13 @@ msgid ""
" %(quota_used)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/rescue.py:78
-#: nova/api/openstack/compute/plugins/v3/rescue.py:80
-msgid "The rescue operation is not implemented by this cloud."
-msgstr ""
-
-#: nova/api/openstack/compute/contrib/rescue.py:98
-#: nova/api/openstack/compute/plugins/v3/rescue.py:104
-msgid "The unrescue operation is not implemented by this cloud."
-msgstr ""
-
#: nova/api/openstack/compute/contrib/scheduler_hints.py:37
#: nova/api/openstack/compute/plugins/v3/scheduler_hints.py:39
msgid "Malformed scheduler_hints attribute"
msgstr ""
#: nova/api/openstack/compute/contrib/security_group_default_rules.py:127
-#: nova/api/openstack/compute/contrib/security_groups.py:386
+#: nova/api/openstack/compute/contrib/security_groups.py:394
msgid "Not enough parameters to build a valid rule."
msgstr ""
@@ -3608,81 +3484,80 @@ msgstr ""
msgid "security group default rule not found"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:394
+#: nova/api/openstack/compute/contrib/security_groups.py:402
#, python-format
msgid "Bad prefix for network in cidr %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:484
+#: nova/api/openstack/compute/contrib/security_groups.py:491
msgid "Security group not specified"
msgstr ""
-#: nova/api/openstack/compute/contrib/security_groups.py:488
+#: nova/api/openstack/compute/contrib/security_groups.py:495
msgid "Security group name cannot be empty"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:92
+#: nova/api/openstack/compute/contrib/server_external_events.py:93
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:65
#, python-format
msgid "event entity requires key %(key)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:96
+#: nova/api/openstack/compute/contrib/server_external_events.py:97
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:69
#, python-format
msgid "event entity contains unsupported items: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:102
+#: nova/api/openstack/compute/contrib/server_external_events.py:103
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:75
#, python-format
msgid "Invalid event status `%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:121
-#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94
+#: nova/api/openstack/compute/contrib/server_external_events.py:126
#, python-format
-msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s"
+msgid "Creating event %(name)s:%(tag)s for instance %(instance_uuid)s"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_external_events.py:130
+#: nova/api/openstack/compute/contrib/server_external_events.py:148
#: nova/api/openstack/compute/plugins/v3/server_external_events.py:103
msgid "No instances found for any event"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:162
+#: nova/api/openstack/compute/contrib/server_groups.py:163
msgid "Conflicting policies configured!"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:167
+#: nova/api/openstack/compute/contrib/server_groups.py:168
#, python-format
msgid "Invalid policies: %s"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:172
+#: nova/api/openstack/compute/contrib/server_groups.py:173
msgid "Duplicate policies configured!"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:177
+#: nova/api/openstack/compute/contrib/server_groups.py:178
msgid "the body is invalid."
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:186
+#: nova/api/openstack/compute/contrib/server_groups.py:187
#, python-format
msgid "'%s' is either missing or empty."
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:192
+#: nova/api/openstack/compute/contrib/server_groups.py:193
#, python-format
msgid "Invalid format for name: '%s'"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:200
+#: nova/api/openstack/compute/contrib/server_groups.py:201
#, python-format
msgid "'%s' is not a list"
msgstr ""
-#: nova/api/openstack/compute/contrib/server_groups.py:204
+#: nova/api/openstack/compute/contrib/server_groups.py:205
#, python-format
msgid "unsupported fields: %s"
msgstr ""
@@ -3784,16 +3659,16 @@ msgstr ""
msgid "access_ip_v6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/aggregates.py:170
+#: nova/api/openstack/compute/plugins/v3/aggregates.py:172
msgid "Invalid request format for metadata"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:103
+#: nova/api/openstack/compute/plugins/v3/attach_interfaces.py:109
#, python-format
msgid "Attach interface to %s"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/cells.py:187
+#: nova/api/openstack/compute/plugins/v3/cells.py:189
#, python-format
msgid "Cell %s doesn't exist."
msgstr ""
@@ -3802,23 +3677,6 @@ msgstr ""
msgid "token not provided"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:62
-#, python-format
-msgid "create_backup entity requires %s attribute"
-msgstr ""
-
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:66
-msgid "Malformed create_backup entity"
-msgstr ""
-
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:72
-msgid "create_backup attribute 'rotation' must be an integer"
-msgstr ""
-
-#: nova/api/openstack/compute/plugins/v3/create_backup.py:75
-msgid "create_backup attribute 'rotation' must be greater than or equal to zero"
-msgstr ""
-
#: nova/api/openstack/compute/plugins/v3/extended_volumes.py:98
msgid "The volume was either invalid or not attached to the instance."
msgstr ""
@@ -3834,96 +3692,101 @@ msgstr ""
msgid "Volume %(volume_id)s is not attached to the instance %(server_id)s"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/flavors.py:94
+#: nova/api/openstack/compute/plugins/v3/flavors.py:96
#, python-format
msgid "Invalid min_ram filter [%s]"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/flavors.py:101
+#: nova/api/openstack/compute/plugins/v3/flavors.py:103
#, python-format
msgid "Invalid min_disk filter [%s]"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:66
-msgid "No or bad extra_specs provided"
+#: nova/api/openstack/compute/plugins/v3/hypervisors.py:132
+msgid "Need parameter 'query' to specify which hypervisor to filter on"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:73
-#: nova/api/openstack/compute/plugins/v3/flavors_extraspecs.py:95
-msgid "Concurrent transaction has been committed, try again"
+#: nova/api/openstack/compute/plugins/v3/pause_server.py:59
+#: nova/api/openstack/compute/plugins/v3/pause_server.py:81
+msgid "Virt driver does not implement pause function."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/hosts.py:120
-msgid "The request body invalid"
+#: nova/api/openstack/compute/plugins/v3/server_actions.py:76
+#, python-format
+msgid "Action %s not found"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/hypervisors.py:125
-msgid "Need parameter 'query' to specify which hypervisor to filter on"
+#: nova/api/openstack/compute/plugins/v3/server_diagnostics.py:46
+msgid "Unable to get diagnostics, functionality not implemented"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/server_actions.py:76
+#: nova/api/openstack/compute/plugins/v3/server_external_events.py:94
#, python-format
-msgid "Action %s not found"
+msgid "Create event %(name)s:%(tag)s for instance %(instance_uuid)s"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:212
+#: nova/api/openstack/compute/plugins/v3/servers.py:235
msgid "Invalid changes_since value"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:335
+#: nova/api/openstack/compute/plugins/v3/servers.py:306
+#, python-format
+msgid "Flavor '%s' could not be found "
+msgstr ""
+
+#: nova/api/openstack/compute/plugins/v3/servers.py:358
msgid "Unknown argument: port"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:343
+#: nova/api/openstack/compute/plugins/v3/servers.py:366
#, python-format
msgid ""
"Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port "
"already has a Fixed IP allocated."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:412
-#: nova/api/openstack/compute/plugins/v3/servers.py:585
-msgid "The request body is invalid"
+#: nova/api/openstack/compute/plugins/v3/servers.py:494
+#: nova/api/openstack/compute/plugins/v3/servers.py:522
+msgid "Invalid flavor_ref provided."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:470
-#: nova/api/openstack/compute/plugins/v3/servers.py:498
-msgid "Invalid flavor_ref provided."
+#: nova/api/openstack/compute/plugins/v3/servers.py:620
+msgid "The request body is invalid"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:596
+#: nova/api/openstack/compute/plugins/v3/servers.py:631
msgid "host_id cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:741
+#: nova/api/openstack/compute/plugins/v3/servers.py:782
msgid "Invalid image_ref provided."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:760
+#: nova/api/openstack/compute/plugins/v3/servers.py:801
msgid "Missing image_ref attribute"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:767
+#: nova/api/openstack/compute/plugins/v3/servers.py:808
msgid "Missing flavor_ref attribute"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:780
+#: nova/api/openstack/compute/plugins/v3/servers.py:822
msgid "Resize request has invalid 'flavor_ref' attribute."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:783
+#: nova/api/openstack/compute/plugins/v3/servers.py:825
msgid "Resize requests require 'flavor_ref' attribute."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:799
+#: nova/api/openstack/compute/plugins/v3/servers.py:842
msgid "Could not parse image_ref from request."
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:883
+#: nova/api/openstack/compute/plugins/v3/servers.py:927
msgid "create_image entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/plugins/v3/servers.py:945
+#: nova/api/openstack/compute/plugins/v3/servers.py:989
msgid "Invalid admin_password"
msgstr ""
@@ -3931,16 +3794,12 @@ msgstr ""
msgid "Disabled reason contains invalid characters or is too long"
msgstr ""
-#: nova/api/openstack/compute/views/servers.py:197
-msgid "Instance has had its instance_type removed from the DB"
-msgstr ""
-
-#: nova/api/validation/validators.py:61
+#: nova/api/validation/validators.py:73
#, python-format
msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s"
msgstr ""
-#: nova/cells/manager.py:78
+#: nova/cells/manager.py:79
msgid ""
"The cells feature of Nova is considered experimental by the OpenStack "
"project because it receives much less testing than the rest of Nova. This"
@@ -3948,117 +3807,122 @@ msgid ""
" use of it in production right now may be risky."
msgstr ""
-#: nova/cells/messaging.py:205
+#: nova/cells/messaging.py:204
#, python-format
msgid "Error processing message locally: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:366 nova/cells/messaging.py:374
+#: nova/cells/messaging.py:365 nova/cells/messaging.py:373
#, python-format
msgid "destination is %(target_cell)s but routing_path is %(routing_path)s"
msgstr ""
-#: nova/cells/messaging.py:386
+#: nova/cells/messaging.py:385
#, python-format
msgid "Unknown %(cell_type)s when routing to %(target_cell)s"
msgstr ""
-#: nova/cells/messaging.py:410
+#: nova/cells/messaging.py:409
#, python-format
msgid "Error locating next hop for message: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:437
+#: nova/cells/messaging.py:436
#, python-format
msgid "Failed to send message to cell: %(next_hop)s: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:516
+#: nova/cells/messaging.py:515
#, python-format
msgid "Error locating next hops for message: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:536
+#: nova/cells/messaging.py:535
#, python-format
msgid "Error sending message to next hops: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:554
+#: nova/cells/messaging.py:553
#, python-format
msgid "Error waiting for responses from neighbor cells: %(exc)s"
msgstr ""
-#: nova/cells/messaging.py:665
+#: nova/cells/messaging.py:664
#, python-format
msgid "Unknown method '%(method)s' in compute API"
msgstr ""
-#: nova/cells/messaging.py:1096
+#: nova/cells/messaging.py:1106
#, python-format
msgid "Got message to create instance fault: %(instance_fault)s"
msgstr ""
-#: nova/cells/messaging.py:1119
+#: nova/cells/messaging.py:1129
#, python-format
msgid ""
"Forcing a sync of instances, project_id=%(projid_str)s, "
"updated_since=%(since_str)s"
msgstr ""
-#: nova/cells/messaging.py:1198
+#: nova/cells/messaging.py:1208
#, python-format
msgid "No match when trying to update BDM: %(bdm)s"
msgstr ""
-#: nova/cells/messaging.py:1673
+#: nova/cells/messaging.py:1683
#, python-format
msgid "No cell_name for %(method)s() from API"
msgstr ""
-#: nova/cells/messaging.py:1690
+#: nova/cells/messaging.py:1700
msgid "No cell_name for instance update from API"
msgstr ""
-#: nova/cells/messaging.py:1853
+#: nova/cells/messaging.py:1863
#, python-format
msgid "Returning exception %s to caller"
msgstr ""
-#: nova/cells/rpcapi.py:369
+#: nova/cells/rpcapi.py:378
msgid "Failed to notify cells of BDM update/create."
msgstr ""
-#: nova/cells/rpcapi.py:385
+#: nova/cells/rpcapi.py:394
msgid "Failed to notify cells of BDM destroy."
msgstr ""
-#: nova/cells/scheduler.py:192
+#: nova/cells/scheduler.py:191
#, python-format
msgid "Couldn't communicate with cell '%s'"
msgstr ""
-#: nova/cells/scheduler.py:196
+#: nova/cells/scheduler.py:195
msgid "Couldn't communicate with any cells"
msgstr ""
-#: nova/cells/scheduler.py:234
+#: nova/cells/scheduler.py:233
#, python-format
msgid ""
"No cells available when scheduling. Will retry in %(sleep_time)s "
"second(s)"
msgstr ""
-#: nova/cells/scheduler.py:240
+#: nova/cells/scheduler.py:239
#, python-format
msgid "Error scheduling instances %(instance_uuids)s"
msgstr ""
-#: nova/cells/state.py:352
+#: nova/cells/state.py:182
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: nova/cells/state.py:363
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capabilities"
msgstr ""
-#: nova/cells/state.py:367
+#: nova/cells/state.py:378
#, python-format
msgid "Unknown cell '%(cell_name)s' when trying to update capacities"
msgstr ""
@@ -4098,71 +3962,71 @@ msgstr ""
msgid "Failed to load %s"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:211
+#: nova/cmd/baremetal_deploy_helper.py:210
#, python-format
msgid "parent device '%s' not found"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:214
+#: nova/cmd/baremetal_deploy_helper.py:213
#, python-format
msgid "root device '%s' not found"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:216
+#: nova/cmd/baremetal_deploy_helper.py:215
#, python-format
msgid "swap device '%s' not found"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:218
+#: nova/cmd/baremetal_deploy_helper.py:217
#, python-format
msgid "ephemeral device '%s' not found"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:228
+#: nova/cmd/baremetal_deploy_helper.py:227
msgid "Failed to detect root device UUID."
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:252
+#: nova/cmd/baremetal_deploy_helper.py:251
#, python-format
msgid "Cmd : %s"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:253
+#: nova/cmd/baremetal_deploy_helper.py:252
#, python-format
msgid "StdOut : %r"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:254
+#: nova/cmd/baremetal_deploy_helper.py:253
#, python-format
msgid "StdErr : %r"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:282
+#: nova/cmd/baremetal_deploy_helper.py:281
#, python-format
msgid "start deployment for node %(node_id)s, params %(params)s"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:291
+#: nova/cmd/baremetal_deploy_helper.py:290
#, python-format
msgid "deployment to node %s failed"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:295
+#: nova/cmd/baremetal_deploy_helper.py:294
#, python-format
msgid "deployment to node %s done"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:317
+#: nova/cmd/baremetal_deploy_helper.py:316
#, python-format
msgid "post: environ=%s"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:336
+#: nova/cmd/baremetal_deploy_helper.py:335
#, python-format
msgid "Deploy agent error message: %s"
msgstr ""
-#: nova/cmd/baremetal_deploy_helper.py:360
+#: nova/cmd/baremetal_deploy_helper.py:359
#, python-format
msgid "request is queued: node %(node_id)s, params %(params)s"
msgstr ""
@@ -4189,17 +4053,17 @@ msgstr ""
msgid "No db access allowed in nova-compute: %s"
msgstr ""
-#: nova/cmd/dhcpbridge.py:109
+#: nova/cmd/dhcpbridge.py:108
#, python-format
msgid "No db access allowed in nova-dhcpbridge: %s"
msgstr ""
-#: nova/cmd/dhcpbridge.py:132
+#: nova/cmd/dhcpbridge.py:131
#, python-format
msgid "Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'"
msgstr ""
-#: nova/cmd/dhcpbridge.py:142
+#: nova/cmd/dhcpbridge.py:141
msgid "Environment variable 'NETWORK_ID' must be set."
msgstr ""
@@ -4279,40 +4143,40 @@ msgid ""
"Use python-neutronclient instead."
msgstr ""
-#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:217
+#: nova/cmd/manage.py:551 nova/tests/test_nova_manage.py:218
msgid "id"
msgstr ""
-#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:218
+#: nova/cmd/manage.py:552 nova/tests/test_nova_manage.py:219
msgid "IPv4"
msgstr ""
-#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:219
+#: nova/cmd/manage.py:553 nova/tests/test_nova_manage.py:220
msgid "IPv6"
msgstr ""
-#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:220
+#: nova/cmd/manage.py:554 nova/tests/test_nova_manage.py:221
msgid "start address"
msgstr ""
-#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:221
+#: nova/cmd/manage.py:555 nova/tests/test_nova_manage.py:222
msgid "DNS1"
msgstr ""
-#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:222
+#: nova/cmd/manage.py:556 nova/tests/test_nova_manage.py:223
msgid "DNS2"
msgstr ""
-#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:223
+#: nova/cmd/manage.py:557 nova/tests/test_nova_manage.py:224
msgid "VlanID"
msgstr ""
#: nova/cmd/manage.py:558 nova/cmd/manage.py:665
-#: nova/tests/test_nova_manage.py:224
+#: nova/tests/test_nova_manage.py:225
msgid "project"
msgstr ""
-#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:225
+#: nova/cmd/manage.py:559 nova/tests/test_nova_manage.py:226
msgid "uuid"
msgstr ""
@@ -4523,1155 +4387,956 @@ msgstr ""
msgid "No db access allowed in nova-network: %s"
msgstr ""
-#: nova/compute/api.py:362
+#: nova/compute/api.py:355
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:369
+#: nova/compute/api.py:362
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:381
+#: nova/compute/api.py:374
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run %(min_count)d "
"instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:385
+#: nova/compute/api.py:378
#, python-format
msgid ""
"%(overs)s quota exceeded for %(pid)s, tried to run between %(min_count)d "
"and %(max_count)d instances. %(msg)s"
msgstr ""
-#: nova/compute/api.py:406
+#: nova/compute/api.py:399
msgid "Metadata type should be dict."
msgstr ""
-#: nova/compute/api.py:412
-#, python-format
-msgid ""
-"Quota exceeded for %(pid)s, tried to set %(num_metadata)s metadata "
-"properties"
-msgstr ""
-
-#: nova/compute/api.py:424
-#, python-format
-msgid "Metadata property key '%s' is not a string."
-msgstr ""
-
-#: nova/compute/api.py:427
-#, python-format
-msgid "Metadata property value '%(v)s' for key '%(k)s' is not a string."
-msgstr ""
-
-#: nova/compute/api.py:431
-msgid "Metadata property key blank"
-msgstr ""
-
-#: nova/compute/api.py:434
+#: nova/compute/api.py:421
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:437
+#: nova/compute/api.py:424
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:574
-msgid "Failed to set instance name using multi_instance_display_name_template."
-msgstr ""
-
-#: nova/compute/api.py:676
+#: nova/compute/api.py:663
msgid "Cannot attach one or more volumes to multiple instances"
msgstr ""
-#: nova/compute/api.py:718
+#: nova/compute/api.py:705
msgid "The requested availability zone is not available"
msgstr ""
-#: nova/compute/api.py:1119
+#: nova/compute/api.py:1107
msgid ""
"Images with destination_type 'volume' need to have a non-zero size "
"specified"
msgstr ""
-#: nova/compute/api.py:1150
+#: nova/compute/api.py:1138
msgid "More than one swap drive requested."
msgstr ""
-#: nova/compute/api.py:1299
-#: nova/tests/api/openstack/compute/test_servers.py:3122
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2460
+#: nova/compute/api.py:1277
+#: nova/tests/api/openstack/compute/test_servers.py:3199
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2488
msgid ""
"Unable to launch multiple instances with a single configured port ID. "
"Please launch your instance one by one with different ports."
msgstr ""
-#: nova/compute/api.py:1401
+#: nova/compute/api.py:1298
+msgid "max_count cannot be greater than 1 if an fixed_ip is specified."
+msgstr ""
+
+#: nova/compute/api.py:1404
msgid "instance termination disabled"
msgstr ""
-#: nova/compute/api.py:1416
+#: nova/compute/api.py:1418
#, python-format
msgid "Working on deleting snapshot %s from shelved instance..."
msgstr ""
-#: nova/compute/api.py:1423
+#: nova/compute/api.py:1425
#, python-format
msgid "Failed to delete snapshot from shelved instance (%s)."
msgstr ""
-#: nova/compute/api.py:1427
-msgid ""
-"Something wrong happened when trying to delete snapshot from shelved "
-"instance."
-msgstr ""
-
-#: nova/compute/api.py:1492
+#: nova/compute/api.py:1486
msgid "Instance is already in deleting state, ignoring this request"
msgstr ""
-#: nova/compute/api.py:1540
+#: nova/compute/api.py:1521
#, python-format
msgid ""
"Found an unconfirmed migration during delete, id: %(id)s, status: "
"%(status)s"
msgstr ""
-#: nova/compute/api.py:1550
+#: nova/compute/api.py:1531
msgid "Instance may have been confirmed during delete"
msgstr ""
-#: nova/compute/api.py:1567
+#: nova/compute/api.py:1548
#, python-format
msgid "Migration %s may have been confirmed during delete"
msgstr ""
-#: nova/compute/api.py:1603
+#: nova/compute/api.py:1583
#, python-format
msgid "Flavor %d not found"
msgstr ""
-#: nova/compute/api.py:1621
+#: nova/compute/api.py:1603
#, python-format
msgid "instance's host %s is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:1648 nova/compute/manager.py:2279
+#: nova/compute/api.py:1630
#, python-format
msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/api.py:2043
+#: nova/compute/api.py:2030
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:2415
+#: nova/compute/api.py:2368
+msgid "Resize to zero disk flavor is not allowed."
+msgstr ""
+
+#: nova/compute/api.py:2407
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
-#: nova/compute/api.py:2584
+#: nova/compute/api.py:2582
msgid "Cannot rescue a volume-backed instance"
msgstr ""
-#: nova/compute/api.py:2811
+#: nova/compute/api.py:2809
msgid "Volume must be attached in order to detach."
msgstr ""
-#: nova/compute/api.py:2831
+#: nova/compute/api.py:2829
msgid "Old volume is attached to a different instance."
msgstr ""
-#: nova/compute/api.py:2834
+#: nova/compute/api.py:2832
msgid "New volume must be detached in order to swap."
msgstr ""
-#: nova/compute/api.py:2837
+#: nova/compute/api.py:2835
msgid "New volume must be the same size or larger."
msgstr ""
-#: nova/compute/api.py:3032
+#: nova/compute/api.py:3042
#, python-format
msgid "Instance compute service state on %s expected to be down, but it was up."
msgstr ""
-#: nova/compute/api.py:3335
+#: nova/compute/api.py:3347
msgid "Host aggregate is not empty"
msgstr ""
-#: nova/compute/api.py:3368
+#: nova/compute/api.py:3380
#, python-format
msgid "More than 1 AZ for host %s"
msgstr ""
-#: nova/compute/api.py:3403
+#: nova/compute/api.py:3415
#, python-format
msgid "Host already in availability zone %s"
msgstr ""
-#: nova/compute/api.py:3491 nova/tests/compute/test_keypairs.py:135
+#: nova/compute/api.py:3503 nova/tests/compute/test_keypairs.py:137
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:3495 nova/tests/compute/test_keypairs.py:127
-#: nova/tests/compute/test_keypairs.py:131
-msgid "Keypair name must be between 1 and 255 characters long"
+#: nova/compute/api.py:3509 nova/tests/compute/test_keypairs.py:127
+#: nova/tests/compute/test_keypairs.py:132
+msgid "Keypair name must be string and between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:3583
+#: nova/compute/api.py:3597
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:3586
-#, python-format
-msgid "Security group %s cannot be empty."
-msgstr ""
-
-#: nova/compute/api.py:3594
+#: nova/compute/api.py:3607
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)s'."
msgstr ""
-#: nova/compute/api.py:3600
-#, python-format
-msgid "Security group %s should not be greater than 255 characters."
-msgstr ""
-
-#: nova/compute/api.py:3618
+#: nova/compute/api.py:3627
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:3621
+#: nova/compute/api.py:3630
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:3633
+#: nova/compute/api.py:3642
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:3646
+#: nova/compute/api.py:3655
#, python-format
msgid "Unable to update system group '%s'"
msgstr ""
-#: nova/compute/api.py:3708
+#: nova/compute/api.py:3717
#, python-format
msgid "Unable to delete system group '%s'"
msgstr ""
-#: nova/compute/api.py:3713
+#: nova/compute/api.py:3722
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:3723
-msgid "Failed to update usages deallocating security group"
-msgstr ""
-
-#: nova/compute/api.py:3726
+#: nova/compute/api.py:3735
#, python-format
msgid "Delete security group %s"
msgstr ""
-#: nova/compute/api.py:3802 nova/compute/api.py:3885
+#: nova/compute/api.py:3811 nova/compute/api.py:3894
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:3818
+#: nova/compute/api.py:3827
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:3821
+#: nova/compute/api.py:3830
#, python-format
msgid ""
"Security group %(name)s added %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)"
msgstr ""
-#: nova/compute/api.py:3836
+#: nova/compute/api.py:3845
#, python-format
msgid ""
"Security group %(name)s removed %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)"
msgstr ""
-#: nova/compute/api.py:3892
+#: nova/compute/api.py:3901
msgid "Security group id should be integer"
msgstr ""
-#: nova/compute/claims.py:135
+#: nova/compute/claims.py:126
#, python-format
-msgid ""
-"Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB, VCPUs "
-"%(vcpus)d"
+msgid "Attempting claim: memory %(memory_mb)d MB, disk %(disk_gb)d GB"
msgstr ""
-#: nova/compute/claims.py:150
+#: nova/compute/claims.py:140
msgid "Claim successful"
msgstr ""
-#: nova/compute/claims.py:153
+#: nova/compute/claims.py:143
msgid "memory"
msgstr ""
-#: nova/compute/claims.py:162
+#: nova/compute/claims.py:152
msgid "disk"
msgstr ""
-#: nova/compute/claims.py:177 nova/compute/claims.py:249
+#: nova/compute/claims.py:167 nova/compute/claims.py:230
msgid "Claim pci failed."
msgstr ""
-#: nova/compute/claims.py:180
-msgid "CPUs"
-msgstr ""
-
-#: nova/compute/claims.py:192
+#: nova/compute/claims.py:177
#, python-format
msgid "Total %(type)s: %(total)d %(unit)s, used: %(used).02f %(unit)s"
msgstr ""
-#: nova/compute/claims.py:199
+#: nova/compute/claims.py:184
#, python-format
msgid "%(type)s limit not specified, defaulting to unlimited"
msgstr ""
-#: nova/compute/claims.py:206
+#: nova/compute/claims.py:191
#, python-format
msgid "%(type)s limit: %(limit).02f %(unit)s, free: %(free).02f %(unit)s"
msgstr ""
-#: nova/compute/claims.py:212
+#: nova/compute/claims.py:197
#, python-format
msgid "Free %(type)s %(free).02f %(unit)s < requested %(requested)d %(unit)s"
msgstr ""
-#: nova/compute/flavors.py:109
+#: nova/compute/flavors.py:110
msgid ""
"Flavor names can only contain alphanumeric characters, periods, dashes, "
"underscores and spaces."
msgstr ""
-#: nova/compute/flavors.py:119
+#: nova/compute/flavors.py:120
msgid "id cannot contain leading and/or trailing whitespace(s)"
msgstr ""
-#: nova/compute/flavors.py:129
+#: nova/compute/flavors.py:130
msgid ""
"Flavor id can only contain letters from A-Z (both cases), periods, "
"dashes, underscores and spaces."
msgstr ""
-#: nova/compute/flavors.py:150
+#: nova/compute/flavors.py:151
#, python-format
msgid "'rxtx_factor' argument must be a float between 0 and %g"
msgstr ""
-#: nova/compute/flavors.py:161
+#: nova/compute/flavors.py:162
msgid "is_public must be a boolean"
msgstr ""
-#: nova/compute/flavors.py:166
-#, python-format
-msgid "DB error: %s"
-msgstr ""
-
-#: nova/compute/flavors.py:177
-#, python-format
-msgid "Instance type %s not found for deletion"
-msgstr ""
-
-#: nova/compute/flavors.py:327
+#: nova/compute/flavors.py:328
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
msgstr ""
-#: nova/compute/manager.py:278
+#: nova/compute/manager.py:284
#, python-format
msgid "Task possibly preempted: %s"
msgstr ""
-#: nova/compute/manager.py:360 nova/compute/manager.py:2849
-#, python-format
-msgid "Error while trying to clean up image %s"
-msgstr ""
-
-#: nova/compute/manager.py:501
+#: nova/compute/manager.py:508
msgid "Instance event failed"
msgstr ""
-#: nova/compute/manager.py:600
+#: nova/compute/manager.py:608
#, python-format
msgid "%s is not a valid node managed by this compute host."
msgstr ""
-#: nova/compute/manager.py:698
+#: nova/compute/manager.py:714
#, python-format
msgid ""
"Deleting instance as its host (%(instance_host)s) is not equal to our "
"host (%(our_host)s)."
msgstr ""
-#: nova/compute/manager.py:713
+#: nova/compute/manager.py:729
msgid "Instance has been marked deleted already, removing it from the hypervisor."
msgstr ""
-#: nova/compute/manager.py:733
+#: nova/compute/manager.py:749
msgid ""
"Hypervisor driver does not support instance shared storage check, "
"assuming it's not on shared storage"
msgstr ""
-#: nova/compute/manager.py:739
-msgid "Failed to check if instance shared"
-msgstr ""
-
-#: nova/compute/manager.py:805 nova/compute/manager.py:856
-msgid "Failed to complete a deletion"
-msgstr ""
-
-#: nova/compute/manager.py:838
+#: nova/compute/manager.py:854
msgid ""
"Service started deleting the instance during the previous run, but did "
"not finish. Restarting the deletion now."
msgstr ""
-#: nova/compute/manager.py:879
+#: nova/compute/manager.py:895
#, python-format
msgid ""
"Instance in transitional state (%(task_state)s) at start-up and power "
"state is (%(power_state)s), clearing task state"
msgstr ""
-#: nova/compute/manager.py:897
-msgid "Failed to stop instance"
-msgstr ""
-
-#: nova/compute/manager.py:909
-msgid "Failed to start instance"
-msgstr ""
-
-#: nova/compute/manager.py:934
-msgid "Failed to revert crashed migration"
-msgstr ""
-
-#: nova/compute/manager.py:937
+#: nova/compute/manager.py:953
msgid "Instance found in migrating state during startup. Resetting task_state"
msgstr ""
-#: nova/compute/manager.py:954
+#: nova/compute/manager.py:970
msgid "Rebooting instance after nova-compute restart."
msgstr ""
-#: nova/compute/manager.py:964
+#: nova/compute/manager.py:980
msgid "Hypervisor driver does not support resume guests"
msgstr ""
-#: nova/compute/manager.py:969
+#: nova/compute/manager.py:985
msgid "Failed to resume instance"
msgstr ""
-#: nova/compute/manager.py:978
+#: nova/compute/manager.py:994
msgid "Hypervisor driver does not support firewall rules"
msgstr ""
-#: nova/compute/manager.py:1003
+#: nova/compute/manager.py:1019
#, python-format
-msgid "Lifecycle event %(state)d on VM %(uuid)s"
+msgid "VM %(state)s (Lifecycle Event)"
msgstr ""
-#: nova/compute/manager.py:1019
+#: nova/compute/manager.py:1035
#, python-format
msgid "Unexpected power state %d"
msgstr ""
-#: nova/compute/manager.py:1124
+#: nova/compute/manager.py:1140
msgid "Hypervisor driver does not support security groups."
msgstr ""
-#: nova/compute/manager.py:1164
+#: nova/compute/manager.py:1178
#, python-format
msgid "Volume id: %s finished being created but was not set as 'available'"
msgstr ""
-#: nova/compute/manager.py:1222 nova/compute/manager.py:1978
+#: nova/compute/manager.py:1235 nova/compute/manager.py:2064
msgid "Success"
msgstr ""
-#: nova/compute/manager.py:1246
+#: nova/compute/manager.py:1259
msgid "Instance disappeared before we could start it"
msgstr ""
-#: nova/compute/manager.py:1274
+#: nova/compute/manager.py:1286
msgid "Anti-affinity instance group policy was violated."
msgstr ""
-#: nova/compute/manager.py:1351
-msgid "Failed to dealloc network for deleted instance"
-msgstr ""
-
-#: nova/compute/manager.py:1356
+#: nova/compute/manager.py:1369
msgid "Instance disappeared during build"
msgstr ""
-#: nova/compute/manager.py:1372
-msgid "Failed to dealloc network for failed instance"
-msgstr ""
-
-#: nova/compute/manager.py:1399
+#: nova/compute/manager.py:1412
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:1445 nova/compute/manager.py:3473
-msgid "Error trying to reschedule"
-msgstr ""
-
-#: nova/compute/manager.py:1500
+#: nova/compute/manager.py:1514
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:1510 nova/compute/manager.py:1870
+#: nova/compute/manager.py:1524 nova/compute/manager.py:1894
msgid "Starting instance..."
msgstr ""
-#: nova/compute/manager.py:1528
+#: nova/compute/manager.py:1542
#, python-format
msgid ""
"Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."
msgstr ""
-#: nova/compute/manager.py:1553
-#, python-format
-msgid "Instance failed network setup after %(attempts)d attempt(s)"
-msgstr ""
-
-#: nova/compute/manager.py:1557
+#: nova/compute/manager.py:1571
#, python-format
msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)"
msgstr ""
-#: nova/compute/manager.py:1738
-msgid "Instance failed block device setup"
-msgstr ""
-
-#: nova/compute/manager.py:1758 nova/compute/manager.py:2086
-#: nova/compute/manager.py:3985
-msgid "Instance failed to spawn"
-msgstr ""
-
-#: nova/compute/manager.py:1937
-msgid "Unexpected build failure, not rescheduling build."
-msgstr ""
-
-#: nova/compute/manager.py:2002
+#: nova/compute/manager.py:2027
#, python-format
msgid "Failed to allocate the network(s) with error %s, not rescheduling."
msgstr ""
-#: nova/compute/manager.py:2008 nova/compute/manager.py:2048
-msgid "Failed to allocate network(s)"
-msgstr ""
-
-#: nova/compute/manager.py:2012 nova/compute/manager.py:2050
+#: nova/compute/manager.py:2037 nova/compute/manager.py:2087
msgid "Failed to allocate the network(s), not rescheduling."
msgstr ""
-#: nova/compute/manager.py:2074
-msgid "Failure prepping block device"
-msgstr ""
-
-#: nova/compute/manager.py:2076
+#: nova/compute/manager.py:2113
msgid "Failure prepping block device."
msgstr ""
-#: nova/compute/manager.py:2099
+#: nova/compute/manager.py:2134
msgid "Could not clean up failed build, not rescheduling"
msgstr ""
-#: nova/compute/manager.py:2109
-msgid "Failed to deallocate networks"
-msgstr ""
-
-#: nova/compute/manager.py:2130
-msgid "Failed to cleanup volumes for failed build, not rescheduling"
-msgstr ""
-
-#: nova/compute/manager.py:2169
+#: nova/compute/manager.py:2192
msgid "Failed to deallocate network for instance."
msgstr ""
-#: nova/compute/manager.py:2178
+#: nova/compute/manager.py:2213
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:2222
-#, python-format
-msgid "Ignoring DiskNotFound: %s"
-msgstr ""
-
-#: nova/compute/manager.py:2225
-#, python-format
-msgid "Ignoring VolumeNotFound: %s"
-msgstr ""
-
-#: nova/compute/manager.py:2324
+#: nova/compute/manager.py:2368
msgid "Instance disappeared during terminate"
msgstr ""
-#: nova/compute/manager.py:2330 nova/compute/manager.py:3653
-#: nova/compute/manager.py:5671
-msgid "Setting instance vm_state to ERROR"
-msgstr ""
-
-#: nova/compute/manager.py:2503
+#: nova/compute/manager.py:2554
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:2516
+#: nova/compute/manager.py:2567
msgid "Invalid state of instance files on shared storage"
msgstr ""
-#: nova/compute/manager.py:2520
+#: nova/compute/manager.py:2571
msgid "disk on shared storage, recreating using existing disk"
msgstr ""
-#: nova/compute/manager.py:2524
+#: nova/compute/manager.py:2575
#, python-format
msgid "disk not on shared storage, rebuilding from: '%s'"
msgstr ""
-#: nova/compute/manager.py:2535 nova/compute/manager.py:4790
-#, python-format
-msgid "Failed to get compute_info for %s"
-msgstr ""
-
-#: nova/compute/manager.py:2611
-#, python-format
-msgid "bringing vm to original state: '%s'"
-msgstr ""
-
-#: nova/compute/manager.py:2642
+#: nova/compute/manager.py:2694
#, python-format
msgid "Detaching from volume api: %s"
msgstr ""
-#: nova/compute/manager.py:2669
+#: nova/compute/manager.py:2721
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:2686
+#: nova/compute/manager.py:2738
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:2722
+#: nova/compute/manager.py:2774
msgid "Reboot failed but instance is running"
msgstr ""
-#: nova/compute/manager.py:2730
+#: nova/compute/manager.py:2782
#, python-format
msgid "Cannot reboot instance: %s"
msgstr ""
-#: nova/compute/manager.py:2742
+#: nova/compute/manager.py:2794
msgid "Instance disappeared during reboot"
msgstr ""
-#: nova/compute/manager.py:2810
+#: nova/compute/manager.py:2862
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:2816
+#: nova/compute/manager.py:2868
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:2854
+#: nova/compute/manager.py:2901
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr ""
+
+#: nova/compute/manager.py:2906
msgid "Image not found during snapshot"
msgstr ""
-#: nova/compute/manager.py:2936
+#: nova/compute/manager.py:2988
#, python-format
msgid "Failed to set admin password. Instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:2943
+#: nova/compute/manager.py:2995
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:2948
+#: nova/compute/manager.py:3000
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
-#: nova/compute/manager.py:2961
-#, python-format
-msgid "set_admin_password failed: %s"
-msgstr ""
-
-#: nova/compute/manager.py:2967
+#: nova/compute/manager.py:3019
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:2983
+#: nova/compute/manager.py:3035
#, python-format
msgid ""
"trying to inject a file into a non-running (state: %(current_state)s "
"expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:2988
+#: nova/compute/manager.py:3040
#, python-format
msgid "injecting file to %s"
msgstr ""
-#: nova/compute/manager.py:3006
+#: nova/compute/manager.py:3058
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
msgstr ""
-#: nova/compute/manager.py:3025
+#: nova/compute/manager.py:3077
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:3046
-msgid "Error trying to Rescue Instance"
-msgstr ""
-
-#: nova/compute/manager.py:3050
+#: nova/compute/manager.py:3102
#, python-format
msgid "Driver Error: %s"
msgstr ""
-#: nova/compute/manager.py:3073
+#: nova/compute/manager.py:3125
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:3144
+#: nova/compute/manager.py:3196
#, python-format
msgid "Migration %s is not found during confirmation"
msgstr ""
-#: nova/compute/manager.py:3149
+#: nova/compute/manager.py:3201
#, python-format
msgid "Migration %s is already confirmed"
msgstr ""
-#: nova/compute/manager.py:3153
+#: nova/compute/manager.py:3205
#, python-format
msgid ""
"Unexpected confirmation status '%(status)s' of migration %(id)s, exit "
"confirmation process"
msgstr ""
-#: nova/compute/manager.py:3167
+#: nova/compute/manager.py:3219
msgid "Instance is not found during confirmation"
msgstr ""
-#: nova/compute/manager.py:3348
+#: nova/compute/manager.py:3400
#, python-format
msgid "Updating instance to original state: '%s'"
msgstr ""
-#: nova/compute/manager.py:3371
+#: nova/compute/manager.py:3423
msgid "Instance has no source host"
msgstr ""
-#: nova/compute/manager.py:3377
+#: nova/compute/manager.py:3429
msgid "destination same as source!"
msgstr ""
-#: nova/compute/manager.py:3395
+#: nova/compute/manager.py:3447
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:3659
-#, python-format
-msgid "Failed to rollback quota for failed finish_resize: %s"
-msgstr ""
-
-#: nova/compute/manager.py:3719
+#: nova/compute/manager.py:3784
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:3736
+#: nova/compute/manager.py:3801
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:3777
+#: nova/compute/manager.py:3842 nova/compute/manager.py:3859
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:3812
+#: nova/compute/manager.py:3895
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:4028
+#: nova/compute/manager.py:4115
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:4227
+#: nova/compute/manager.py:4314
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:4236
-#, python-format
-msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
-msgstr ""
-
-#: nova/compute/manager.py:4252
+#: nova/compute/manager.py:4339
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:4263
+#: nova/compute/manager.py:4350
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:4275
-#, python-format
-msgid "Failed to detach volume %(volume_id)s from %(mp)s"
-msgstr ""
-
-#: nova/compute/manager.py:4348
-#, python-format
-msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
-msgstr ""
-
-#: nova/compute/manager.py:4355
-#, python-format
-msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
-msgstr ""
-
-#: nova/compute/manager.py:4442
+#: nova/compute/manager.py:4544
#, python-format
msgid "allocate_port_for_instance returned %(ports)s ports"
msgstr ""
-#: nova/compute/manager.py:4462
+#: nova/compute/manager.py:4568
#, python-format
msgid "Port %s is not attached"
msgstr ""
-#: nova/compute/manager.py:4474 nova/tests/compute/test_compute.py:10545
+#: nova/compute/manager.py:4580 nova/tests/compute/test_compute.py:10791
#, python-format
msgid "Host %s not found"
msgstr ""
-#: nova/compute/manager.py:4628
-#, python-format
-msgid "Pre live migration failed at %s"
-msgstr ""
-
-#: nova/compute/manager.py:4658
+#: nova/compute/manager.py:4798
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:4731
+#: nova/compute/manager.py:4874
#, python-format
msgid "Migrating instance to %s finished successfully."
msgstr ""
-#: nova/compute/manager.py:4733
+#: nova/compute/manager.py:4876
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:4758
+#: nova/compute/manager.py:4901
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:4967
+#: nova/compute/manager.py:5106
msgid "An error occurred while refreshing the network cache."
msgstr ""
-#: nova/compute/manager.py:5021
+#: nova/compute/manager.py:5159
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:5026
+#: nova/compute/manager.py:5164
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:5035
+#: nova/compute/manager.py:5173
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:5045
+#: nova/compute/manager.py:5183
#, python-format
msgid "Instance %s not found"
msgstr ""
-#: nova/compute/manager.py:5050
+#: nova/compute/manager.py:5188
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:5057
+#: nova/compute/manager.py:5195
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None"
msgstr ""
-#: nova/compute/manager.py:5068
+#: nova/compute/manager.py:5206
#, python-format
msgid "Error auto-confirming resize: %s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:5097
-msgid "Periodic task failed to offload instance."
-msgstr ""
-
-#: nova/compute/manager.py:5117
+#: nova/compute/manager.py:5255
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:5137
-#, python-format
-msgid "Failed to generate usage audit for instance on host %s"
-msgstr ""
-
-#: nova/compute/manager.py:5166
+#: nova/compute/manager.py:5304
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:5188
+#: nova/compute/manager.py:5326
msgid "Bandwidth usage not supported by hypervisor."
msgstr ""
-#: nova/compute/manager.py:5311
+#: nova/compute/manager.py:5449
#, python-format
msgid ""
"Found %(num_db_instances)s in the database and %(num_vm_instances)s on "
"the hypervisor."
msgstr ""
-#: nova/compute/manager.py:5318 nova/compute/manager.py:5381
-#, python-format
-msgid "During sync_power_state the instance has a pending task (%(task)s). Skip."
-msgstr ""
-
-#: nova/compute/manager.py:5342
-msgid "Periodic sync_power_state task had an error while processing an instance."
-msgstr ""
-
-#: nova/compute/manager.py:5368
+#: nova/compute/manager.py:5515
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:5406
-msgid "Instance shutdown by itself. Calling the stop API."
+#: nova/compute/manager.py:5528
+#, python-format
+msgid "During sync_power_state the instance has a pending task (%(task)s). Skip."
msgstr ""
-#: nova/compute/manager.py:5418 nova/compute/manager.py:5427
-#: nova/compute/manager.py:5458 nova/compute/manager.py:5469
-msgid "error during stop() in sync_power_state."
+#: nova/compute/manager.py:5553
+msgid "Instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:5422
+#: nova/compute/manager.py:5572
msgid "Instance is suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:5438
+#: nova/compute/manager.py:5588
msgid "Instance is paused unexpectedly. Ignore."
msgstr ""
-#: nova/compute/manager.py:5444
+#: nova/compute/manager.py:5594
msgid "Instance is unexpectedly not found. Ignore."
msgstr ""
-#: nova/compute/manager.py:5450
+#: nova/compute/manager.py:5600
msgid "Instance is not stopped. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:5464
+#: nova/compute/manager.py:5614
msgid "Paused instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:5478
+#: nova/compute/manager.py:5628
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:5507
+#: nova/compute/manager.py:5658
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:5511
+#: nova/compute/manager.py:5662
#, python-format
msgid "Periodic reclaim failed to delete instance: %s"
msgstr ""
-#: nova/compute/manager.py:5536
+#: nova/compute/manager.py:5687
#, python-format
msgid "Deleting orphan compute node %s"
msgstr ""
-#: nova/compute/manager.py:5544 nova/compute/resource_tracker.py:392
+#: nova/compute/manager.py:5695 nova/compute/resource_tracker.py:406
#, python-format
msgid "No service record for host %s"
msgstr ""
-#: nova/compute/manager.py:5585
+#: nova/compute/manager.py:5735
#, python-format
msgid ""
"Detected instance with name label '%s' which is marked as DELETED but "
"still present on host."
msgstr ""
-#: nova/compute/manager.py:5591
+#: nova/compute/manager.py:5741
#, python-format
msgid ""
"Powering off instance with name label '%s' which is marked as DELETED but"
" still present on host."
msgstr ""
-#: nova/compute/manager.py:5600
+#: nova/compute/manager.py:5750
msgid "set_bootable is not implemented for the current driver"
msgstr ""
-#: nova/compute/manager.py:5605
+#: nova/compute/manager.py:5755
msgid "Failed to power off instance"
msgstr ""
-#: nova/compute/manager.py:5609
+#: nova/compute/manager.py:5759
#, python-format
msgid ""
"Destroying instance with name label '%s' which is marked as DELETED but "
"still present on host."
msgstr ""
-#: nova/compute/manager.py:5619
+#: nova/compute/manager.py:5769
#, python-format
msgid "Periodic cleanup failed to delete instance: %s"
msgstr ""
-#: nova/compute/manager.py:5623
+#: nova/compute/manager.py:5773
#, python-format
msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action"
msgstr ""
-#: nova/compute/manager.py:5654
+#: nova/compute/manager.py:5805
#, python-format
msgid "Setting instance back to %(state)s after: %(error)s"
msgstr ""
-#: nova/compute/manager.py:5664
+#: nova/compute/manager.py:5815
#, python-format
msgid "Setting instance back to ACTIVE after: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:106
+#: nova/compute/resource_tracker.py:111
msgid ""
"Host field should not be set on the instance until resources have been "
"claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:111
+#: nova/compute/resource_tracker.py:116
msgid ""
"Node field should not be set on the instance until resources have been "
"claimed."
msgstr ""
-#: nova/compute/resource_tracker.py:273
+#: nova/compute/resource_tracker.py:276
#, python-format
msgid "Cannot get the metrics from %s."
msgstr ""
-#: nova/compute/resource_tracker.py:292
+#: nova/compute/resource_tracker.py:295
msgid "Auditing locally available compute resources"
msgstr ""
-#: nova/compute/resource_tracker.py:297
+#: nova/compute/resource_tracker.py:300
msgid ""
"Virt driver does not support 'get_available_resource' Compute tracking "
"is disabled."
msgstr ""
-#: nova/compute/resource_tracker.py:372
+#: nova/compute/resource_tracker.py:375
#, python-format
msgid "Compute_service record created for %(host)s:%(node)s"
msgstr ""
-#: nova/compute/resource_tracker.py:378
+#: nova/compute/resource_tracker.py:381
#, python-format
msgid "Compute_service record updated for %(host)s:%(node)s"
msgstr ""
-#: nova/compute/resource_tracker.py:431
+#: nova/compute/resource_tracker.py:446
#, python-format
-msgid "Free ram (MB): %s"
+msgid ""
+"Total physical ram (MB): %(pram)s, total allocated virtual ram (MB): "
+"%(vram)s"
msgstr ""
-#: nova/compute/resource_tracker.py:432
+#: nova/compute/resource_tracker.py:450
#, python-format
msgid "Free disk (GB): %s"
msgstr ""
-#: nova/compute/resource_tracker.py:437
+#: nova/compute/resource_tracker.py:454
#, python-format
-msgid "Free VCPUS: %s"
+msgid "Total usable vcpus: %(tcpu)s, total allocated vcpus: %(ucpu)s"
msgstr ""
-#: nova/compute/resource_tracker.py:439
+#: nova/compute/resource_tracker.py:458
msgid "Free VCPU information unavailable"
msgstr ""
-#: nova/compute/resource_tracker.py:442
+#: nova/compute/resource_tracker.py:461
#, python-format
msgid "PCI stats: %s"
msgstr ""
-#: nova/compute/resource_tracker.py:478
+#: nova/compute/resource_tracker.py:512
#, python-format
msgid "Updating from migration %s"
msgstr ""
-#: nova/compute/resource_tracker.py:545
+#: nova/compute/resource_tracker.py:577
msgid "Instance not resizing, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:560
+#: nova/compute/resource_tracker.py:592
msgid "Flavor could not be found, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:650
+#: nova/compute/resource_tracker.py:682
#, python-format
msgid ""
"Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB "
"memory)"
msgstr ""
-#: nova/compute/resource_tracker.py:664
+#: nova/compute/resource_tracker.py:696
#, python-format
msgid "Missing keys: %s"
msgstr ""
@@ -5682,42 +5347,26 @@ msgstr ""
#: nova/compute/rpcapi.py:60
#, python-format
-msgid "Unable to find host for Instance %s"
-msgstr ""
-
-#: nova/compute/utils.py:209
-#, python-format
-msgid "Can't access image %(image_id)s: %(error)s"
-msgstr ""
-
-#: nova/compute/utils.py:333
-#, python-format
-msgid ""
-"No host name specified for the notification of HostAPI.%s and it will be "
-"ignored"
+msgid "Unable to find host for Instance %s"
msgstr ""
-#: nova/compute/utils.py:461
-#, python-format
-msgid ""
-"Value of 0 or None specified for %s. This behaviour will change in "
-"meaning in the K release, to mean 'call at the default rate' rather than "
-"'do not call'. To keep the 'do not call' behaviour, use a negative value."
+#: nova/compute/stats.py:49
+msgid "Unexpected type adding stats"
msgstr ""
-#: nova/compute/monitors/__init__.py:177
+#: nova/compute/monitors/__init__.py:176
#, python-format
msgid ""
"Excluding monitor %(monitor_name)s due to metric name overlap; "
"overlapping metrics: %(overlap)s"
msgstr ""
-#: nova/compute/monitors/__init__.py:185
+#: nova/compute/monitors/__init__.py:184
#, python-format
msgid "Monitor %(monitor_name)s cannot be used: %(ex)s"
msgstr ""
-#: nova/compute/monitors/__init__.py:191
+#: nova/compute/monitors/__init__.py:190
#, python-format
msgid "The following monitors have been disabled: %s"
msgstr ""
@@ -5727,46 +5376,50 @@ msgstr ""
msgid "Not all properties needed are implemented in the compute driver: %s"
msgstr ""
-#: nova/conductor/api.py:300
+#: nova/conductor/api.py:315
msgid "nova-conductor connection established successfully"
msgstr ""
-#: nova/conductor/api.py:305
+#: nova/conductor/api.py:320
msgid ""
"Timed out waiting for nova-conductor. Is it running? Or did this service"
" start before nova-conductor? Reattempting establishment of nova-"
"conductor connection..."
msgstr ""
-#: nova/conductor/manager.py:124
+#: nova/conductor/manager.py:123
#, python-format
msgid "Instance update attempted for '%(key)s' on %(instance_uuid)s"
msgstr ""
-#: nova/conductor/manager.py:522
+#: nova/conductor/manager.py:519
msgid "No valid host found for cold migrate"
msgstr ""
-#: nova/conductor/manager.py:586
+#: nova/conductor/manager.py:582
#, python-format
msgid ""
"Migration of instance %(instance_id)s to host %(dest)s unexpectedly "
"failed."
msgstr ""
-#: nova/conductor/manager.py:673
+#: nova/conductor/manager.py:669
#, python-format
msgid "Unshelve attempted but the image %s cannot be found."
msgstr ""
-#: nova/conductor/manager.py:696
+#: nova/conductor/manager.py:692
msgid "No valid host found for unshelve instance"
msgstr ""
-#: nova/conductor/manager.py:700
+#: nova/conductor/manager.py:696
msgid "Unshelve attempted but vm_state not SHELVED or SHELVED_OFFLOADED"
msgstr ""
+#: nova/conductor/manager.py:733
+msgid "No valid host found for rebuild"
+msgstr ""
+
#: nova/conductor/tasks/live_migrate.py:113
#, python-format
msgid ""
@@ -5835,85 +5488,85 @@ msgstr ""
msgid "Failed to notify cells of instance update"
msgstr ""
-#: nova/db/api.py:1685
+#: nova/db/api.py:1683
msgid "Failed to notify cells of bw_usage update"
msgstr ""
-#: nova/db/sqlalchemy/api.py:204
+#: nova/db/sqlalchemy/api.py:207
#, python-format
msgid "Deadlock detected when running '%(func_name)s': Retrying..."
msgstr ""
-#: nova/db/sqlalchemy/api.py:245
+#: nova/db/sqlalchemy/api.py:248
msgid "model or base_model parameter should be subclass of NovaBase"
msgstr ""
-#: nova/db/sqlalchemy/api.py:258
-#: nova/openstack/common/db/sqlalchemy/utils.py:174
-#: nova/virt/baremetal/db/sqlalchemy/api.py:60
+#: nova/db/sqlalchemy/api.py:261
+#: nova/openstack/common/db/sqlalchemy/utils.py:173
+#: nova/virt/baremetal/db/sqlalchemy/api.py:61
#, python-format
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:745
+#: nova/db/sqlalchemy/api.py:753
#, python-format
msgid "Invalid floating ip id %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:850
+#: nova/db/sqlalchemy/api.py:858
msgid "Failed to update usages bulk deallocating floating IP"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1006
+#: nova/db/sqlalchemy/api.py:1007
#, python-format
msgid "Invalid floating IP %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1308 nova/db/sqlalchemy/api.py:1347
+#: nova/db/sqlalchemy/api.py:1310 nova/db/sqlalchemy/api.py:1349
#, python-format
msgid "Invalid fixed IP Address %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1482
+#: nova/db/sqlalchemy/api.py:1484
#, python-format
msgid "Invalid virtual interface address %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1576
+#: nova/db/sqlalchemy/api.py:1578
#, python-format
msgid ""
"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:1735
+#: nova/db/sqlalchemy/api.py:1738
#, python-format
msgid "Invalid instance id %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:2013
+#: nova/db/sqlalchemy/api.py:2017
#, python-format
msgid "Invalid field name: %s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:3242
+#: nova/db/sqlalchemy/api.py:3246
#, python-format
msgid "Change will make usage less than 0 for the following resources: %s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:4892
+#: nova/db/sqlalchemy/api.py:4898
#, python-format
msgid ""
"Volume(%s) has lower stats then what is in the database. Instance must "
"have been rebooted or crashed. Updating totals."
msgstr ""
-#: nova/db/sqlalchemy/api.py:5249
+#: nova/db/sqlalchemy/api.py:5262
#, python-format
msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries"
msgstr ""
-#: nova/db/sqlalchemy/api.py:5639
+#: nova/db/sqlalchemy/api.py:5652
#, python-format
msgid "IntegrityError detected when archiving table %s"
msgstr ""
@@ -5946,15 +5599,15 @@ msgstr ""
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr ""
-#: nova/db/sqlalchemy/utils.py:105
+#: nova/db/sqlalchemy/utils.py:103
msgid "Specify `table_name` or `table` param"
msgstr ""
-#: nova/db/sqlalchemy/utils.py:108
+#: nova/db/sqlalchemy/utils.py:106
msgid "Specify only one param `table_name` `table`"
msgstr ""
-#: nova/db/sqlalchemy/utils.py:131 nova/db/sqlalchemy/utils.py:135
+#: nova/db/sqlalchemy/utils.py:129 nova/db/sqlalchemy/utils.py:133
#: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:84
#: nova/db/sqlalchemy/migrate_repo/versions/216_havana.py:1103
msgid "Exception while creating table."
@@ -5964,26 +5617,26 @@ msgstr ""
msgid "Exception while seeding instance_types table"
msgstr ""
-#: nova/image/glance.py:231
+#: nova/image/glance.py:235
#, python-format
msgid ""
"Error contacting glance server '%(host)s:%(port)s' for '%(method)s', "
"%(extra)s."
msgstr ""
-#: nova/image/glance.py:265
+#: nova/image/glance.py:267
#, python-format
msgid ""
"When loading the module %(module_str)s the following error occurred: "
"%(ex)s"
msgstr ""
-#: nova/image/glance.py:303
+#: nova/image/glance.py:326
#, python-format
msgid "Failed to instantiate the download handler for %(scheme)s"
msgstr ""
-#: nova/image/glance.py:319
+#: nova/image/glance.py:342
#, python-format
msgid "Successfully transferred using %s"
msgstr ""
@@ -6129,16 +5782,16 @@ msgstr ""
msgid "Not deleting key %s"
msgstr ""
-#: nova/network/api.py:198 nova/network/neutronv2/api.py:797
+#: nova/network/api.py:196 nova/network/neutronv2/api.py:845
#, python-format
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr ""
-#: nova/network/base_api.py:49
+#: nova/network/base_api.py:48
msgid "Failed storing info cache"
msgstr ""
-#: nova/network/base_api.py:68
+#: nova/network/base_api.py:67
msgid "instance is a required argument to use @refresh_cache"
msgstr ""
@@ -6151,70 +5804,70 @@ msgstr ""
msgid "Loading network driver '%s'"
msgstr ""
-#: nova/network/floating_ips.py:90
+#: nova/network/floating_ips.py:85
#, python-format
msgid "Fixed ip %s not found"
msgstr ""
-#: nova/network/floating_ips.py:180
+#: nova/network/floating_ips.py:176
#, python-format
msgid "Floating IP %s is not associated. Ignore."
msgstr ""
-#: nova/network/floating_ips.py:199
+#: nova/network/floating_ips.py:195
#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr ""
-#: nova/network/floating_ips.py:203
+#: nova/network/floating_ips.py:199
#, python-format
msgid "Address |%(address)s| is not allocated to your project |%(project)s|"
msgstr ""
-#: nova/network/floating_ips.py:223
+#: nova/network/floating_ips.py:219
#, python-format
msgid "Quota exceeded for %s, tried to allocate floating IP"
msgstr ""
-#: nova/network/floating_ips.py:283
+#: nova/network/floating_ips.py:278
msgid "Failed to update usages deallocating floating IP"
msgstr ""
-#: nova/network/floating_ips.py:385
+#: nova/network/floating_ips.py:376
#, python-format
msgid "Failed to disassociated floating address: %s"
msgstr ""
-#: nova/network/floating_ips.py:390
+#: nova/network/floating_ips.py:381
#, python-format
msgid "Interface %s not found"
msgstr ""
-#: nova/network/floating_ips.py:553
+#: nova/network/floating_ips.py:540
#, python-format
msgid "Starting migration network for instance %s"
msgstr ""
-#: nova/network/floating_ips.py:560
+#: nova/network/floating_ips.py:546
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will not migrate it "
msgstr ""
-#: nova/network/floating_ips.py:593
+#: nova/network/floating_ips.py:575
#, python-format
msgid "Finishing migration network for instance %s"
msgstr ""
-#: nova/network/floating_ips.py:601
+#: nova/network/floating_ips.py:582
#, python-format
msgid ""
"Floating ip address |%(address)s| no longer belongs to instance "
"%(instance_uuid)s. Will notsetup it."
msgstr ""
-#: nova/network/floating_ips.py:644
+#: nova/network/floating_ips.py:625
#, python-format
msgid ""
"Database inconsistency: DNS domain |%s| is registered in the Nova db but "
@@ -6222,12 +5875,12 @@ msgid ""
"ignored."
msgstr ""
-#: nova/network/floating_ips.py:684
+#: nova/network/floating_ips.py:665
#, python-format
msgid "Domain |%(domain)s| already exists, changing zone to |%(av_zone)s|."
msgstr ""
-#: nova/network/floating_ips.py:693
+#: nova/network/floating_ips.py:674
#, python-format
msgid "Domain |%(domain)s| already exists, changing project to |%(project)s|."
msgstr ""
@@ -6256,69 +5909,69 @@ msgstr ""
msgid "This shouldn't be getting called except during testing."
msgstr ""
-#: nova/network/linux_net.py:227
+#: nova/network/linux_net.py:232
#, python-format
msgid "Attempted to remove chain %s which does not exist"
msgstr ""
-#: nova/network/linux_net.py:263
+#: nova/network/linux_net.py:268
#, python-format
msgid "Unknown chain: %r"
msgstr ""
-#: nova/network/linux_net.py:294
+#: nova/network/linux_net.py:301
#, python-format
msgid ""
"Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r "
"%(top)r"
msgstr ""
-#: nova/network/linux_net.py:762
+#: nova/network/linux_net.py:777
#, python-format
msgid "Removed %(num)d duplicate rules for floating ip %(float)s"
msgstr ""
-#: nova/network/linux_net.py:810
+#: nova/network/linux_net.py:825
#, python-format
msgid "Error deleting conntrack entries for %s"
msgstr ""
-#: nova/network/linux_net.py:1068
+#: nova/network/linux_net.py:1091
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr ""
-#: nova/network/linux_net.py:1150
+#: nova/network/linux_net.py:1172
#, python-format
msgid "killing radvd threw %s"
msgstr ""
-#: nova/network/linux_net.py:1302
+#: nova/network/linux_net.py:1333
#, python-format
msgid "Unable to execute %(cmd)s. Exception: %(exception)s"
msgstr ""
-#: nova/network/linux_net.py:1360
+#: nova/network/linux_net.py:1391
#, python-format
msgid "Failed removing net device: '%s'"
msgstr ""
-#: nova/network/linux_net.py:1532
+#: nova/network/linux_net.py:1568
#, python-format
msgid "Adding interface %(interface)s to bridge %(bridge)s"
msgstr ""
-#: nova/network/linux_net.py:1538
+#: nova/network/linux_net.py:1574
#, python-format
msgid "Failed to add interface: %s"
msgstr ""
-#: nova/network/manager.py:836
+#: nova/network/manager.py:813
#, python-format
msgid "instance-dns-zone not found |%s|."
msgstr ""
-#: nova/network/manager.py:843
+#: nova/network/manager.py:820
#, python-format
msgid ""
"instance-dns-zone is |%(domain)s|, which is in availability zone "
@@ -6326,70 +5979,65 @@ msgid ""
"created."
msgstr ""
-#: nova/network/manager.py:882
-#, python-format
-msgid "Quota exceeded for %s, tried to allocate fixed IP"
-msgstr ""
-
-#: nova/network/manager.py:942
+#: nova/network/manager.py:943
msgid "Error cleaning up fixed ip allocation. Manual cleanup may be required."
msgstr ""
-#: nova/network/manager.py:972
+#: nova/network/manager.py:973
msgid "Failed to update usages deallocating fixed IP"
msgstr ""
-#: nova/network/manager.py:996
+#: nova/network/manager.py:997
#, python-format
msgid "Unable to release %s because vif doesn't exist."
msgstr ""
-#: nova/network/manager.py:1037
+#: nova/network/manager.py:1038
#, python-format
msgid "IP %s leased that is not associated"
msgstr ""
-#: nova/network/manager.py:1043
+#: nova/network/manager.py:1044
#, python-format
msgid "IP |%s| leased that isn't allocated"
msgstr ""
-#: nova/network/manager.py:1052
+#: nova/network/manager.py:1053
#, python-format
msgid "IP %s released that is not associated"
msgstr ""
-#: nova/network/manager.py:1056
+#: nova/network/manager.py:1057
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: nova/network/manager.py:1074
+#: nova/network/manager.py:1075
#, python-format
msgid "%s must be an integer"
msgstr ""
-#: nova/network/manager.py:1106
+#: nova/network/manager.py:1107
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/network/manager.py:1126
+#: nova/network/manager.py:1127
#, python-format
msgid ""
"Subnet(s) too large, defaulting to /%s. To override, specify "
"network_size flag."
msgstr ""
-#: nova/network/manager.py:1211
+#: nova/network/manager.py:1212
msgid "cidr already in use"
msgstr ""
-#: nova/network/manager.py:1214
+#: nova/network/manager.py:1215
#, python-format
msgid "requested cidr (%(cidr)s) conflicts with existing supernet (%(super)s)"
msgstr ""
-#: nova/network/manager.py:1225
+#: nova/network/manager.py:1226
#, python-format
msgid ""
"requested cidr (%(cidr)s) conflicts with existing smaller cidr "
@@ -6401,13 +6049,13 @@ msgstr ""
msgid "Network must be disassociated from project %s before delete"
msgstr ""
-#: nova/network/manager.py:1949
+#: nova/network/manager.py:1955
msgid ""
"The sum between the number of networks and the vlan start cannot be "
"greater than 4094"
msgstr ""
-#: nova/network/manager.py:1956
+#: nova/network/manager.py:1962
#, python-format
msgid ""
"The network range is not big enough to fit %(num_networks)s networks. "
@@ -6437,109 +6085,37 @@ msgstr ""
msgid "Cannot delete domain |%s|"
msgstr ""
-#: nova/network/model.py:94
+#: nova/network/model.py:96
#, python-format
msgid "Invalid IP format %s"
msgstr ""
-#: nova/network/neutronv2/api.py:212
-msgid "Neutron error: quota exceeded"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:215
-#, python-format
-msgid "Neutron error creating port on network %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:248
+#: nova/network/neutronv2/api.py:269
#, python-format
msgid "empty project id for instance %s"
msgstr ""
-#: nova/network/neutronv2/api.py:283
-msgid "No network configured!"
+#: nova/network/neutronv2/api.py:313 nova/network/neutronv2/api.py:678
+msgid "Multiple possible networks found, use a Network ID to be more specific."
msgstr ""
-#: nova/network/neutronv2/api.py:303
+#: nova/network/neutronv2/api.py:335
#, python-format
msgid ""
"Multiple security groups found matching '%s'. Use an ID to be more "
"specific."
msgstr ""
-#: nova/network/neutronv2/api.py:373
-#, python-format
-msgid "Failed to update port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:380
-#, python-format
-msgid "Failed to delete port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:443
+#: nova/network/neutronv2/api.py:489
#, python-format
msgid "Unable to reset device ID for port %s"
msgstr ""
-#: nova/network/neutronv2/api.py:451
-#, python-format
-msgid "Port %s does not exist"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:454 nova/network/neutronv2/api.py:478
-#, python-format
-msgid "Failed to delete neutron port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:576
-#, python-format
-msgid ""
-"Unable to update port %(portid)s on subnet %(subnet_id)s with failure: "
-"%(exception)s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:605
-#, python-format
-msgid "Unable to update port %(portid)s with failure: %(exception)s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:632
-msgid "Multiple possible networks found, use a Network ID to be more specific."
-msgstr ""
-
-#: nova/network/neutronv2/api.py:651
-#, python-format
-msgid "Failed to access port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:880
-#, python-format
-msgid "Unable to access floating IP %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:968
+#: nova/network/neutronv2/api.py:1021
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
-#: nova/network/neutronv2/api.py:1012
-#, python-format
-msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:1071
-#, python-format
-msgid "Unable to update host of port %s"
-msgstr ""
-
-#: nova/network/neutronv2/api.py:1107
-#, python-format
-msgid ""
-"Network %(id)s not matched with the tenants network! The ports tenant "
-"%(tenant_id)s will be used."
-msgstr ""
-
#: nova/network/security_group/neutron_driver.py:57
#, python-format
msgid "Neutron Error creating security group %s"
@@ -6622,6 +6198,14 @@ msgid ""
"%(instance)s"
msgstr ""
+#: nova/network/security_group/security_group_base.py:89
+msgid "Type and Code must be integers for ICMP protocol type"
+msgstr ""
+
+#: nova/network/security_group/security_group_base.py:92
+msgid "To and From ports must be integers"
+msgstr ""
+
#: nova/network/security_group/security_group_base.py:134
#, python-format
msgid "This rule already exists in group %s"
@@ -6632,22 +6216,22 @@ msgstr ""
msgid "Error setting %(attr)s"
msgstr ""
-#: nova/objects/base.py:247
+#: nova/objects/base.py:262
#, python-format
msgid "Unable to instantiate unregistered object type %(objtype)s"
msgstr ""
-#: nova/objects/base.py:366
+#: nova/objects/base.py:381
#, python-format
msgid "Cannot load '%s' in the base class"
msgstr ""
-#: nova/objects/base.py:412
+#: nova/objects/base.py:427
#, python-format
msgid "%(objname)s object has no attribute '%(attrname)s'"
msgstr ""
-#: nova/objects/block_device.py:136
+#: nova/objects/block_device.py:149
msgid "Volume does not belong to the requested instance."
msgstr ""
@@ -6661,44 +6245,44 @@ msgstr ""
msgid "Element %(key)s:%(val)s must be of type %(expected)s not %(actual)s"
msgstr ""
-#: nova/objects/fields.py:157
+#: nova/objects/fields.py:165
#, python-format
msgid "Field `%s' cannot be None"
msgstr ""
-#: nova/objects/fields.py:232
+#: nova/objects/fields.py:246
#, python-format
msgid "A string is required here, not %s"
msgstr ""
-#: nova/objects/fields.py:268
+#: nova/objects/fields.py:286
msgid "A datetime.datetime is required here"
msgstr ""
-#: nova/objects/fields.py:306 nova/objects/fields.py:315
-#: nova/objects/fields.py:324
+#: nova/objects/fields.py:328 nova/objects/fields.py:337
+#: nova/objects/fields.py:346
#, python-format
msgid "Network \"%s\" is not valid"
msgstr ""
-#: nova/objects/fields.py:363
+#: nova/objects/fields.py:385
msgid "A list is required here"
msgstr ""
-#: nova/objects/fields.py:379
+#: nova/objects/fields.py:405
msgid "A dict is required here"
msgstr ""
-#: nova/objects/fields.py:418
+#: nova/objects/fields.py:449
#, python-format
msgid "An object of type %s is required here"
msgstr ""
-#: nova/objects/fields.py:445
+#: nova/objects/fields.py:488
msgid "A NetworkModel is required here"
msgstr ""
-#: nova/objects/instance.py:432
+#: nova/objects/instance.py:433
#, python-format
msgid "No save handler for %s"
msgstr ""
@@ -6707,11 +6291,11 @@ msgstr ""
msgid "Failed to notify cells of instance info cache update"
msgstr ""
-#: nova/openstack/common/gettextutils.py:320
+#: nova/openstack/common/gettextutils.py:301
msgid "Message objects do not support addition."
msgstr ""
-#: nova/openstack/common/gettextutils.py:330
+#: nova/openstack/common/gettextutils.py:311
msgid ""
"Message objects do not support str() because they may contain non-ascii "
"characters. Please use unicode() or translate() instead."
@@ -6726,32 +6310,32 @@ msgstr ""
msgid "Snapshot list encountered but no header found!"
msgstr ""
-#: nova/openstack/common/lockutils.py:102
+#: nova/openstack/common/lockutils.py:101
#, python-format
msgid "Unable to acquire lock on `%(filename)s` due to %(exception)s"
msgstr ""
-#: nova/openstack/common/log.py:327
+#: nova/openstack/common/log.py:289
#, python-format
msgid "Deprecated: %s"
msgstr ""
-#: nova/openstack/common/log.py:436
+#: nova/openstack/common/log.py:397
#, python-format
msgid "Error loading logging config %(log_config)s: %(err_msg)s"
msgstr ""
-#: nova/openstack/common/log.py:486
+#: nova/openstack/common/log.py:458
#, python-format
msgid "syslog facility must be one of: %s"
msgstr ""
-#: nova/openstack/common/log.py:729
+#: nova/openstack/common/log.py:709
#, python-format
msgid "Fatal call to deprecated config: %(msg)s"
msgstr ""
-#: nova/openstack/common/periodic_task.py:39
+#: nova/openstack/common/periodic_task.py:40
#, python-format
msgid "Unexpected argument for periodic task creation: %(arg)s."
msgstr ""
@@ -6805,40 +6389,50 @@ msgstr ""
msgid "process_input not supported over SSH"
msgstr ""
-#: nova/openstack/common/sslutils.py:98
+#: nova/openstack/common/sslutils.py:95
#, python-format
msgid "Invalid SSL version : %s"
msgstr ""
-#: nova/openstack/common/strutils.py:92
+#: nova/openstack/common/strutils.py:114
#, python-format
msgid "Unrecognized value '%(val)s', acceptable values are: %(acceptable)s"
msgstr ""
-#: nova/openstack/common/strutils.py:202
+#: nova/openstack/common/strutils.py:219
#, python-format
msgid "Invalid unit system: \"%s\""
msgstr ""
-#: nova/openstack/common/strutils.py:211
+#: nova/openstack/common/strutils.py:228
#, python-format
msgid "Invalid string format: %s"
msgstr ""
-#: nova/openstack/common/versionutils.py:69
+#: nova/openstack/common/versionutils.py:86
#, python-format
msgid ""
"%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s and "
"may be removed in %(remove_in)s."
msgstr ""
-#: nova/openstack/common/versionutils.py:73
+#: nova/openstack/common/versionutils.py:90
#, python-format
msgid ""
"%(what)s is deprecated as of %(as_of)s and may be removed in "
"%(remove_in)s. It will not be superseded."
msgstr ""
+#: nova/openstack/common/versionutils.py:94
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s."
+msgstr ""
+
+#: nova/openstack/common/versionutils.py:97
+#, python-format
+msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded."
+msgstr ""
+
#: nova/openstack/common/db/sqlalchemy/migration.py:226
#, python-format
msgid ""
@@ -6852,18 +6446,18 @@ msgid ""
"the current version of the schema manually."
msgstr ""
-#: nova/openstack/common/db/sqlalchemy/utils.py:119
+#: nova/openstack/common/db/sqlalchemy/utils.py:118
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr ""
-#: nova/openstack/common/db/sqlalchemy/utils.py:162
+#: nova/openstack/common/db/sqlalchemy/utils.py:161
#, python-format
msgid ""
"There is no `deleted` column in `%s` table. Project doesn't use soft-"
"deleted feature."
msgstr ""
-#: nova/openstack/common/db/sqlalchemy/utils.py:181
+#: nova/openstack/common/db/sqlalchemy/utils.py:180
#, python-format
msgid "There is no `project_id` column in `%s` table."
msgstr ""
@@ -6890,7 +6484,7 @@ msgstr ""
msgid "Unsupported id columns type"
msgstr ""
-#: nova/pci/pci_manager.py:156
+#: nova/pci/pci_manager.py:113
#, python-format
msgid ""
"Trying to remove device with %(status)s ownership %(instance_uuid)s "
@@ -6922,66 +6516,73 @@ msgstr ""
msgid "Driver must implement select_destinations"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:80
+#: nova/scheduler/filter_scheduler.py:84
#, python-format
msgid ""
"Attempting to build %(num_instances)d instance(s) uuids: "
"%(instance_uuids)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:109
+#: nova/scheduler/filter_scheduler.py:113
#, python-format
msgid "Choosing host %(weighed_host)s for instance %(instance_uuid)s"
msgstr ""
-#: nova/scheduler/filter_scheduler.py:170
+#: nova/scheduler/filter_scheduler.py:173
msgid "Instance disappeared during scheduling"
msgstr ""
-#: nova/scheduler/host_manager.py:173
+#: nova/scheduler/filter_scheduler.py:219
+msgid "ServerGroupAffinityFilter not configured"
+msgstr ""
+
+#: nova/scheduler/filter_scheduler.py:224
+msgid "ServerGroupAntiAffinityFilter not configured"
+msgstr ""
+
+#: nova/scheduler/host_manager.py:169
#, python-format
msgid "Metric name unknown of %r"
msgstr ""
-#: nova/scheduler/host_manager.py:188
+#: nova/scheduler/host_manager.py:184
#, python-format
msgid ""
"Host has more disk space than database expected (%(physical)sgb > "
"%(database)sgb)"
msgstr ""
-#: nova/scheduler/host_manager.py:365
+#: nova/scheduler/host_manager.py:311
#, python-format
msgid "Host filter ignoring hosts: %s"
msgstr ""
-#: nova/scheduler/host_manager.py:377
+#: nova/scheduler/host_manager.py:323
#, python-format
msgid "Host filter forcing available hosts to %s"
msgstr ""
-#: nova/scheduler/host_manager.py:380
+#: nova/scheduler/host_manager.py:326
#, python-format
msgid "No hosts matched due to not matching 'force_hosts' value of '%s'"
msgstr ""
-#: nova/scheduler/host_manager.py:393
+#: nova/scheduler/host_manager.py:339
#, python-format
msgid "Host filter forcing available nodes to %s"
msgstr ""
-#: nova/scheduler/host_manager.py:396
+#: nova/scheduler/host_manager.py:342
#, python-format
msgid "No nodes matched due to not matching 'force_nodes' value of '%s'"
msgstr ""
-#: nova/scheduler/host_manager.py:444
-#: nova/scheduler/filters/trusted_filter.py:208
+#: nova/scheduler/host_manager.py:390
#, python-format
msgid "No service for compute ID %s"
msgstr ""
-#: nova/scheduler/host_manager.py:462
+#: nova/scheduler/host_manager.py:408
#, python-format
msgid "Removing dead compute node %(host)s:%(node)s from scheduler"
msgstr ""
@@ -7017,7 +6618,7 @@ msgstr ""
msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
msgstr ""
-#: nova/scheduler/utils.py:233
+#: nova/scheduler/utils.py:231
#, python-format
msgid "Ignoring the invalid elements of the option %(name)s: %(options)s"
msgstr ""
@@ -7027,6 +6628,10 @@ msgstr ""
msgid "%(host_state)s has not been heard from in a while"
msgstr ""
+#: nova/scheduler/filters/exact_core_filter.py:36
+msgid "VCPUs not set; assuming CPU collection broken"
+msgstr ""
+
#: nova/servicegroup/api.py:70
#, python-format
msgid "unknown ServiceGroup driver name: %s"
@@ -7106,16 +6711,6 @@ msgstr ""
msgid "ZooKeeperDriver.leave: %(id)s has not joined to the %(gr)s group"
msgstr ""
-#: nova/storage/linuxscsi.py:100
-#, python-format
-msgid "Multipath call failed exit (%(code)s)"
-msgstr ""
-
-#: nova/storage/linuxscsi.py:121
-#, python-format
-msgid "Couldn't find multipath device %s"
-msgstr ""
-
#: nova/tests/fake_ldap.py:33
msgid "Attempted to instantiate singleton"
msgstr ""
@@ -7124,15 +6719,15 @@ msgstr ""
msgid "status must be available"
msgstr ""
-#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:245
+#: nova/tests/fake_volume.py:191 nova/volume/cinder.py:290
msgid "already attached"
msgstr ""
-#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:256
+#: nova/tests/fake_volume.py:195 nova/volume/cinder.py:301
msgid "Instance and volume not in same availability_zone"
msgstr ""
-#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:262
+#: nova/tests/fake_volume.py:200 nova/volume/cinder.py:307
msgid "already detached"
msgstr ""
@@ -7140,49 +6735,53 @@ msgstr ""
msgid "unexpected role header"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3202
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2425
+#: nova/tests/api/openstack/test_faults.py:47
+msgid "Should be translated."
+msgstr ""
+
+#: nova/tests/api/openstack/compute/test_servers.py:3279
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2438
msgid ""
"Quota exceeded for instances: Requested 1, but already used 10 of 10 "
"instances"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3207
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2430
+#: nova/tests/api/openstack/compute/test_servers.py:3284
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2443
msgid "Quota exceeded for ram: Requested 4096, but already used 8192 of 10240 ram"
msgstr ""
-#: nova/tests/api/openstack/compute/test_servers.py:3212
-#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2435
+#: nova/tests/api/openstack/compute/test_servers.py:3289
+#: nova/tests/api/openstack/compute/plugins/v3/test_servers.py:2448
msgid "Quota exceeded for cores: Requested 2, but already used 9 of 10 cores"
msgstr ""
-#: nova/tests/compute/test_compute.py:1680
-#: nova/tests/compute/test_compute.py:1707
-#: nova/tests/compute/test_compute.py:1785
-#: nova/tests/compute/test_compute.py:1825
-#: nova/tests/compute/test_compute.py:5546
+#: nova/tests/compute/test_compute.py:1770
+#: nova/tests/compute/test_compute.py:1797
+#: nova/tests/compute/test_compute.py:1875
+#: nova/tests/compute/test_compute.py:1915
+#: nova/tests/compute/test_compute.py:5718
#, python-format
msgid "Running instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:1687
-#: nova/tests/compute/test_compute.py:1755
-#: nova/tests/compute/test_compute.py:1793
+#: nova/tests/compute/test_compute.py:1777
+#: nova/tests/compute/test_compute.py:1845
+#: nova/tests/compute/test_compute.py:1883
#, python-format
msgid "After terminating instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:5557
+#: nova/tests/compute/test_compute.py:5729
#, python-format
msgid "After force-killing instances: %s"
msgstr ""
-#: nova/tests/compute/test_compute.py:6173
+#: nova/tests/compute/test_compute.py:6345
msgid "wrong host/node"
msgstr ""
-#: nova/tests/compute/test_compute.py:10753
+#: nova/tests/compute/test_compute.py:10999
msgid "spawn error"
msgstr ""
@@ -7190,7 +6789,16 @@ msgstr ""
msgid "Keypair data is invalid"
msgstr ""
-#: nova/tests/db/test_migrations.py:866
+#: nova/tests/compute/test_resources.py:78
+#, python-format
+msgid "Free %(free)d < requested %(requested)d "
+msgstr ""
+
+#: nova/tests/compute/test_resources.py:329
+msgid "Free CPUs 2.00 VCPUs < requested 5 VCPUs"
+msgstr ""
+
+#: nova/tests/db/test_migrations.py:931
#, python-format
msgid ""
"The following migrations are missing a downgrade:\n"
@@ -7269,34 +6877,56 @@ msgstr ""
msgid "Unexpected status code"
msgstr ""
-#: nova/tests/virt/hyperv/test_hypervapi.py:512
+#: nova/tests/virt/hyperv/test_hypervapi.py:515
msgid "fake vswitch not found"
msgstr ""
-#: nova/tests/virt/hyperv/test_hypervapi.py:965
+#: nova/tests/virt/hyperv/test_hypervapi.py:968
msgid "Simulated failure"
msgstr ""
-#: nova/tests/virt/libvirt/fakelibvirt.py:1019
+#: nova/tests/virt/libvirt/fakelibvirt.py:1051
msgid "Expected a list for 'auth' parameter"
msgstr ""
-#: nova/tests/virt/libvirt/fakelibvirt.py:1023
+#: nova/tests/virt/libvirt/fakelibvirt.py:1055
msgid "Expected a function in 'auth[0]' parameter"
msgstr ""
-#: nova/tests/virt/libvirt/fakelibvirt.py:1027
+#: nova/tests/virt/libvirt/fakelibvirt.py:1059
msgid "Expected a function in 'auth[1]' parameter"
msgstr ""
-#: nova/tests/virt/libvirt/fakelibvirt.py:1038
+#: nova/tests/virt/libvirt/fakelibvirt.py:1070
msgid ""
"virEventRegisterDefaultImpl() must be called before "
"connection is used."
msgstr ""
-#: nova/tests/virt/vmwareapi/test_vm_util.py:196
-#: nova/virt/vmwareapi/vm_util.py:1087
+#: nova/tests/virt/vmwareapi/fake.py:241
+#, python-format
+msgid "Property %(attr)s not set for the managed object %(name)s"
+msgstr ""
+
+#: nova/tests/virt/vmwareapi/fake.py:985
+msgid "There is no VM registered"
+msgstr ""
+
+#: nova/tests/virt/vmwareapi/fake.py:987 nova/tests/virt/vmwareapi/fake.py:1338
+#, python-format
+msgid "Virtual Machine with ref %s is not there"
+msgstr ""
+
+#: nova/tests/virt/vmwareapi/fake.py:1127
+msgid "Session Invalid"
+msgstr ""
+
+#: nova/tests/virt/vmwareapi/fake.py:1335
+msgid "No Virtual Machine has been registered yet"
+msgstr ""
+
+#: nova/tests/virt/vmwareapi/test_ds_util.py:215
+#: nova/virt/vmwareapi/ds_util.py:261
#, python-format
msgid "Datastore regex %s did not match any datastores"
msgstr ""
@@ -7308,101 +6938,135 @@ msgid ""
"left to copy"
msgstr ""
-#: nova/tests/virt/xenapi/image/test_bittorrent.py:126
-#: nova/virt/xenapi/image/bittorrent.py:81
+#: nova/tests/virt/xenapi/image/test_bittorrent.py:125
+#: nova/virt/xenapi/image/bittorrent.py:80
msgid ""
"Cannot create default bittorrent URL without torrent_base_url set or "
"torrent URL fetcher extension"
msgstr ""
-#: nova/tests/virt/xenapi/image/test_bittorrent.py:160
-#: nova/virt/xenapi/image/bittorrent.py:85
+#: nova/tests/virt/xenapi/image/test_bittorrent.py:159
+#: nova/virt/xenapi/image/bittorrent.py:84
msgid "Multiple torrent URL fetcher extensions found. Failing."
msgstr ""
-#: nova/virt/block_device.py:243
+#: nova/virt/block_device.py:255
#, python-format
msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/virt/block_device.py:362
+#: nova/virt/block_device.py:401
#, python-format
msgid "Booting with volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/virt/cpu.py:56 nova/virt/cpu.py:60
-#, python-format
-msgid "Invalid range expression %r"
-msgstr ""
-
-#: nova/virt/cpu.py:69
+#: nova/virt/diagnostics.py:143
#, python-format
-msgid "Invalid exclusion expression %r"
+msgid "Invalid type for %s"
msgstr ""
-#: nova/virt/cpu.py:76
+#: nova/virt/diagnostics.py:147
#, python-format
-msgid "Invalid inclusion expression %r"
+msgid "Invalid type for %s entry"
msgstr ""
-#: nova/virt/cpu.py:81
-#, python-format
-msgid "No CPUs available after parsing %r"
+#: nova/virt/driver.py:708
+msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
-#: nova/virt/driver.py:1207
+#: nova/virt/driver.py:1264
msgid "Event must be an instance of nova.virt.event.Event"
msgstr ""
-#: nova/virt/driver.py:1213
+#: nova/virt/driver.py:1270
#, python-format
msgid "Exception dispatching event %(event)s: %(ex)s"
msgstr ""
-#: nova/virt/driver.py:1295
+#: nova/virt/driver.py:1364
msgid "Compute driver option required, but not specified"
msgstr ""
-#: nova/virt/driver.py:1298
+#: nova/virt/driver.py:1367
#, python-format
msgid "Loading compute driver '%s'"
msgstr ""
-#: nova/virt/driver.py:1305
+#: nova/virt/driver.py:1374
msgid "Unable to load the virtualization driver"
msgstr ""
-#: nova/virt/fake.py:216
+#: nova/virt/event.py:33
+msgid "Started"
+msgstr ""
+
+#: nova/virt/event.py:34
+msgid "Stopped"
+msgstr ""
+
+#: nova/virt/event.py:35
+msgid "Paused"
+msgstr ""
+
+#: nova/virt/event.py:36
+msgid "Resumed"
+msgstr ""
+
+#: nova/virt/event.py:108
+msgid "Unknown"
+msgstr ""
+
+#: nova/virt/fake.py:217
#, python-format
msgid "Key '%(key)s' not in instances '%(inst)s'"
msgstr ""
-#: nova/virt/firewall.py:178
+#: nova/virt/firewall.py:174
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
-#: nova/virt/images.py:86
+#: nova/virt/hardware.py:46
+#, python-format
+msgid "No CPUs available after parsing %r"
+msgstr ""
+
+#: nova/virt/hardware.py:78 nova/virt/hardware.py:82
+#, python-format
+msgid "Invalid range expression %r"
+msgstr ""
+
+#: nova/virt/hardware.py:91
+#, python-format
+msgid "Invalid exclusion expression %r"
+msgstr ""
+
+#: nova/virt/hardware.py:98
+#, python-format
+msgid "Invalid inclusion expression %r"
+msgstr ""
+
+#: nova/virt/images.py:81
msgid "'qemu-img info' parsing failed."
msgstr ""
-#: nova/virt/images.py:92
+#: nova/virt/images.py:87
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr ""
-#: nova/virt/images.py:105
+#: nova/virt/images.py:100
#, python-format
msgid ""
"%(base)s virtual size %(disk_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/images.py:122
+#: nova/virt/images.py:117
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr ""
-#: nova/virt/storage_users.py:63 nova/virt/storage_users.py:101
+#: nova/virt/storage_users.py:64 nova/virt/storage_users.py:102
#, python-format
msgid "Cannot decode JSON from %(id_path)s"
msgstr ""
@@ -7435,37 +7099,37 @@ msgstr ""
msgid "Baremetal node id not supplied to driver for %r"
msgstr ""
-#: nova/virt/baremetal/driver.py:289
+#: nova/virt/baremetal/driver.py:292
#, python-format
msgid "Error deploying instance %(instance)s on baremetal node %(node)s."
msgstr ""
-#: nova/virt/baremetal/driver.py:364
+#: nova/virt/baremetal/driver.py:367
#, python-format
msgid "Baremetal power manager failed to restart node for instance %r"
msgstr ""
-#: nova/virt/baremetal/driver.py:375
+#: nova/virt/baremetal/driver.py:379
#, python-format
msgid "Destroy called on non-existing instance %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:393
+#: nova/virt/baremetal/driver.py:397
#, python-format
msgid "Error from baremetal driver during destroy: %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:398
+#: nova/virt/baremetal/driver.py:402
#, python-format
msgid "Error while recording destroy failure in baremetal database: %s"
msgstr ""
-#: nova/virt/baremetal/driver.py:413
+#: nova/virt/baremetal/driver.py:417
#, python-format
msgid "Baremetal power manager failed to stop node for instance %r"
msgstr ""
-#: nova/virt/baremetal/driver.py:426
+#: nova/virt/baremetal/driver.py:430
#, python-format
msgid "Baremetal power manager failed to start node for instance %r"
msgstr ""
@@ -7550,7 +7214,7 @@ msgid ""
"passed to baremetal driver: %s"
msgstr ""
-#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:317
+#: nova/virt/baremetal/pxe.py:465 nova/virt/baremetal/tilera.py:318
#, python-format
msgid "Node associated with another instance while waiting for deploy of %s"
msgstr ""
@@ -7570,7 +7234,7 @@ msgstr ""
msgid "PXE deploy failed for instance %s"
msgstr ""
-#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:342
+#: nova/virt/baremetal/pxe.py:483 nova/virt/baremetal/tilera.py:343
#, python-format
msgid "Baremetal node deleted while waiting for deployment of instance %s"
msgstr ""
@@ -7587,21 +7251,21 @@ msgid ""
"not passed to baremetal driver: %s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:323
+#: nova/virt/baremetal/tilera.py:324
#, python-format
msgid "Tilera deploy started for instance %s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:329
+#: nova/virt/baremetal/tilera.py:330
#, python-format
msgid "Tilera deploy completed for instance %s"
msgstr ""
-#: nova/virt/baremetal/tilera.py:337
+#: nova/virt/baremetal/tilera.py:338
msgid "Node is unknown error state."
msgstr ""
-#: nova/virt/baremetal/tilera.py:340
+#: nova/virt/baremetal/tilera.py:341
#, python-format
msgid "Tilera deploy failed for instance %s"
msgstr ""
@@ -7703,86 +7367,69 @@ msgstr ""
msgid "baremetal driver was unable to delete tid %s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:189
+#: nova/virt/baremetal/volume_driver.py:195 nova/virt/hyperv/volumeops.py:196
msgid "Could not determine iscsi initiator name"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:234
+#: nova/virt/baremetal/volume_driver.py:225
#, python-format
msgid "No fixed PXE IP is associated to %s"
msgstr ""
-#: nova/virt/baremetal/volume_driver.py:288
+#: nova/virt/baremetal/volume_driver.py:283
#, python-format
msgid "detach volume could not find tid for %s"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:198
+#: nova/virt/baremetal/db/sqlalchemy/api.py:199
msgid "instance_uuid must be supplied to bm_node_associate_and_update"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:210
+#: nova/virt/baremetal/db/sqlalchemy/api.py:211
#, python-format
msgid "Failed to associate instance %(i_uuid)s to baremetal node %(n_uuid)s."
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:245
-#: nova/virt/baremetal/db/sqlalchemy/api.py:287
+#: nova/virt/baremetal/db/sqlalchemy/api.py:246
+#: nova/virt/baremetal/db/sqlalchemy/api.py:288
#, python-format
msgid "Baremetal interface %s not found"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:297
+#: nova/virt/baremetal/db/sqlalchemy/api.py:298
#, python-format
msgid "Baremetal interface %s already in use"
msgstr ""
-#: nova/virt/baremetal/db/sqlalchemy/api.py:310
+#: nova/virt/baremetal/db/sqlalchemy/api.py:311
#, python-format
msgid "Baremetal virtual interface %s not found"
msgstr ""
-#: nova/virt/disk/api.py:285
+#: nova/virt/disk/api.py:292
msgid "image already mounted"
msgstr ""
-#: nova/virt/disk/api.py:359
-#, python-format
-msgid "Ignoring error injecting data into image (%(e)s)"
-msgstr ""
-
-#: nova/virt/disk/api.py:381
-#, python-format
-msgid ""
-"Failed to mount container filesystem '%(image)s' on '%(target)s': "
-"%(errors)s"
-msgstr ""
-
-#: nova/virt/disk/api.py:411
+#: nova/virt/disk/api.py:418
#, python-format
msgid "Failed to teardown container filesystem: %s"
msgstr ""
-#: nova/virt/disk/api.py:424
+#: nova/virt/disk/api.py:431
#, python-format
msgid "Failed to umount container filesystem: %s"
msgstr ""
-#: nova/virt/disk/api.py:449
-#, python-format
-msgid "Ignoring error injecting %(inject)s into image (%(e)s)"
-msgstr ""
-
-#: nova/virt/disk/api.py:609
+#: nova/virt/disk/api.py:616
msgid "Not implemented on Windows"
msgstr ""
-#: nova/virt/disk/api.py:636
+#: nova/virt/disk/api.py:643
#, python-format
msgid "User %(username)s not found in password file."
msgstr ""
-#: nova/virt/disk/api.py:652
+#: nova/virt/disk/api.py:659
#, python-format
msgid "User %(username)s not found in shadow file."
msgstr ""
@@ -7862,44 +7509,44 @@ msgstr ""
msgid "Detaching from erroneous nbd device returned error: %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:64
+#: nova/virt/disk/vfs/guestfs.py:77
#, python-format
msgid "No operating system found in %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:70
+#: nova/virt/disk/vfs/guestfs.py:83
#, python-format
msgid "Multi-boot operating system found in %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:81
+#: nova/virt/disk/vfs/guestfs.py:94
#, python-format
msgid "No mount points found in %(root)s of %(imgfile)s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:95
+#: nova/virt/disk/vfs/guestfs.py:108
#, python-format
msgid ""
"Error mounting %(device)s to %(dir)s in image %(imgfile)s with libguestfs"
" (%(e)s)"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:131
+#: nova/virt/disk/vfs/guestfs.py:156
#, python-format
msgid "Error mounting %(imgfile)s with libguestfs (%(e)s)"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:147
+#: nova/virt/disk/vfs/guestfs.py:172
#, python-format
msgid "Failed to close augeas %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:155
+#: nova/virt/disk/vfs/guestfs.py:180
#, python-format
msgid "Failed to shutdown appliance %s"
msgstr ""
-#: nova/virt/disk/vfs/guestfs.py:163
+#: nova/virt/disk/vfs/guestfs.py:188
#, python-format
msgid "Failed to close guest handle %s"
msgstr ""
@@ -7913,11 +7560,11 @@ msgstr ""
msgid "The ISCSI initiator name can't be found. Choosing the default one"
msgstr ""
-#: nova/virt/hyperv/driver.py:165
+#: nova/virt/hyperv/driver.py:169
msgid "VIF plugging is not supported by the Hyper-V driver."
msgstr ""
-#: nova/virt/hyperv/driver.py:170
+#: nova/virt/hyperv/driver.py:174
msgid "VIF unplugging is not supported by the Hyper-V driver."
msgstr ""
@@ -7965,22 +7612,27 @@ msgstr ""
msgid "Duplicate VM name found: %s"
msgstr ""
-#: nova/virt/hyperv/migrationops.py:97
+#: nova/virt/hyperv/migrationops.py:98
msgid "Cannot cleanup migration files"
msgstr ""
-#: nova/virt/hyperv/migrationops.py:105
+#: nova/virt/hyperv/migrationops.py:106
#, python-format
msgid ""
"Cannot resize the root disk to a smaller size. Current size: "
"%(curr_root_gb)s GB. Requested size: %(new_root_gb)s GB"
msgstr ""
-#: nova/virt/hyperv/migrationops.py:200
+#: nova/virt/hyperv/migrationops.py:155
+#, python-format
+msgid "Config drive is required by instance: %s, but it does not exist."
+msgstr ""
+
+#: nova/virt/hyperv/migrationops.py:214
msgid "Cannot resize a VHD to a smaller size"
msgstr ""
-#: nova/virt/hyperv/migrationops.py:245
+#: nova/virt/hyperv/migrationops.py:259
#, python-format
msgid "Cannot find boot VHD file for instance: %s"
msgstr ""
@@ -7999,7 +7651,7 @@ msgstr ""
msgid "No external vswitch found"
msgstr ""
-#: nova/virt/hyperv/pathutils.py:71
+#: nova/virt/hyperv/pathutils.py:73
#, python-format
msgid "The file copy from %(src)s to %(dest)s failed"
msgstr ""
@@ -8009,30 +7661,32 @@ msgstr ""
msgid "Failed to remove snapshot for VM %s"
msgstr ""
-#: nova/virt/hyperv/vhdutils.py:65 nova/virt/hyperv/vhdutilsv2.py:63
+#: nova/virt/hyperv/utilsfactory.py:68
+msgid ""
+"The \"force_hyperv_utils_v1\" option cannot be set to \"True\" on Windows"
+" Server / Hyper-V Server 2012 R2 or above as the WMI "
+"\"root/virtualization\" namespace is no longer supported."
+msgstr ""
+
+#: nova/virt/hyperv/vhdutils.py:66 nova/virt/hyperv/vhdutilsv2.py:64
#, python-format
msgid "Unsupported disk format: %s"
msgstr ""
-#: nova/virt/hyperv/vhdutils.py:150
-#, python-format
-msgid "The %(vhd_type)s type VHD is not supported"
+#: nova/virt/hyperv/vhdutils.py:77
+msgid "VHD differencing disks cannot be resized"
msgstr ""
-#: nova/virt/hyperv/vhdutils.py:161
+#: nova/virt/hyperv/vhdutils.py:165
#, python-format
msgid "Unable to obtain block size from VHD %(vhd_path)s"
msgstr ""
-#: nova/virt/hyperv/vhdutils.py:208
+#: nova/virt/hyperv/vhdutils.py:212
msgid "Unsupported virtual disk format"
msgstr ""
-#: nova/virt/hyperv/vhdutilsv2.py:134
-msgid "Differencing VHDX images are not supported"
-msgstr ""
-
-#: nova/virt/hyperv/vhdutilsv2.py:157
+#: nova/virt/hyperv/vhdutilsv2.py:160
#, python-format
msgid "Unable to obtain internal size from VHDX: %(vhd_path)s. Exception: %(ex)s"
msgstr ""
@@ -8042,46 +7696,46 @@ msgstr ""
msgid "VIF driver not found for network_api_class: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:169
+#: nova/virt/hyperv/vmops.py:198
#, python-format
msgid ""
-"Cannot resize a VHD to a smaller size, the original size is "
-"%(base_vhd_size)s, the newer size is %(root_vhd_size)s"
+"Cannot resize a VHD to a smaller size, the original size is %(old_size)s,"
+" the newer size is %(new_size)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:206
+#: nova/virt/hyperv/vmops.py:228
msgid "Spawning new instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:280 nova/virt/vmwareapi/vmops.py:520
+#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:574
#, python-format
msgid "Invalid config_drive_format \"%s\""
msgstr ""
-#: nova/virt/hyperv/vmops.py:283 nova/virt/vmwareapi/vmops.py:524
+#: nova/virt/hyperv/vmops.py:307 nova/virt/vmwareapi/vmops.py:578
msgid "Using config drive for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:296
+#: nova/virt/hyperv/vmops.py:320
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:304 nova/virt/vmwareapi/vmops.py:549
+#: nova/virt/hyperv/vmops.py:328 nova/virt/vmwareapi/vmops.py:603
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:340
+#: nova/virt/hyperv/vmops.py:371
msgid "Got request to destroy instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:359
+#: nova/virt/hyperv/vmops.py:390
#, python-format
msgid "Failed to destroy instance: %s"
msgstr ""
-#: nova/virt/hyperv/vmops.py:412
+#: nova/virt/hyperv/vmops.py:443
#, python-format
msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
msgstr ""
@@ -8123,12 +7777,12 @@ msgstr ""
msgid "Metrics collection is not supported on this version of Hyper-V"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:146
+#: nova/virt/hyperv/volumeops.py:148
#, python-format
msgid "Unable to attach volume to instance %s"
msgstr ""
-#: nova/virt/hyperv/volumeops.py:215 nova/virt/hyperv/volumeops.py:229
+#: nova/virt/hyperv/volumeops.py:222 nova/virt/hyperv/volumeops.py:236
#, python-format
msgid "Unable to find a mounted disk for target_iqn: %s"
msgstr ""
@@ -8158,136 +7812,151 @@ msgstr ""
msgid "Unable to determine disk bus for '%s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:542
+#: nova/virt/libvirt/driver.py:550
#, python-format
msgid "Connection to libvirt lost: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:724
+#: nova/virt/libvirt/driver.py:739
#, python-format
msgid "Can not handle authentication request for %d credentials"
msgstr ""
-#: nova/virt/libvirt/driver.py:868
+#: nova/virt/libvirt/driver.py:922
msgid "operation time out"
msgstr ""
-#: nova/virt/libvirt/driver.py:1187
+#: nova/virt/libvirt/driver.py:1246
#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
msgstr ""
-#: nova/virt/libvirt/driver.py:1194
+#: nova/virt/libvirt/driver.py:1253
#, python-format
msgid "Volume sets block size, but libvirt '%s' or later is required."
msgstr ""
-#: nova/virt/libvirt/driver.py:1292
+#: nova/virt/libvirt/driver.py:1351
msgid "Swap only supports host devices"
msgstr ""
-#: nova/virt/libvirt/driver.py:1579
+#: nova/virt/libvirt/driver.py:1638
msgid "libvirt error while requesting blockjob info."
msgstr ""
-#: nova/virt/libvirt/driver.py:1712
+#: nova/virt/libvirt/driver.py:1783
msgid "Found no disk to snapshot."
msgstr ""
-#: nova/virt/libvirt/driver.py:1790
+#: nova/virt/libvirt/driver.py:1875
#, python-format
msgid "Unknown type: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1795
+#: nova/virt/libvirt/driver.py:1880
msgid "snapshot_id required in create_info"
msgstr ""
-#: nova/virt/libvirt/driver.py:1853
+#: nova/virt/libvirt/driver.py:1938
#, python-format
msgid "Libvirt '%s' or later is required for online deletion of volume snapshots."
msgstr ""
-#: nova/virt/libvirt/driver.py:1860
+#: nova/virt/libvirt/driver.py:1945
#, python-format
msgid "Unknown delete_info type %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1890
+#: nova/virt/libvirt/driver.py:1981
+#, python-format
+msgid "Disk with id: %s not found attached to instance."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1990
+msgid "filename cannot be None"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2019
+#, python-format
+msgid "no match found for %s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2076
#, python-format
-msgid "Unable to locate disk matching id: %s"
+msgid ""
+"Relative blockcommit support was not detected. Libvirt '%s' or later is "
+"required for online deletion of network storage-backed volume snapshots."
msgstr ""
-#: nova/virt/libvirt/driver.py:2330 nova/virt/xenapi/vmops.py:1552
+#: nova/virt/libvirt/driver.py:2491 nova/virt/xenapi/vmops.py:1561
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:2746
+#: nova/virt/libvirt/driver.py:2820
+#, python-format
+msgid "%s format is not supported"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2926
#, python-format
msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted"
msgstr ""
-#: nova/virt/libvirt/driver.py:2912
+#: nova/virt/libvirt/driver.py:3069
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:2918
+#: nova/virt/libvirt/driver.py:3075
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:2922
+#: nova/virt/libvirt/driver.py:3079
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:2942
-msgid ""
-"Passthrough of the host CPU was requested but this libvirt version does "
-"not support this feature"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:3475
+#: nova/virt/libvirt/driver.py:3689
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_id)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3496
+#: nova/virt/libvirt/driver.py:3710
#, python-format
msgid ""
"Error from libvirt while looking up %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3760
+#: nova/virt/libvirt/driver.py:3976
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr ""
-#: nova/virt/libvirt/driver.py:3890
+#: nova/virt/libvirt/driver.py:4101
msgid "libvirt version is too old (does not support getVersion)"
msgstr ""
-#: nova/virt/libvirt/driver.py:4251
+#: nova/virt/libvirt/driver.py:4462
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:4259
+#: nova/virt/libvirt/driver.py:4471
msgid "Live migration can not be used without shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:4303
+#: nova/virt/libvirt/driver.py:4541
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:4342
+#: nova/virt/libvirt/driver.py:4580
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -8297,12 +7966,38 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4409
+#: nova/virt/libvirt/driver.py:4643
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:4900
+#: nova/virt/libvirt/driver.py:4706
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag "
+"or your destination node does not support retrieving listen addresses. "
+"In order for live migration to work properly, you must configure the "
+"graphics (VNC and/or SPICE) listen addresses to be either the catch-all "
+"address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:4723
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag,"
+" and the graphics (VNC and/or SPICE) listen addresses on the destination"
+" node do not match the addresses on the source node. Since the source "
+"node has listen addresses set to either the catch-all address (0.0.0.0 or"
+" ::) or the local address (127.0.0.1 or ::1), the live migration will "
+"succeed, but the VM will continue to listen on the current addresses."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:5100
+#, python-format
+msgid ""
+"Error from libvirt while getting description of %(instance_name)s: [Error"
+" Code %(error_code)s] %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:5226
msgid "Unable to resize disk down."
msgstr ""
@@ -8315,316 +8010,290 @@ msgstr ""
msgid "Attempted overwrite of an existing value."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:429
+#: nova/virt/libvirt/imagebackend.py:316
+msgid "clone() is not implemented"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:449
msgid "You should specify images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:544
+#: nova/virt/libvirt/imagebackend.py:522
msgid "You should specify images_rbd_pool flag to use rbd images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:658
-msgid "rbd python libraries not found"
+#: nova/virt/libvirt/imagebackend.py:612
+msgid "installed version of librbd does not support cloning"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:697
+#: nova/virt/libvirt/imagebackend.py:623
+msgid "Image is not raw format"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:631
+msgid "No image locations are accessible"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:651
#, python-format
msgid "Unknown image_type=%s"
msgstr ""
-#: nova/virt/libvirt/lvm.py:55
+#: nova/virt/libvirt/lvm.py:54
#, python-format
msgid ""
"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db "
"available, but %(size)db required by volume %(lv)s."
msgstr ""
-#: nova/virt/libvirt/lvm.py:103
+#: nova/virt/libvirt/lvm.py:102
#, python-format
msgid "vg %s must be LVM volume group"
msgstr ""
-#: nova/virt/libvirt/lvm.py:146
+#: nova/virt/libvirt/lvm.py:145
#, python-format
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/lvm.py:222
+#: nova/virt/libvirt/lvm.py:221
#, python-format
msgid "volume_clear='%s' is not handled"
msgstr ""
+#: nova/virt/libvirt/rbd_utils.py:104
+msgid "rbd python libraries not found"
+msgstr ""
+
+#: nova/virt/libvirt/rbd_utils.py:159
+msgid "Not stored in rbd"
+msgstr ""
+
+#: nova/virt/libvirt/rbd_utils.py:163
+msgid "Blank components"
+msgstr ""
+
+#: nova/virt/libvirt/rbd_utils.py:166
+msgid "Not an rbd snapshot"
+msgstr ""
+
#: nova/virt/libvirt/utils.py:79
msgid "Cannot find any Fibre Channel HBAs"
msgstr ""
-#: nova/virt/libvirt/utils.py:431
+#: nova/virt/libvirt/utils.py:391
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/vif.py:353 nova/virt/libvirt/vif.py:608
-#: nova/virt/libvirt/vif.py:797
+#: nova/virt/libvirt/vif.py:322 nova/virt/libvirt/vif.py:508
+#: nova/virt/libvirt/vif.py:652
msgid "vif_type parameter must be present for this vif_driver implementation"
msgstr ""
-#: nova/virt/libvirt/vif.py:397 nova/virt/libvirt/vif.py:628
-#: nova/virt/libvirt/vif.py:817
+#: nova/virt/libvirt/vif.py:328 nova/virt/libvirt/vif.py:514
+#: nova/virt/libvirt/vif.py:658
#, python-format
msgid "Unexpected vif_type=%s"
msgstr ""
-#: nova/virt/libvirt/volume.py:291
+#: nova/virt/libvirt/volume.py:294
#, python-format
msgid "iSCSI device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:737
+#: nova/virt/libvirt/volume.py:740
#, python-format
msgid "AoE device not found at %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:909
+#: nova/virt/libvirt/volume.py:912
msgid "We are unable to locate any Fibre Channel devices"
msgstr ""
-#: nova/virt/libvirt/volume.py:928
+#: nova/virt/libvirt/volume.py:931
msgid "Fibre Channel device not found."
msgstr ""
-#: nova/virt/vmwareapi/driver.py:103
-msgid ""
-"The VMware ESX driver is now deprecated and will be removed in the Juno "
-"release. The VC driver will remain and continue to be supported."
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:115
+#: nova/virt/vmwareapi/driver.py:125
msgid ""
"Must specify host_ip, host_username and host_password to use "
-"compute_driver=vmwareapi.VMwareESXDriver or vmwareapi.VMwareVCDriver"
+"vmwareapi.VMwareVCDriver"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:127
+#: nova/virt/vmwareapi/driver.py:134
#, python-format
msgid "Invalid Regular Expression %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:242
-msgid "Instance cannot be found in host, or in an unknownstate."
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:398
+#: nova/virt/vmwareapi/driver.py:148
#, python-format
msgid "All clusters specified %s were not found in the vCenter"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:407
-#, python-format
-msgid "The following clusters could not be found in the vCenter %s"
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:544
+#: nova/virt/vmwareapi/driver.py:342
#, python-format
msgid "The resource %s does not exist"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:590
+#: nova/virt/vmwareapi/driver.py:404
#, python-format
msgid "Invalid cluster or resource pool name : %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:757
+#: nova/virt/vmwareapi/driver.py:582
msgid ""
"Multiple hosts may be managed by the VMWare vCenter driver; therefore we "
"do not return uptime for just one host."
msgstr ""
-#: nova/virt/vmwareapi/driver.py:845
-#, python-format
-msgid ""
-"Unable to connect to server at %(server)s, sleeping for %(seconds)s "
-"seconds"
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:865
+#: nova/virt/vmwareapi/driver.py:705
#, python-format
msgid "Unable to validate session %s!"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:906
+#: nova/virt/vmwareapi/driver.py:747
#, python-format
msgid "Session %s is inactive!"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:954
-#, python-format
-msgid "In vmwareapi: _call_method (session=%s)"
-msgstr ""
-
-#: nova/virt/vmwareapi/driver.py:998
+#: nova/virt/vmwareapi/driver.py:838
#, python-format
msgid "Task [%(task_name)s] %(task_ref)s status: error %(error_info)s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:1008
+#: nova/virt/vmwareapi/driver.py:848
#, python-format
msgid "In vmwareapi:_poll_task, Got this error %s"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:38
+#: nova/virt/vmwareapi/ds_util.py:41
msgid "Datastore name cannot be None"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:40
+#: nova/virt/vmwareapi/ds_util.py:43
msgid "Datastore reference cannot be None"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:42
+#: nova/virt/vmwareapi/ds_util.py:45
msgid "Invalid capacity"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:45
+#: nova/virt/vmwareapi/ds_util.py:48
msgid "Capacity is smaller than free space"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:106
+#: nova/virt/vmwareapi/ds_util.py:111
msgid "datastore name empty"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:111
+#: nova/virt/vmwareapi/ds_util.py:116 nova/virt/vmwareapi/ds_util.py:148
msgid "path component cannot be None"
msgstr ""
-#: nova/virt/vmwareapi/ds_util.py:144
+#: nova/virt/vmwareapi/ds_util.py:162
msgid "datastore path empty"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:46
+#: nova/virt/vmwareapi/error_util.py:45
msgid "exception_summary must not be a list"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:76
+#: nova/virt/vmwareapi/error_util.py:75
msgid "fault_list must be a list"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:122
+#: nova/virt/vmwareapi/error_util.py:121
#, python-format
msgid "Error(s) %s occurred in the call to RetrievePropertiesEx"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:136
+#: nova/virt/vmwareapi/error_util.py:135
msgid "VMware Driver fault."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:142
+#: nova/virt/vmwareapi/error_util.py:141
msgid "VMware Driver configuration fault."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:146
+#: nova/virt/vmwareapi/error_util.py:145
msgid "No default value for use_linked_clone found."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:150
+#: nova/virt/vmwareapi/error_util.py:149
#, python-format
msgid "Missing parameter : %(param)s"
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:154
+#: nova/virt/vmwareapi/error_util.py:153
msgid "No root disk defined."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:158
+#: nova/virt/vmwareapi/error_util.py:157
msgid "Resource already exists."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:163
+#: nova/virt/vmwareapi/error_util.py:162
msgid "Cannot delete file."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:168
+#: nova/virt/vmwareapi/error_util.py:167
msgid "File already exists."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:173
+#: nova/virt/vmwareapi/error_util.py:172
msgid "File fault."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:178
+#: nova/virt/vmwareapi/error_util.py:177
msgid "File locked."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:183
+#: nova/virt/vmwareapi/error_util.py:182
msgid "File not found."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:188
+#: nova/virt/vmwareapi/error_util.py:187
msgid "Invalid property."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:193
+#: nova/virt/vmwareapi/error_util.py:192
msgid "No Permission."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:198
+#: nova/virt/vmwareapi/error_util.py:197
msgid "Not Authenticated."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:203
+#: nova/virt/vmwareapi/error_util.py:202
msgid "Invalid Power State."
msgstr ""
-#: nova/virt/vmwareapi/error_util.py:228
+#: nova/virt/vmwareapi/error_util.py:227
#, python-format
msgid "Fault %s not matched."
msgstr ""
-#: nova/virt/vmwareapi/fake.py:243
-#, python-format
-msgid "Property %(attr)s not set for the managed object %(name)s"
-msgstr ""
-
-#: nova/virt/vmwareapi/fake.py:967
-msgid "There is no VM registered"
-msgstr ""
-
-#: nova/virt/vmwareapi/fake.py:969 nova/virt/vmwareapi/fake.py:1290
-#, python-format
-msgid "Virtual Machine with ref %s is not there"
-msgstr ""
-
-#: nova/virt/vmwareapi/fake.py:1052
-#, python-format
-msgid "Logging out a session that is invalid or already logged out: %s"
-msgstr ""
-
-#: nova/virt/vmwareapi/fake.py:1070
-msgid "Session Invalid"
-msgstr ""
-
-#: nova/virt/vmwareapi/fake.py:1287
-msgid "No Virtual Machine has been registered yet"
-msgstr ""
-
#: nova/virt/vmwareapi/imagecache.py:74
#, python-format
msgid "Unable to delete %(file)s. Exception: %(ex)s"
msgstr ""
-#: nova/virt/vmwareapi/imagecache.py:148
+#: nova/virt/vmwareapi/imagecache.py:147
#, python-format
msgid "Image %s is no longer used by this node. Pending deletion!"
msgstr ""
-#: nova/virt/vmwareapi/imagecache.py:153
+#: nova/virt/vmwareapi/imagecache.py:152
#, python-format
msgid "Image %s is no longer used. Deleting!"
msgstr ""
-#: nova/virt/vmwareapi/io_util.py:121
+#: nova/virt/vmwareapi/io_util.py:122
#, python-format
msgid "Glance image %s is in killed state"
msgstr ""
-#: nova/virt/vmwareapi/io_util.py:129
+#: nova/virt/vmwareapi/io_util.py:130
#, python-format
msgid "Glance image %(image_id)s is in unknown state - %(state)s"
msgstr ""
@@ -8681,136 +8350,144 @@ msgstr ""
msgid "Unable to retrieve value for %(path)s Reason: %(reason)s"
msgstr ""
-#: nova/virt/vmwareapi/vm_util.py:195
+#: nova/virt/vmwareapi/vm_util.py:202
#, python-format
msgid "%s is not supported."
msgstr ""
-#: nova/virt/vmwareapi/vm_util.py:980
+#: nova/virt/vmwareapi/vm_util.py:1037
msgid "No host available on cluster"
msgstr ""
-#: nova/virt/vmwareapi/vm_util.py:1210
+#: nova/virt/vmwareapi/vm_util.py:1131
#, python-format
msgid "Failed to get cluster references %s"
msgstr ""
-#: nova/virt/vmwareapi/vm_util.py:1222
+#: nova/virt/vmwareapi/vm_util.py:1143
#, python-format
msgid "Failed to get resource pool references %s"
msgstr ""
-#: nova/virt/vmwareapi/vm_util.py:1404
+#: nova/virt/vmwareapi/vm_util.py:1334
msgid "vmwareapi:vm_util:clone_vmref_for_instance, called with vm_ref=None"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:131
+#: nova/virt/vmwareapi/vmops.py:132
#, python-format
msgid "Extending virtual disk failed with error: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:246
+#: nova/virt/vmwareapi/vmops.py:252
msgid "Image disk size greater than requested disk size"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:471
-#, python-format
-msgid "Root disk file creation failed - %s"
+#: nova/virt/vmwareapi/vmops.py:859
+msgid "instance is not powered on"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:813
-msgid "instance is not powered on"
+#: nova/virt/vmwareapi/vmops.py:887
+msgid "Instance does not exist on backend"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:869
+#: nova/virt/vmwareapi/vmops.py:914
#, python-format
msgid ""
"In vmwareapi:vmops:_destroy_instance, got this exception while un-"
"registering the VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:892
-#, python-format
+#: nova/virt/vmwareapi/vmops.py:937
msgid ""
-"In vmwareapi:vmops:_destroy_instance, got this exception while deleting "
-"the VM contents from the disk: %s"
+"In vmwareapi:vmops:_destroy_instance, exception while deleting the VM "
+"contents from the disk"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:926
+#: nova/virt/vmwareapi/vmops.py:969
msgid "pause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:930
+#: nova/virt/vmwareapi/vmops.py:973
msgid "unpause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:948
+#: nova/virt/vmwareapi/vmops.py:991
msgid "instance is powered off and cannot be suspended."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:968
+#: nova/virt/vmwareapi/vmops.py:1011
msgid "instance is not in a suspended state"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1056
-msgid "instance is suspended and cannot be powered off."
+#: nova/virt/vmwareapi/vmops.py:1111
+msgid "Unable to shrink disk."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1147
+#: nova/virt/vmwareapi/vmops.py:1170
#, python-format
msgid ""
"In vmwareapi:vmops:confirm_migration, got this exception while destroying"
" the VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1213 nova/virt/xenapi/vmops.py:1497
+#: nova/virt/vmwareapi/vmops.py:1246 nova/virt/xenapi/vmops.py:1500
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1217 nova/virt/xenapi/vmops.py:1501
+#: nova/virt/vmwareapi/vmops.py:1250 nova/virt/xenapi/vmops.py:1504
msgid "Automatically hard rebooting"
msgstr ""
-#: nova/virt/vmwareapi/volumeops.py:217 nova/virt/vmwareapi/volumeops.py:251
+#: nova/virt/vmwareapi/vmops.py:1568
+#, python-format
+msgid "No device with interface-id %s exists on VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1578
+#, python-format
+msgid "No device with MAC address %s exists on the VM"
+msgstr ""
+
+#: nova/virt/vmwareapi/volumeops.py:340 nova/virt/vmwareapi/volumeops.py:375
#, python-format
msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
msgstr ""
-#: nova/virt/vmwareapi/volumeops.py:239 nova/virt/vmwareapi/volumeops.py:414
+#: nova/virt/vmwareapi/volumeops.py:363 nova/virt/vmwareapi/volumeops.py:538
msgid "Unable to find iSCSI Target"
msgstr ""
-#: nova/virt/vmwareapi/volumeops.py:337
+#: nova/virt/vmwareapi/volumeops.py:461
#, python-format
msgid ""
"The volume's backing has been relocated to %s. Need to consolidate "
"backing disk file."
msgstr ""
-#: nova/virt/vmwareapi/volumeops.py:375 nova/virt/vmwareapi/volumeops.py:422
+#: nova/virt/vmwareapi/volumeops.py:499 nova/virt/vmwareapi/volumeops.py:546
msgid "Unable to find volume"
msgstr ""
-#: nova/virt/vmwareapi/volumeops.py:395 nova/virt/vmwareapi/volumeops.py:424
+#: nova/virt/vmwareapi/volumeops.py:519 nova/virt/vmwareapi/volumeops.py:548
#: nova/virt/xenapi/volumeops.py:148
#, python-format
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1768
+#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1777
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1773
+#: nova/virt/xenapi/agent.py:117 nova/virt/xenapi/vmops.py:1782
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1778
+#: nova/virt/xenapi/agent.py:122 nova/virt/xenapi/vmops.py:1787
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
@@ -8872,65 +8549,65 @@ msgstr ""
msgid "Failure while cleaning up attached VDIs"
msgstr ""
-#: nova/virt/xenapi/driver.py:386
+#: nova/virt/xenapi/driver.py:390
#, python-format
msgid "Could not determine key: %s"
msgstr ""
-#: nova/virt/xenapi/driver.py:632
+#: nova/virt/xenapi/driver.py:641
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/fake.py:812
+#: nova/virt/xenapi/fake.py:820
#, python-format
msgid "xenapi.fake does not have an implementation for %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:920
+#: nova/virt/xenapi/fake.py:928
#, python-format
msgid ""
"xenapi.fake does not have an implementation for %s or it has been called "
"with the wrong number of arguments"
msgstr ""
-#: nova/virt/xenapi/host.py:74
+#: nova/virt/xenapi/host.py:73
#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database:"
" assuming it is a worker VM and skip ping migration to a new host"
msgstr ""
-#: nova/virt/xenapi/host.py:86
+#: nova/virt/xenapi/host.py:85
#, python-format
msgid "Aggregate for host %(host)s count not be found."
msgstr ""
-#: nova/virt/xenapi/host.py:105
+#: nova/virt/xenapi/host.py:104
#, python-format
msgid "Unable to migrate VM %(vm_ref)s from %(host)s"
msgstr ""
-#: nova/virt/xenapi/host.py:186
+#: nova/virt/xenapi/host.py:185
msgid "Failed to parse information about a pci device for passthrough"
msgstr ""
-#: nova/virt/xenapi/host.py:259
+#: nova/virt/xenapi/host.py:258
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to "
"take effect."
msgstr ""
-#: nova/virt/xenapi/host.py:284
+#: nova/virt/xenapi/host.py:283
#, python-format
msgid "Failed to extract instance support from %s"
msgstr ""
-#: nova/virt/xenapi/host.py:301
+#: nova/virt/xenapi/host.py:300
msgid "Unable to get updated status"
msgstr ""
-#: nova/virt/xenapi/host.py:304
+#: nova/virt/xenapi/host.py:303
#, python-format
msgid "The call to %(method)s returned an error: %(e)s."
msgstr ""
@@ -9004,231 +8681,231 @@ msgid ""
"Expected %(vlan_num)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:208
+#: nova/virt/xenapi/vm_utils.py:210
#, python-format
msgid ""
"Device id %(id)s specified is not supported by hypervisor version "
"%(version)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:325 nova/virt/xenapi/vm_utils.py:340
+#: nova/virt/xenapi/vm_utils.py:328 nova/virt/xenapi/vm_utils.py:343
msgid "VM already halted, skipping shutdown..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:392
+#: nova/virt/xenapi/vm_utils.py:395
#, python-format
msgid "VBD %s already detached"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:395
+#: nova/virt/xenapi/vm_utils.py:398
#, python-format
msgid ""
"VBD %(vbd_ref)s uplug failed with \"%(err)s\", attempt "
"%(num_attempt)d/%(max_attempts)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:402
+#: nova/virt/xenapi/vm_utils.py:405
#, python-format
msgid "Unable to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:405
+#: nova/virt/xenapi/vm_utils.py:408
#, python-format
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:417
+#: nova/virt/xenapi/vm_utils.py:420
#, python-format
msgid "Unable to destroy VBD %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:470
+#: nova/virt/xenapi/vm_utils.py:473
#, python-format
msgid "Unable to destroy VDI %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:516
+#: nova/virt/xenapi/vm_utils.py:519
msgid "SR not present and could not be introduced"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:700
+#: nova/virt/xenapi/vm_utils.py:703
#, python-format
msgid "No primary VDI found for %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:792
+#: nova/virt/xenapi/vm_utils.py:795
#, python-format
msgid ""
"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s"
" is of type %(type)s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:871
+#: nova/virt/xenapi/vm_utils.py:874
#, python-format
msgid "Multiple base images for image: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:926
+#: nova/virt/xenapi/vm_utils.py:929
#, python-format
msgid ""
"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor "
"size of %(new_disk_size)d bytes."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:937 nova/virt/xenapi/vmops.py:1037
+#: nova/virt/xenapi/vm_utils.py:940 nova/virt/xenapi/vmops.py:1040
msgid "Can't resize a disk to 0 GB."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:989
+#: nova/virt/xenapi/vm_utils.py:992
msgid "Disk must have only one partition."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:994
+#: nova/virt/xenapi/vm_utils.py:997
#, python-format
msgid "Disk contains a filesystem we are unable to resize: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:999
+#: nova/virt/xenapi/vm_utils.py:1002
msgid "The only partition should be partition 1."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1010
+#: nova/virt/xenapi/vm_utils.py:1013
#, python-format
msgid "Attempted auto_configure_disk failed because: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1261
+#: nova/virt/xenapi/vm_utils.py:1264
#, python-format
msgid ""
"Fast cloning is only supported on default local SR of type ext. SR on "
"this system was found to be of type %s. Ignoring the cow flag."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1336
+#: nova/virt/xenapi/vm_utils.py:1339
#, python-format
msgid "Unrecognized cache_images value '%s', defaulting to True"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1412
+#: nova/virt/xenapi/vm_utils.py:1415
#, python-format
msgid "Invalid value '%s' for torrent_images"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1435
+#: nova/virt/xenapi/vm_utils.py:1438
#, python-format
msgid "Invalid value '%d' for image_compression_level"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1461
+#: nova/virt/xenapi/vm_utils.py:1464
#, python-format
msgid ""
"Download handler '%(handler)s' raised an exception, falling back to "
"default handler '%(default_handler)s'"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1517
+#: nova/virt/xenapi/vm_utils.py:1520
#, python-format
msgid "Image size %(size)d exceeded flavor allowed size %(allowed_size)d"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1568
+#: nova/virt/xenapi/vm_utils.py:1571
#, python-format
msgid ""
"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d "
"bytes"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1610
+#: nova/virt/xenapi/vm_utils.py:1613
msgid "Failed to fetch glance image"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1818
+#: nova/virt/xenapi/vm_utils.py:1846
#, python-format
msgid "Unable to parse rrd of %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1848
+#: nova/virt/xenapi/vm_utils.py:1876
#, python-format
msgid "Retry SR scan due to error: %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1881
+#: nova/virt/xenapi/vm_utils.py:1909
#, python-format
msgid "Flag sr_matching_filter '%s' does not respect formatting convention"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1902
+#: nova/virt/xenapi/vm_utils.py:1930
msgid ""
"XenAPI is unable to find a Storage Repository to install guest instances "
"on. Please check your configuration (e.g. set a default SR for the pool) "
"and/or configure the flag 'sr_matching_filter'."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1915
+#: nova/virt/xenapi/vm_utils.py:1943
msgid "Cannot find SR of content-type ISO"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1968
+#: nova/virt/xenapi/vm_utils.py:1996
#, python-format
msgid ""
"Unable to obtain RRD XML for VM %(vm_uuid)s with server details: "
"%(server)s."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2096
+#: nova/virt/xenapi/vm_utils.py:2124
#, python-format
msgid "VHD coalesce attempts exceeded (%d), giving up..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2131
+#: nova/virt/xenapi/vm_utils.py:2159
#, python-format
msgid "Timeout waiting for device %s to be created"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2151
+#: nova/virt/xenapi/vm_utils.py:2179
#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2309
+#: nova/virt/xenapi/vm_utils.py:2337
msgid ""
"Shrinking the filesystem down with resize2fs has failed, please check if "
"you have enough free space on your disk."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2444
+#: nova/virt/xenapi/vm_utils.py:2472
msgid "Manipulating interface files directly"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2453
+#: nova/virt/xenapi/vm_utils.py:2481
#, python-format
msgid "Failed to mount filesystem (expected for non-linux instances): %s"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2564
+#: nova/virt/xenapi/vm_utils.py:2496
msgid "This domU must be running on the host specified by connection_url"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2633
+#: nova/virt/xenapi/vm_utils.py:2565
msgid "Failed to transfer vhd to new host"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2659
+#: nova/virt/xenapi/vm_utils.py:2591
msgid "ipxe_boot_menu_url not set, user will have to enter URL manually..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2665
+#: nova/virt/xenapi/vm_utils.py:2597
msgid "ipxe_network_name not set, user will have to enter IP manually..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2676
+#: nova/virt/xenapi/vm_utils.py:2608
#, python-format
msgid ""
"Unable to find network matching '%(network_name)s', user will have to "
"enter IP manually..."
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:2700
+#: nova/virt/xenapi/vm_utils.py:2632
#, python-format
msgid "ISO creation tool '%s' does not exist."
msgstr ""
@@ -9237,104 +8914,104 @@ msgstr ""
msgid "Error: Agent is disabled"
msgstr ""
-#: nova/virt/xenapi/vmops.py:375
+#: nova/virt/xenapi/vmops.py:378
msgid "ipxe_boot is True but no ISO image found"
msgstr ""
-#: nova/virt/xenapi/vmops.py:518
+#: nova/virt/xenapi/vmops.py:521
msgid "Failed to spawn, rolling back"
msgstr ""
-#: nova/virt/xenapi/vmops.py:783
+#: nova/virt/xenapi/vmops.py:786
msgid "Unable to terminate instance."
msgstr ""
-#: nova/virt/xenapi/vmops.py:835
+#: nova/virt/xenapi/vmops.py:838
#, python-format
msgid "_migrate_disk_resizing_down failed. Restoring orig vm due_to: %s."
msgstr ""
-#: nova/virt/xenapi/vmops.py:989
+#: nova/virt/xenapi/vmops.py:992
#, python-format
msgid "_migrate_disk_resizing_up failed. Restoring orig vm due_to: %s."
msgstr ""
-#: nova/virt/xenapi/vmops.py:996
+#: nova/virt/xenapi/vmops.py:999
#, python-format
msgid "_migrate_disk_resizing_up failed to rollback: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1013
+#: nova/virt/xenapi/vmops.py:1016
msgid "Can't resize down ephemeral disks."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1124
+#: nova/virt/xenapi/vmops.py:1127
msgid "Starting halted instance found during reboot"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1130
+#: nova/virt/xenapi/vmops.py:1133
msgid ""
"Reboot failed due to bad volumes, detaching bad volumes and starting "
"halted instance"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1208
+#: nova/virt/xenapi/vmops.py:1211
msgid "Unable to update metadata, VM not found."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1254
+#: nova/virt/xenapi/vmops.py:1257
msgid "Unable to find root VBD/VDI for VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1292
+#: nova/virt/xenapi/vmops.py:1295
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1326
+#: nova/virt/xenapi/vmops.py:1329
msgid "Destroying VM"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1355
+#: nova/virt/xenapi/vmops.py:1358
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1406
+#: nova/virt/xenapi/vmops.py:1409
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1448
+#: nova/virt/xenapi/vmops.py:1451
msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1834
+#: nova/virt/xenapi/vmops.py:1843
#, python-format
msgid "Destination host:%s must be in the same aggregate as the source server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1855
+#: nova/virt/xenapi/vmops.py:1864
msgid "No suitable network for migrate"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1861
+#: nova/virt/xenapi/vmops.py:1870
#, python-format
msgid "PIF %s does not contain IP address"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1874
+#: nova/virt/xenapi/vmops.py:1883
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1948
+#: nova/virt/xenapi/vmops.py:1957
msgid "XAPI supporting relax-xsm-sr-check=true required"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1959
+#: nova/virt/xenapi/vmops.py:1968
#, python-format
msgid "assert_can_migrate failed because: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2019
+#: nova/virt/xenapi/vmops.py:2028
msgid "Migrate Send failed"
msgstr ""
@@ -9389,6 +9066,11 @@ msgstr ""
msgid "Unable to find SR from VBD %s"
msgstr ""
+#: nova/virt/xenapi/volume_utils.py:311
+#, python-format
+msgid "Unable to find SR from VDI %s"
+msgstr ""
+
#: nova/virt/xenapi/volumeops.py:63
#, python-format
msgid "Connected volume (vdi_uuid): %s"
@@ -9469,11 +9151,16 @@ msgstr ""
msgid "Starting nova-xvpvncproxy node (version %s)"
msgstr ""
-#: nova/volume/cinder.py:236
+#: nova/volume/cinder.py:257
+#, python-format
+msgid "Invalid client version, must be one of: %s"
+msgstr ""
+
+#: nova/volume/cinder.py:281
msgid "status must be 'in-use'"
msgstr ""
-#: nova/volume/cinder.py:242
+#: nova/volume/cinder.py:287
msgid "status must be 'available'"
msgstr ""
diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po b/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po
index de1d48a079..7806193e69 100644
--- a/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/pt_BR/LC_MESSAGES/nova-log-error.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
+"POT-Creation-Date: 2014-08-18 06:04+0000\n"
"PO-Revision-Date: 2014-06-14 19:30+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/nova/"
@@ -39,11 +39,305 @@ msgstr ""
msgid "Exception running %(name)s post-hook: %(obj)s"
msgstr ""
-#: nova/api/ec2/__init__.py:243
+#: nova/api/ec2/__init__.py:244
#, python-format
msgid "Keystone failure: %s"
msgstr ""
+#: nova/api/ec2/__init__.py:493
+#, python-format
+msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:520
+#, python-format
+msgid "Environment: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:155
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:212
+#, python-format
+msgid "Failed to get metadata for instance id: %s"
+msgstr ""
+
+#: nova/api/openstack/common.py:134
+#, python-format
+msgid ""
+"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad "
+"upgrade or db corrupted?"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:684
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:68
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:90
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:112
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:134
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:160
+#, python-format
+msgid "Error in migrate %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:179
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:198
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:215
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:234
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:392
+#, python-format
+msgid "Compute.api::resetState %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/multinic.py:85
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85
+msgid "Failed to get default networks"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
+msgid "Failed to update usages deallocating network."
+msgstr ""
+
+#: nova/compute/api.py:561
+msgid "Failed to set instance name using multi_instance_display_name_template."
+msgstr ""
+
+#: nova/compute/api.py:1429
+msgid ""
+"Something wrong happened when trying to delete snapshot from shelved "
+"instance."
+msgstr ""
+
+#: nova/compute/api.py:3732
+msgid "Failed to update usages deallocating security group"
+msgstr ""
+
+#: nova/compute/flavors.py:167
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: nova/compute/flavors.py:178
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: nova/compute/manager.py:366
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr ""
+
+#: nova/compute/manager.py:755
+msgid "Failed to check if instance shared"
+msgstr ""
+
+#: nova/compute/manager.py:821 nova/compute/manager.py:872
+msgid "Failed to complete a deletion"
+msgstr ""
+
+#: nova/compute/manager.py:913
+msgid "Failed to stop instance"
+msgstr ""
+
+#: nova/compute/manager.py:925
+msgid "Failed to start instance"
+msgstr ""
+
+#: nova/compute/manager.py:950
+msgid "Failed to revert crashed migration"
+msgstr ""
+
+#: nova/compute/manager.py:1364
+msgid "Failed to dealloc network for deleted instance"
+msgstr ""
+
+#: nova/compute/manager.py:1385
+msgid "Failed to dealloc network for failed instance"
+msgstr ""
+
+#: nova/compute/manager.py:1458 nova/compute/manager.py:3527
+msgid "Error trying to reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:1567
+#, python-format
+msgid "Instance failed network setup after %(attempts)d attempt(s)"
+msgstr ""
+
+#: nova/compute/manager.py:1761
+msgid "Instance failed block device setup"
+msgstr ""
+
+#: nova/compute/manager.py:1781 nova/compute/manager.py:2123
+#: nova/compute/manager.py:4071
+msgid "Instance failed to spawn"
+msgstr ""
+
+#: nova/compute/manager.py:1964
+msgid "Unexpected build failure, not rescheduling build."
+msgstr ""
+
+#: nova/compute/manager.py:2033 nova/compute/manager.py:2085
+msgid "Failed to allocate network(s)"
+msgstr ""
+
+#: nova/compute/manager.py:2111
+msgid "Failure prepping block device"
+msgstr ""
+
+#: nova/compute/manager.py:2144
+msgid "Failed to deallocate networks"
+msgstr ""
+
+#: nova/compute/manager.py:2374 nova/compute/manager.py:3718
+#: nova/compute/manager.py:5822
+msgid "Setting instance vm_state to ERROR"
+msgstr ""
+
+#: nova/compute/manager.py:2586 nova/compute/manager.py:4933
+#, python-format
+msgid "Failed to get compute_info for %s"
+msgstr ""
+
+#: nova/compute/manager.py:3013
+#, python-format
+msgid "set_admin_password failed: %s"
+msgstr ""
+
+#: nova/compute/manager.py:3098
+msgid "Error trying to Rescue Instance"
+msgstr ""
+
+#: nova/compute/manager.py:3724
+#, python-format
+msgid "Failed to rollback quota for failed finish_resize: %s"
+msgstr ""
+
+#: nova/compute/manager.py:4323
+#, python-format
+msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4362
+#, python-format
+msgid "Failed to detach volume %(volume_id)s from %(mp)s"
+msgstr ""
+
+#: nova/compute/manager.py:4441
+#, python-format
+msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
+msgstr ""
+
+#: nova/compute/manager.py:4448
+#, python-format
+msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4735
+#, python-format
+msgid "Pre live migration failed at %s"
+msgstr ""
+
+#: nova/compute/manager.py:5235
+msgid "Periodic task failed to offload instance."
+msgstr ""
+
+#: nova/compute/manager.py:5275
+#, python-format
+msgid "Failed to generate usage audit for instance on host %s"
+msgstr ""
+
+#: nova/compute/manager.py:5465
+msgid ""
+"Periodic sync_power_state task had an error while processing an instance."
+msgstr ""
+
+#: nova/compute/manager.py:5568 nova/compute/manager.py:5577
+#: nova/compute/manager.py:5608 nova/compute/manager.py:5619
+msgid "error during stop() in sync_power_state."
+msgstr ""
+
+#: nova/network/neutronv2/api.py:234
+#, python-format
+msgid "Neutron error creating port on network %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:418
+#, python-format
+msgid "Failed to update port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:425
+#, python-format
+msgid "Failed to delete port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524
+#, python-format
+msgid "Failed to delete neutron port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:697
+#, python-format
+msgid "Failed to access port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:931
+#, python-format
+msgid "Unable to access floating IP %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1065
+#, python-format
+msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1124
+#, python-format
+msgid "Unable to update host of port %s"
+msgstr ""
+
#: nova/objects/instance_fault.py:87
msgid "Failed to notify cells of instance fault"
msgstr "Falha ao notificar células de falha da instância"
@@ -58,35 +352,35 @@ msgstr "Exceção original sendo descartada: %s"
msgid "Unexpected exception occurred %d time(s)... retrying."
msgstr "Exceção não esperada ocorreu %d vez(es)... tentando novamente."
-#: nova/openstack/common/lockutils.py:120
+#: nova/openstack/common/lockutils.py:119
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/loopingcall.py:89
+#: nova/openstack/common/loopingcall.py:95
msgid "in fixed duration looping call"
msgstr "em uma chamada de laço de duração fixa"
-#: nova/openstack/common/loopingcall.py:136
+#: nova/openstack/common/loopingcall.py:138
msgid "in dynamic looping call"
msgstr "em chamada de laço dinâmico"
-#: nova/openstack/common/periodic_task.py:179
+#: nova/openstack/common/periodic_task.py:202
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr "Erro durante %(full_task_name)s: %(e)s"
-#: nova/openstack/common/policy.py:511
+#: nova/openstack/common/policy.py:507
#, python-format
msgid "Failed to understand rule %s"
msgstr "Falha ao interpretar regra %s"
-#: nova/openstack/common/policy.py:521
+#: nova/openstack/common/policy.py:517
#, python-format
msgid "No handler for matches of kind %s"
msgstr "Nenhum manipulador para correspondências do tipo %s"
-#: nova/openstack/common/policy.py:791
+#: nova/openstack/common/policy.py:787
#, python-format
msgid "Failed to understand rule %r"
msgstr "Falha ao interpretar regra %r"
@@ -116,170 +410,184 @@ msgstr "Exceção de BD incluída."
msgid "Failed to migrate to version %s on engine %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:625
+#: nova/pci/pci_stats.py:119
+msgid ""
+"Failed to allocate PCI devices for instance. Unassigning devices back to "
+"pools. This should not happen, since the scheduler should have accurate "
+"information, and allocation during claims is controlled via a hold on the "
+"compute node semaphore"
+msgstr ""
+
+#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109
+#, python-format
+msgid "PCI device %s not found"
+msgstr ""
+
+#: nova/virt/disk/api.py:388
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:639
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:749
+#: nova/virt/libvirt/driver.py:764
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:873
+#: nova/virt/libvirt/driver.py:927
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:889
-msgid "During wait destroy, instance disappeared."
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:951
+#: nova/virt/libvirt/driver.py:1005
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:977
+#: nova/virt/libvirt/driver.py:1033
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1389
+#: nova/virt/libvirt/driver.py:1444
msgid "attaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1414
+#: nova/virt/libvirt/driver.py:1471
msgid "detaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1663
+#: nova/virt/libvirt/driver.py:1726
msgid "Failed to send updated snapshot status to volume service."
msgstr ""
-#: nova/virt/libvirt/driver.py:1749
+#: nova/virt/libvirt/driver.py:1834
msgid ""
"Unable to create quiesced VM snapshot, attempting again with quiescing "
"disabled."
msgstr ""
-#: nova/virt/libvirt/driver.py:1755
+#: nova/virt/libvirt/driver.py:1840
msgid "Unable to create VM snapshot, failing volume_snapshot operation."
msgstr ""
-#: nova/virt/libvirt/driver.py:1804
+#: nova/virt/libvirt/driver.py:1889
msgid ""
"Error occurred during volume_snapshot_create, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:1951
+#: nova/virt/libvirt/driver.py:2111
msgid ""
"Error occurred during volume_snapshot_delete, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421
+#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:2542
+#: nova/virt/libvirt/driver.py:2705
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2693
+#: nova/virt/libvirt/driver.py:2873
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2786
+#: nova/virt/libvirt/driver.py:2966
#, python-format
msgid "Attaching PCI devices %(dev)s to %(dom)s failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:3553
+#: nova/virt/libvirt/driver.py:3783
#, python-format
-msgid "An error occurred while trying to define a domain with xml: %s"
+msgid "Error defining a domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3562
+#: nova/virt/libvirt/driver.py:3787
#, python-format
-msgid "An error occurred while trying to launch a defined domain with xml: %s"
+msgid "Error launching a defined domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3571
+#: nova/virt/libvirt/driver.py:3792
#, python-format
-msgid "An error occurred while enabling hairpin mode on domain with xml: %s"
+msgid "Error enabling hairpin mode with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3589
+#: nova/virt/libvirt/driver.py:3806
#, python-format
msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3904
+#: nova/virt/libvirt/driver.py:4115
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to take "
"effect."
msgstr ""
-#: nova/virt/libvirt/driver.py:4481
+#: nova/virt/libvirt/driver.py:4794
#, python-format
msgid "Live Migration failure: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5231
+#: nova/virt/libvirt/driver.py:5596
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:202
+#: nova/virt/libvirt/imagebackend.py:200
#, python-format
msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:230
+#: nova/virt/libvirt/imagebackend.py:227
#, python-format
msgid ""
"%(base)s virtual size %(base_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:501
-#, python-format
-msgid "error opening rbd image %s"
-msgstr ""
-
-#: nova/virt/libvirt/imagecache.py:130
+#: nova/virt/libvirt/imagecache.py:129
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:391
+#: nova/virt/libvirt/imagecache.py:390
#, python-format
msgid "image %(id)s at (%(base_file)s): image verification failed"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:448
+#: nova/virt/libvirt/imagecache.py:447
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/lvm.py:201
+#: nova/virt/libvirt/lvm.py:200
#, python-format
msgid "ignoring unrecognized volume_clear='%s' value"
msgstr ""
-#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572
-#: nova/virt/libvirt/vif.py:596
+#: nova/virt/libvirt/rbd_utils.py:62
+#, python-format
+msgid "error opening rbd image %s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474
+#: nova/virt/libvirt/vif.py:496
msgid "Failed while plugging vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676
-#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717
-#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762
-#: nova/virt/libvirt/vif.py:784
+#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560
+#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598
+#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639
msgid "Failed while unplugging vif"
msgstr ""
@@ -288,12 +596,28 @@ msgstr ""
msgid "Unknown content in connection_info/access_mode: %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:666
+#: nova/virt/libvirt/volume.py:669
#, python-format
msgid "Couldn't unmount the NFS share %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:815
+#: nova/virt/libvirt/volume.py:818
#, python-format
msgid "Couldn't unmount the GlusterFS share %s"
msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:508
+#, python-format
+msgid ""
+"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1551
+#, python-format
+msgid "Attaching network adapter failed. Exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1591
+#, python-format
+msgid "Detaching network adapter failed. Exception: %s"
+msgstr ""
diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po b/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po
index 29c655d5b6..ac1c2850f7 100644
--- a/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/pt_BR/LC_MESSAGES/nova-log-info.po
@@ -7,8 +7,8 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
-"PO-Revision-Date: 2014-06-30 05:01+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
+"PO-Revision-Date: 2014-07-16 14:42+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/nova/"
"language/pt_BR/)\n"
@@ -19,27 +19,77 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr "Backdoor de Eventlet escutando na porta %(port)s pelo processo %(pid)d"
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr "Criado caminho de lock: %s"
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr ""
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr "Ignorando tarefa periódica %(task)s porque seu intervalo é negativo"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "Ignorando tarefa periódica %(task)s porque ela está desativada"
@@ -101,97 +151,111 @@ msgstr "Excluindo linha duplicada com ID: %(id)s da tabela: %(table)s"
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr "Instância destruída com êxito."
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr "A instância pode ser iniciada novamente."
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr "Destruindo a instância novamente."
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr "Começando o processo de captura instantânea em tempo real"
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr "Iniciando processo de captura instantânea a frio"
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr "Captura instantânea extraída, iniciando upload da imagem"
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr "Upload da imagem de captura instantânea concluído"
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr "Reinicialização virtual da instância bem-sucedida."
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr "A instância foi encerrada com êxito."
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
"A instância pode ter sido reinicializada durante a reinicialização virtual, "
"portanto retorne agora."
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr "Instância reinicializada com êxito."
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr "Feito spawn da instância com êxito."
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr "dados: %(data)r, fpath: %(fpath)r"
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr "Log do console truncado retornado, %d bytes ignorados"
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr "Criando imagem"
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr "Usando unidade de configuração"
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr "Criando unidade de configuração em %(path)s"
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
@@ -200,48 +264,48 @@ msgstr ""
"Não foi possível localizar o domínio em libvirt para a instância %s. Não é "
"possível obter estatísticas do bloco para o dispositivo"
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr "Instância executando com êxito."
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr "Chamada setup_basic_filtering em nwfilter"
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr "Assegurando filtros estáticos"
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr "Tentou cancelar a filtragem da instância que não foi filtrada"
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr "Gravando informações armazenadas em %s"
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
@@ -249,27 +313,27 @@ msgstr ""
"imagem %(id)s em (%(base_file)s): verificação de imagem ignorada, nenhum "
"hash armazenado"
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr "%(id)s (%(base_file)s): gerando soma de verificação"
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr "O arquivo base é muito jovem para ser removido: %s"
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr "Removendo arquivo base: %s"
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr "imagem %(id)s em (%(base_file)s): verificando"
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
@@ -278,26 +342,26 @@ msgstr ""
"imagem %(id)s em (%(base_file)s): em uso: neste nó %(local)d local, "
"%(remote)d em outros nós que compartilham esse armazenamento de instância"
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr "Arquivos base ativos: %s"
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr "Arquivos base corrompidos: %s"
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr "Arquivos base removíveis: %s"
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po
index 07779ffe21..cce29b7642 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:08+0000\n"
+"POT-Creation-Date: 2014-08-18 06:04+0000\n"
"PO-Revision-Date: 2014-06-14 19:30+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/nova/"
@@ -39,11 +39,305 @@ msgstr ""
msgid "Exception running %(name)s post-hook: %(obj)s"
msgstr ""
-#: nova/api/ec2/__init__.py:243
+#: nova/api/ec2/__init__.py:244
#, python-format
msgid "Keystone failure: %s"
msgstr ""
+#: nova/api/ec2/__init__.py:493
+#, python-format
+msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
+msgstr ""
+
+#: nova/api/ec2/__init__.py:520
+#, python-format
+msgid "Environment: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:155
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: nova/api/metadata/handler.py:212
+#, python-format
+msgid "Failed to get metadata for instance id: %s"
+msgstr ""
+
+#: nova/api/openstack/common.py:134
+#, python-format
+msgid ""
+"status is UNKNOWN from vm_state=%(vm_state)s task_state=%(task_state)s. Bad "
+"upgrade or db corrupted?"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:684
+#, python-format
+msgid "Exception handling resource: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:68
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:90
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:112
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:134
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:160
+#, python-format
+msgid "Error in migrate %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:179
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:198
+#, python-format
+msgid "Compute.api::inject_network_info %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:215
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:234
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/admin_actions.py:392
+#, python-format
+msgid "Compute.api::resetState %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/multinic.py:85
+#, python-format
+msgid "Unable to find address %r"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:85
+msgid "Failed to get default networks"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:125
+msgid "Failed to update usages deallocating network."
+msgstr ""
+
+#: nova/compute/api.py:561
+msgid "Failed to set instance name using multi_instance_display_name_template."
+msgstr ""
+
+#: nova/compute/api.py:1429
+msgid ""
+"Something wrong happened when trying to delete snapshot from shelved "
+"instance."
+msgstr ""
+
+#: nova/compute/api.py:3732
+msgid "Failed to update usages deallocating security group"
+msgstr ""
+
+#: nova/compute/flavors.py:167
+#, python-format
+msgid "DB error: %s"
+msgstr ""
+
+#: nova/compute/flavors.py:178
+#, python-format
+msgid "Instance type %s not found for deletion"
+msgstr ""
+
+#: nova/compute/manager.py:366
+#, python-format
+msgid "Error while trying to clean up image %s"
+msgstr ""
+
+#: nova/compute/manager.py:755
+msgid "Failed to check if instance shared"
+msgstr ""
+
+#: nova/compute/manager.py:821 nova/compute/manager.py:872
+msgid "Failed to complete a deletion"
+msgstr ""
+
+#: nova/compute/manager.py:913
+msgid "Failed to stop instance"
+msgstr ""
+
+#: nova/compute/manager.py:925
+msgid "Failed to start instance"
+msgstr ""
+
+#: nova/compute/manager.py:950
+msgid "Failed to revert crashed migration"
+msgstr ""
+
+#: nova/compute/manager.py:1364
+msgid "Failed to dealloc network for deleted instance"
+msgstr ""
+
+#: nova/compute/manager.py:1385
+msgid "Failed to dealloc network for failed instance"
+msgstr ""
+
+#: nova/compute/manager.py:1458 nova/compute/manager.py:3527
+msgid "Error trying to reschedule"
+msgstr ""
+
+#: nova/compute/manager.py:1567
+#, python-format
+msgid "Instance failed network setup after %(attempts)d attempt(s)"
+msgstr ""
+
+#: nova/compute/manager.py:1761
+msgid "Instance failed block device setup"
+msgstr ""
+
+#: nova/compute/manager.py:1781 nova/compute/manager.py:2123
+#: nova/compute/manager.py:4071
+msgid "Instance failed to spawn"
+msgstr ""
+
+#: nova/compute/manager.py:1964
+msgid "Unexpected build failure, not rescheduling build."
+msgstr ""
+
+#: nova/compute/manager.py:2033 nova/compute/manager.py:2085
+msgid "Failed to allocate network(s)"
+msgstr ""
+
+#: nova/compute/manager.py:2111
+msgid "Failure prepping block device"
+msgstr ""
+
+#: nova/compute/manager.py:2144
+msgid "Failed to deallocate networks"
+msgstr ""
+
+#: nova/compute/manager.py:2374 nova/compute/manager.py:3718
+#: nova/compute/manager.py:5822
+msgid "Setting instance vm_state to ERROR"
+msgstr ""
+
+#: nova/compute/manager.py:2586 nova/compute/manager.py:4933
+#, python-format
+msgid "Failed to get compute_info for %s"
+msgstr ""
+
+#: nova/compute/manager.py:3013
+#, python-format
+msgid "set_admin_password failed: %s"
+msgstr ""
+
+#: nova/compute/manager.py:3098
+msgid "Error trying to Rescue Instance"
+msgstr ""
+
+#: nova/compute/manager.py:3724
+#, python-format
+msgid "Failed to rollback quota for failed finish_resize: %s"
+msgstr ""
+
+#: nova/compute/manager.py:4323
+#, python-format
+msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4362
+#, python-format
+msgid "Failed to detach volume %(volume_id)s from %(mp)s"
+msgstr ""
+
+#: nova/compute/manager.py:4441
+#, python-format
+msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
+msgstr ""
+
+#: nova/compute/manager.py:4448
+#, python-format
+msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
+msgstr ""
+
+#: nova/compute/manager.py:4735
+#, python-format
+msgid "Pre live migration failed at %s"
+msgstr ""
+
+#: nova/compute/manager.py:5235
+msgid "Periodic task failed to offload instance."
+msgstr ""
+
+#: nova/compute/manager.py:5275
+#, python-format
+msgid "Failed to generate usage audit for instance on host %s"
+msgstr ""
+
+#: nova/compute/manager.py:5465
+msgid ""
+"Periodic sync_power_state task had an error while processing an instance."
+msgstr ""
+
+#: nova/compute/manager.py:5568 nova/compute/manager.py:5577
+#: nova/compute/manager.py:5608 nova/compute/manager.py:5619
+msgid "error during stop() in sync_power_state."
+msgstr ""
+
+#: nova/network/neutronv2/api.py:234
+#, python-format
+msgid "Neutron error creating port on network %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:418
+#, python-format
+msgid "Failed to update port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:425
+#, python-format
+msgid "Failed to delete port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:500 nova/network/neutronv2/api.py:524
+#, python-format
+msgid "Failed to delete neutron port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:697
+#, python-format
+msgid "Failed to access port %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:931
+#, python-format
+msgid "Unable to access floating IP %s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1065
+#, python-format
+msgid "Unable to access floating IP %(fixed_ip)s for port %(port_id)s"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:1124
+#, python-format
+msgid "Unable to update host of port %s"
+msgstr ""
+
#: nova/objects/instance_fault.py:87
msgid "Failed to notify cells of instance fault"
msgstr "未能通知单元有关实例故障的事项"
@@ -58,35 +352,35 @@ msgstr "正在删除原始异常:%s"
msgid "Unexpected exception occurred %d time(s)... retrying."
msgstr "意外的异常已发生 %d 次...正在重试。"
-#: nova/openstack/common/lockutils.py:120
+#: nova/openstack/common/lockutils.py:119
#, python-format
msgid "Could not release the acquired lock `%s`"
msgstr ""
-#: nova/openstack/common/loopingcall.py:89
+#: nova/openstack/common/loopingcall.py:95
msgid "in fixed duration looping call"
msgstr "在固定时段内循环调用"
-#: nova/openstack/common/loopingcall.py:136
+#: nova/openstack/common/loopingcall.py:138
msgid "in dynamic looping call"
msgstr "在动态循环调用中"
-#: nova/openstack/common/periodic_task.py:179
+#: nova/openstack/common/periodic_task.py:202
#, python-format
msgid "Error during %(full_task_name)s: %(e)s"
msgstr "在 %(full_task_name)s 期间发生错误:%(e)s"
-#: nova/openstack/common/policy.py:511
+#: nova/openstack/common/policy.py:507
#, python-format
msgid "Failed to understand rule %s"
msgstr "未能理解规则 %s"
-#: nova/openstack/common/policy.py:521
+#: nova/openstack/common/policy.py:517
#, python-format
msgid "No handler for matches of kind %s"
msgstr "对于类型为 %s 的匹配项,不存在任何处理程序"
-#: nova/openstack/common/policy.py:791
+#: nova/openstack/common/policy.py:787
#, python-format
msgid "Failed to understand rule %r"
msgstr "未能理解规则 %r "
@@ -116,170 +410,184 @@ msgstr "数据库异常被包裹。"
msgid "Failed to migrate to version %s on engine %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:625
+#: nova/pci/pci_stats.py:119
+msgid ""
+"Failed to allocate PCI devices for instance. Unassigning devices back to "
+"pools. This should not happen, since the scheduler should have accurate "
+"information, and allocation during claims is controlled via a hold on the "
+"compute node semaphore"
+msgstr ""
+
+#: nova/pci/pci_utils.py:83 nova/pci/pci_utils.py:99 nova/pci/pci_utils.py:109
+#, python-format
+msgid "PCI device %s not found"
+msgstr ""
+
+#: nova/virt/disk/api.py:388
+#, python-format
+msgid ""
+"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:639
#, python-format
msgid "Nova requires libvirt version %(major)i.%(minor)i.%(micro)i or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:749
+#: nova/virt/libvirt/driver.py:764
#, python-format
msgid "Connection to libvirt failed: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:873
+#: nova/virt/libvirt/driver.py:927
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:889
-msgid "During wait destroy, instance disappeared."
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:951
+#: nova/virt/libvirt/driver.py:1005
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:977
+#: nova/virt/libvirt/driver.py:1033
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1389
+#: nova/virt/libvirt/driver.py:1444
msgid "attaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1414
+#: nova/virt/libvirt/driver.py:1471
msgid "detaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1663
+#: nova/virt/libvirt/driver.py:1726
msgid "Failed to send updated snapshot status to volume service."
msgstr ""
-#: nova/virt/libvirt/driver.py:1749
+#: nova/virt/libvirt/driver.py:1834
msgid ""
"Unable to create quiesced VM snapshot, attempting again with quiescing "
"disabled."
msgstr ""
-#: nova/virt/libvirt/driver.py:1755
+#: nova/virt/libvirt/driver.py:1840
msgid "Unable to create VM snapshot, failing volume_snapshot operation."
msgstr ""
-#: nova/virt/libvirt/driver.py:1804
+#: nova/virt/libvirt/driver.py:1889
msgid ""
"Error occurred during volume_snapshot_create, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:1951
+#: nova/virt/libvirt/driver.py:2111
msgid ""
"Error occurred during volume_snapshot_delete, sending error status to Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:2416 nova/virt/libvirt/driver.py:2421
+#: nova/virt/libvirt/driver.py:2577 nova/virt/libvirt/driver.py:2582
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:2542
+#: nova/virt/libvirt/driver.py:2705
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:2693
+#: nova/virt/libvirt/driver.py:2873
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2786
+#: nova/virt/libvirt/driver.py:2966
#, python-format
msgid "Attaching PCI devices %(dev)s to %(dom)s failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:3553
+#: nova/virt/libvirt/driver.py:3783
#, python-format
-msgid "An error occurred while trying to define a domain with xml: %s"
+msgid "Error defining a domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3562
+#: nova/virt/libvirt/driver.py:3787
#, python-format
-msgid "An error occurred while trying to launch a defined domain with xml: %s"
+msgid "Error launching a defined domain with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3571
+#: nova/virt/libvirt/driver.py:3792
#, python-format
-msgid "An error occurred while enabling hairpin mode on domain with xml: %s"
+msgid "Error enabling hairpin mode with XML: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3589
+#: nova/virt/libvirt/driver.py:3806
#, python-format
msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3904
+#: nova/virt/libvirt/driver.py:4115
#, python-format
msgid ""
"Hostname has changed from %(old)s to %(new)s. A restart is required to take "
"effect."
msgstr ""
-#: nova/virt/libvirt/driver.py:4481
+#: nova/virt/libvirt/driver.py:4794
#, python-format
msgid "Live Migration failure: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5231
+#: nova/virt/libvirt/driver.py:5596
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:202
+#: nova/virt/libvirt/imagebackend.py:200
#, python-format
msgid "Unable to preallocate_images=%(imgs)s at path: %(path)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:230
+#: nova/virt/libvirt/imagebackend.py:227
#, python-format
msgid ""
"%(base)s virtual size %(base_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:501
-#, python-format
-msgid "error opening rbd image %s"
-msgstr ""
-
-#: nova/virt/libvirt/imagecache.py:130
+#: nova/virt/libvirt/imagecache.py:129
#, python-format
msgid "Error reading image info file %(filename)s: %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:391
+#: nova/virt/libvirt/imagecache.py:390
#, python-format
msgid "image %(id)s at (%(base_file)s): image verification failed"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:448
+#: nova/virt/libvirt/imagecache.py:447
#, python-format
msgid "Failed to remove %(base_file)s, error was %(error)s"
msgstr ""
-#: nova/virt/libvirt/lvm.py:201
+#: nova/virt/libvirt/lvm.py:200
#, python-format
msgid "ignoring unrecognized volume_clear='%s' value"
msgstr ""
-#: nova/virt/libvirt/vif.py:548 nova/virt/libvirt/vif.py:572
-#: nova/virt/libvirt/vif.py:596
+#: nova/virt/libvirt/rbd_utils.py:62
+#, python-format
+msgid "error opening rbd image %s"
+msgstr ""
+
+#: nova/virt/libvirt/vif.py:454 nova/virt/libvirt/vif.py:474
+#: nova/virt/libvirt/vif.py:496
msgid "Failed while plugging vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:644 nova/virt/libvirt/vif.py:676
-#: nova/virt/libvirt/vif.py:695 nova/virt/libvirt/vif.py:717
-#: nova/virt/libvirt/vif.py:737 nova/virt/libvirt/vif.py:762
-#: nova/virt/libvirt/vif.py:784
+#: nova/virt/libvirt/vif.py:546 nova/virt/libvirt/vif.py:560
+#: nova/virt/libvirt/vif.py:579 nova/virt/libvirt/vif.py:598
+#: nova/virt/libvirt/vif.py:619 nova/virt/libvirt/vif.py:639
msgid "Failed while unplugging vif"
msgstr ""
@@ -288,12 +596,28 @@ msgstr ""
msgid "Unknown content in connection_info/access_mode: %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:666
+#: nova/virt/libvirt/volume.py:669
#, python-format
msgid "Couldn't unmount the NFS share %s"
msgstr ""
-#: nova/virt/libvirt/volume.py:815
+#: nova/virt/libvirt/volume.py:818
#, python-format
msgid "Couldn't unmount the GlusterFS share %s"
msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:508
+#, python-format
+msgid ""
+"Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1551
+#, python-format
+msgid "Attaching network adapter failed. Exception: %s"
+msgstr ""
+
+#: nova/virt/vmwareapi/vmops.py:1591
+#, python-format
+msgid "Detaching network adapter failed. Exception: %s"
+msgstr ""
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po
index ce4fc2c650..1e340e8082 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po
@@ -7,8 +7,8 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
-"PO-Revision-Date: 2014-06-30 05:01+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
+"PO-Revision-Date: 2014-07-16 14:42+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/nova/"
"language/zh_CN/)\n"
@@ -19,27 +19,77 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=1; plural=0;\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr ""
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr "已创建锁路径:%s"
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr ""
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr "正在跳过周期性任务 %(task)s,因为其时间间隔为负"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "正在跳过周期性任务 %(task)s,因为它已禁用"
@@ -101,169 +151,183 @@ msgstr "正在从表 %(table)s 中删除具有id %(id)s 的重复行"
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr "实例销毁成功。"
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr "可再次启动实例。"
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr "将再次销毁实例。"
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr "正在开始实时快照流程"
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr "正在结束冷快照流程"
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr "已抽取快照,正在开始映像上载"
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr "快照映像上载完成"
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr "已成功执行实例软重新引导。"
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr "已成功关闭实例。"
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr "在软重新引导期间,可能已重新引导实例,因此会立即返回。"
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr "实例成功重启。"
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr "实例成功生产。"
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr "data:%(data)r, fpath: %(fpath)r"
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr "已返回截断的控制台日志,忽略了 %d 个字节"
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr "正在创建镜像"
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr "正在使用配置驱动器"
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr "正在 %(path)s 处创建配置驱动器"
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
"device"
msgstr "对于实例 %s,在 libvirt 中找不到域。无法获取设备的块统计信息"
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr "实例正在成功运行。"
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr "在 nwfilter 里调用 setup_basic_filtering"
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr "正在确保静态过滤器"
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr "试图不过滤没有过滤的实例"
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr "正在将已存储的信息写入 %s"
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
msgstr "(%(base_file)s) 处的映像 %(id)s:已跳过映像验证,未存储任何散列"
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr "%(id)s (%(base_file)s):正在生成校验和"
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr "基文件太新不需要删除:%s"
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr "正在删除基文件:%s"
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr "(%(base_file)s) 处的映像 %(id)s:正在检查"
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
@@ -272,26 +336,26 @@ msgstr ""
"(%(base_file)s) 处的映像 %(id)s:在使用中:在此节点上,%(local)d 本地;在共享"
"此实例存储器的其他节点上,%(remote)d"
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr "活跃的基文件:%s"
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr "损坏的基文件:%s"
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr "可删除的基文件:%s"
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po b/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po
index 3366c8bcfe..82536fcae3 100644
--- a/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/zh_TW/LC_MESSAGES/nova-log-info.po
@@ -7,7 +7,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2014-06-30 06:07+0000\n"
+"POT-Creation-Date: 2014-08-18 06:03+0000\n"
"PO-Revision-Date: 2014-06-18 19:31+0000\n"
"Last-Translator: openstackjenkins \n"
"Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/nova/"
@@ -19,27 +19,77 @@ msgstr ""
"Generated-By: Babel 1.3\n"
"Plural-Forms: nplurals=1; plural=0;\n"
+#: nova/api/openstack/__init__.py:101
+#, python-format
+msgid "%(url)s returned with HTTP %(status)d"
+msgstr ""
+
+#: nova/api/openstack/__init__.py:294
+msgid "V3 API has been disabled by configuration"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:688
+#, python-format
+msgid "Fault thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:691
+#, python-format
+msgid "HTTP exception thrown: %s"
+msgstr ""
+
+#: nova/api/openstack/compute/contrib/os_networks.py:101
+#: nova/api/openstack/compute/contrib/os_tenant_networks.py:128
+#, python-format
+msgid "Deleting network with id %s"
+msgstr ""
+
+#: nova/compute/manager.py:2663
+#, python-format
+msgid "bringing vm to original state: '%s'"
+msgstr ""
+
+#: nova/compute/manager.py:5471
+#, python-format
+msgid ""
+"During sync_power_state the instance has a pending task (%(task)s). Skip."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:36
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:36
+msgid ""
+"Skipped adding reservations_deleted_expire_idx because an equivalent index "
+"already exists."
+msgstr ""
+
+#: nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py:58
+#: nova/db/sqlalchemy/migrate_repo/versions/248_add_expire_reservations_index.py:58
+msgid ""
+"Skipped removing reservations_deleted_expire_idx because index does not "
+"exist."
+msgstr ""
+
#: nova/openstack/common/eventlet_backdoor.py:141
#, python-format
msgid "Eventlet backdoor listening on %(port)s for process %(pid)d"
msgstr ""
-#: nova/openstack/common/lockutils.py:83
+#: nova/openstack/common/lockutils.py:82
#, python-format
msgid "Created lock path: %s"
msgstr ""
-#: nova/openstack/common/lockutils.py:250
+#: nova/openstack/common/lockutils.py:251
#, python-format
msgid "Failed to remove file %(file)s"
msgstr ""
-#: nova/openstack/common/periodic_task.py:125
+#: nova/openstack/common/periodic_task.py:126
#, python-format
msgid "Skipping periodic task %(task)s because its interval is negative"
msgstr "正在跳過定期作業 %(task)s,因為其間隔為負數"
-#: nova/openstack/common/periodic_task.py:130
+#: nova/openstack/common/periodic_task.py:131
#, python-format
msgid "Skipping periodic task %(task)s because it is disabled"
msgstr "正在跳過定期作業 %(task)s,因為它已停用"
@@ -101,169 +151,183 @@ msgstr ""
msgid "%(num_values)d values found, of which the minimum value will be used."
msgstr ""
-#: nova/virt/libvirt/driver.py:894
+#: nova/virt/block_device.py:221
+#, python-format
+msgid "preserve multipath_id %s"
+msgstr ""
+
+#: nova/virt/firewall.py:444
+#, python-format
+msgid "instance chain %s disappeared during refresh, skipping"
+msgstr ""
+
+#: nova/virt/disk/vfs/guestfs.py:139
+msgid "Unable to force TCG mode, libguestfs too old?"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:835
+#, python-format
+msgid ""
+"Unable to use bulk domain list APIs, falling back to slow code path: %(ex)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:948
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:904
+#: nova/virt/libvirt/driver.py:958
msgid "Instance may be started again."
msgstr ""
-#: nova/virt/libvirt/driver.py:914
+#: nova/virt/libvirt/driver.py:968
msgid "Going to destroy instance again."
msgstr ""
-#: nova/virt/libvirt/driver.py:1518
+#: nova/virt/libvirt/driver.py:1576
msgid "Beginning live snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1521
+#: nova/virt/libvirt/driver.py:1579
msgid "Beginning cold snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1550
+#: nova/virt/libvirt/driver.py:1608
msgid "Snapshot extracted, beginning image upload"
msgstr ""
-#: nova/virt/libvirt/driver.py:1562
+#: nova/virt/libvirt/driver.py:1620
msgid "Snapshot image upload complete"
msgstr ""
-#: nova/virt/libvirt/driver.py:1972
+#: nova/virt/libvirt/driver.py:2132
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2015
+#: nova/virt/libvirt/driver.py:2175
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2023
+#: nova/virt/libvirt/driver.py:2183
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
-#: nova/virt/libvirt/driver.py:2091
+#: nova/virt/libvirt/driver.py:2252
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2259
+#: nova/virt/libvirt/driver.py:2420
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2275
+#: nova/virt/libvirt/driver.py:2436
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:2314 nova/virt/libvirt/driver.py:2341
+#: nova/virt/libvirt/driver.py:2475 nova/virt/libvirt/driver.py:2502
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:2568
+#: nova/virt/libvirt/driver.py:2731
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2857
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:2686
+#: nova/virt/libvirt/driver.py:2866
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:3223
+#: nova/virt/libvirt/driver.py:3437
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:3694 nova/virt/libvirt/driver.py:3821
-#: nova/virt/libvirt/driver.py:3849
-#, python-format
-msgid "libvirt can't find a domain with id: %s"
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:4109
+#: nova/virt/libvirt/driver.py:4320
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4115
+#: nova/virt/libvirt/driver.py:4326
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats for "
"device"
msgstr ""
-#: nova/virt/libvirt/driver.py:4330
+#: nova/virt/libvirt/driver.py:4568
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4986
+#: nova/virt/libvirt/driver.py:5316
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:5226
+#: nova/virt/libvirt/driver.py:5590
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5238
+#: nova/virt/libvirt/driver.py:5603
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5241
+#: nova/virt/libvirt/driver.py:5607
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:105
+#: nova/virt/libvirt/firewall.py:106
msgid "Called setup_basic_filtering in nwfilter"
msgstr ""
-#: nova/virt/libvirt/firewall.py:113
+#: nova/virt/libvirt/firewall.py:114
msgid "Ensuring static filters"
msgstr ""
-#: nova/virt/libvirt/firewall.py:306
+#: nova/virt/libvirt/firewall.py:305
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
-#: nova/virt/libvirt/imagecache.py:191
+#: nova/virt/libvirt/imagecache.py:190
#, python-format
msgid "Writing stored info to %s"
msgstr "正在將儲存的資訊寫入 %s"
-#: nova/virt/libvirt/imagecache.py:401
+#: nova/virt/libvirt/imagecache.py:400
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): image verification skipped, no hash stored"
msgstr "映像檔 %(id)s (%(base_file)s):已跳過映像檔驗證,未儲存雜湊"
-#: nova/virt/libvirt/imagecache.py:410
+#: nova/virt/libvirt/imagecache.py:409
#, python-format
msgid "%(id)s (%(base_file)s): generating checksum"
msgstr "%(id)s (%(base_file)s):正在產生總和檢查"
-#: nova/virt/libvirt/imagecache.py:438
+#: nova/virt/libvirt/imagecache.py:437
#, python-format
msgid "Base file too young to remove: %s"
msgstr "基本檔案太新,無法移除:%s"
-#: nova/virt/libvirt/imagecache.py:441
+#: nova/virt/libvirt/imagecache.py:440
#, python-format
msgid "Removing base file: %s"
msgstr "正在移除基本檔案:%s"
-#: nova/virt/libvirt/imagecache.py:459
+#: nova/virt/libvirt/imagecache.py:458
#, python-format
msgid "image %(id)s at (%(base_file)s): checking"
msgstr "映像檔 %(id)s (%(base_file)s):正在檢查"
-#: nova/virt/libvirt/imagecache.py:483
+#: nova/virt/libvirt/imagecache.py:482
#, python-format
msgid ""
"image %(id)s at (%(base_file)s): in use: on this node %(local)d local, "
@@ -272,26 +336,26 @@ msgstr ""
"映像檔 %(id)s (%(base_file)s):使用中:%(local)d 個在此節點上(本"
"端),%(remote)d 個在其他共用此實例儲存體的節點上"
-#: nova/virt/libvirt/imagecache.py:550
+#: nova/virt/libvirt/imagecache.py:549
#, python-format
msgid "Active base files: %s"
msgstr "作用中的基本檔案:%s"
-#: nova/virt/libvirt/imagecache.py:553
+#: nova/virt/libvirt/imagecache.py:552
#, python-format
msgid "Corrupt base files: %s"
msgstr "已毀損的基本檔案:%s"
-#: nova/virt/libvirt/imagecache.py:557
+#: nova/virt/libvirt/imagecache.py:556
#, python-format
msgid "Removable base files: %s"
msgstr "可移除的基本檔案:%s"
-#: nova/virt/libvirt/utils.py:530
+#: nova/virt/libvirt/utils.py:490
msgid "findmnt tool is not installed"
msgstr ""
-#: nova/virt/xenapi/vm_utils.py:1352
+#: nova/virt/xenapi/vm_utils.py:1355
#, python-format
msgid ""
"Image creation data, cacheable: %(cache)s, downloaded: %(downloaded)s "
diff --git a/nova/network/api.py b/nova/network/api.py
index 36c99a31e4..15c1656a92 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -20,13 +20,13 @@
from nova.compute import flavors
from nova import exception
+from nova.i18n import _
from nova.network import base_api
from nova.network import floating_ips
from nova.network import model as network_model
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import policy
from nova import utils
@@ -78,13 +78,13 @@ def get_all(self, context):
belong to the user's project.
"""
try:
- return self.db.network_get_all(context, project_only=True)
+ return objects.NetworkList.get_all(context, project_only=True)
except exception.NoNetworksFound:
return []
@wrap_check_policy
def get(self, context, network_uuid):
- return self.db.network_get_by_uuid(context.elevated(), network_uuid)
+ return objects.Network.get_by_uuid(context.elevated(), network_uuid)
@wrap_check_policy
def create(self, context, **kwargs):
@@ -97,11 +97,12 @@ def delete(self, context, network_uuid):
@wrap_check_policy
def disassociate(self, context, network_uuid):
network = self.get(context, network_uuid)
- self.db.network_disassociate(context, network['id'])
+ objects.Network.disassociate(context, network.id,
+ host=True, project=True)
@wrap_check_policy
def get_fixed_ip(self, context, id):
- return self.db.fixed_ip_get(context, id)
+ return objects.FixedIP.get_by_id(context, id)
@wrap_check_policy
def get_fixed_ip_by_address(self, context, address):
@@ -111,54 +112,54 @@ def get_fixed_ip_by_address(self, context, address):
def get_floating_ip(self, context, id):
if not utils.is_int_like(id):
raise exception.InvalidID(id=id)
- return self.db.floating_ip_get(context, id)
+ return objects.FloatingIP.get_by_id(context, id)
@wrap_check_policy
def get_floating_ip_pools(self, context):
- return self.db.floating_ip_get_pools(context)
+ return objects.FloatingIP.get_pool_names(context)
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
- return self.db.floating_ip_get_by_address(context, address)
+ return objects.FloatingIP.get_by_address(context, address)
@wrap_check_policy
def get_floating_ips_by_project(self, context):
- return self.db.floating_ip_get_all_by_project(context,
- context.project_id)
+ return objects.FloatingIPList.get_by_project(context,
+ context.project_id)
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
- floating_ips = self.db.floating_ip_get_by_fixed_address(context,
- fixed_address)
- return [floating_ip['address'] for floating_ip in floating_ips]
+ floating_ips = objects.FloatingIPList.get_by_fixed_address(
+ context, fixed_address)
+ return [str(floating_ip.address) for floating_ip in floating_ips]
@wrap_check_policy
def get_instance_id_by_floating_address(self, context, address):
- fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address)
+ fixed_ip = objects.FixedIP.get_by_floating_address(context, address)
if fixed_ip is None:
return None
else:
- return fixed_ip['instance_uuid']
+ return fixed_ip.instance_uuid
@wrap_check_policy
def get_vifs_by_instance(self, context, instance):
- vifs = self.db.virtual_interface_get_by_instance(context,
- instance['uuid'])
+ vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context,
+ instance.uuid)
for vif in vifs:
- if vif.get('network_id') is not None:
- network = self.db.network_get(context, vif['network_id'],
- project_only="allow_none")
- vif['net_uuid'] = network['uuid']
+ if vif.network_id is not None:
+ network = objects.Network.get_by_id(context, vif.network_id,
+ project_only='allow_none')
+ vif.net_uuid = network.uuid
return vifs
@wrap_check_policy
def get_vif_by_mac_address(self, context, mac_address):
- vif = self.db.virtual_interface_get_by_address(context,
- mac_address)
- if vif.get('network_id') is not None:
- network = self.db.network_get(context, vif['network_id'],
- project_only="allow_none")
- vif['net_uuid'] = network['uuid']
+ vif = objects.VirtualInterface.get_by_address(context,
+ mac_address)
+ if vif.network_id is not None:
+ network = objects.Network.get_by_id(context, vif.network_id,
+ project_only='allow_none')
+ vif.net_uuid = network.uuid
return vif
@wrap_check_policy
@@ -194,7 +195,7 @@ def associate_floating_ip(self, context, instance,
instance_id=orig_instance_uuid)
LOG.info(_('re-assign floating IP %(address)s from '
'instance %(instance_id)s') % msg_dict)
- orig_instance = self.db.instance_get_by_uuid(context,
+ orig_instance = objects.Instance.get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
@@ -218,7 +219,7 @@ def allocate_for_instance(self, context, instance, vpn,
"""Allocates all network structures for an instance.
:param context: The request context.
- :param instance: An Instance dict.
+ :param instance: nova.objects.instance.Instance object.
:param vpn: A boolean, if True, indicate a vpn to access the instance.
:param requested_networks: A dictionary of requested_networks,
Optional value containing network_id, fixed_ip, and port_id.
@@ -242,9 +243,9 @@ def allocate_for_instance(self, context, instance, vpn,
args = {}
args['vpn'] = vpn
args['requested_networks'] = requested_networks
- args['instance_id'] = instance['uuid']
- args['project_id'] = instance['project_id']
- args['host'] = instance['host']
+ args['instance_id'] = instance.uuid
+ args['project_id'] = instance.project_id
+ args['host'] = instance.host
args['rxtx_factor'] = flavor['rxtx_factor']
args['macs'] = macs
args['dhcp_options'] = dhcp_options
@@ -317,21 +318,21 @@ def add_network_to_project(self, context, project_id, network_uuid=None):
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate or disassociate host or project to network."""
- network_id = self.get(context, network_uuid)['id']
+ network = self.get(context, network_uuid)
if host is not base_api.SENTINEL:
if host is None:
- self.db.network_disassociate(context, network_id,
- disassociate_host=True,
- disassociate_project=False)
+ objects.Network.disassociate(context, network.id,
+ host=True, project=False)
else:
- self.db.network_set_host(context, network_id, host)
+ network.host = host
+ network.save()
if project is not base_api.SENTINEL:
if project is None:
- self.db.network_disassociate(context, network_id,
- disassociate_host=False,
- disassociate_project=True)
+ objects.Network.disassociate(context, network.id,
+ host=False, project=True)
else:
- self.db.network_associate(context, project, network_id, True)
+ objects.Network.associate(context, project,
+ network_id=network.id, force=True)
@wrap_check_policy
def get_instance_nw_info(self, context, instance, **kwargs):
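
The hunks above replace raw `self.db.*` calls, which return dict-like rows, with `nova.objects` classes that own their queries and expose typed attributes. A minimal, self-contained sketch of that pattern, where `FAKE_DB` and this toy `FixedIP` are illustrative stand-ins rather than nova's real classes:

    from dataclasses import dataclass

    FAKE_DB = {42: {"id": 42, "address": "10.0.0.5", "instance_uuid": "abc-123"}}

    @dataclass
    class FixedIP:
        id: int
        address: str
        instance_uuid: str

        @classmethod
        def get_by_id(cls, context, fip_id):
            # The object layer owns the DB access; callers never see raw rows.
            return cls(**FAKE_DB[fip_id])

    fip = FixedIP.get_by_id(None, 42)
    assert fip.instance_uuid == "abc-123"  # attribute access, not fip['instance_uuid']
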
diff --git a/nova/network/base_api.py b/nova/network/base_api.py
index 54674e1244..0ac93e287a 100644
--- a/nova/network/base_api.py
+++ b/nova/network/base_api.py
@@ -18,10 +18,10 @@
from nova.db import base
from nova import hooks
+from nova.i18n import _
from nova.network import model as network_model
from nova import objects
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
@@ -171,7 +171,7 @@ def allocate_for_instance(self, context, instance, vpn,
"""Allocates all network structures for an instance.
:param context: The request context.
- :param instance: An Instance dict.
+ :param instance: nova.objects.instance.Instance object.
:param vpn: A boolean, if True, indicate a vpn to access the instance.
:param requested_networks: A dictionary of requested_networks,
Optional value containing network_id, fixed_ip, and port_id.
diff --git a/nova/network/driver.py b/nova/network/driver.py
index 973ec30f8e..04e7d607f8 100644
--- a/nova/network/driver.py
+++ b/nova/network/driver.py
@@ -16,7 +16,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
diff --git a/nova/network/floating_ips.py b/nova/network/floating_ips.py
index 17235f6e6e..64398ce557 100644
--- a/nova/network/floating_ips.py
+++ b/nova/network/floating_ips.py
@@ -21,10 +21,10 @@
from nova import context
from nova.db import base
from nova import exception
+from nova.i18n import _
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
@@ -112,6 +112,7 @@ def allocate_for_instance(self, context, **kwargs):
nw_info = super(FloatingIP, self).allocate_for_instance(context,
**kwargs)
if CONF.auto_assign_floating_ip:
+ context = context.elevated()
# allocate a floating ip
floating_address = self.allocate_floating_ip(context, project_id,
True)
@@ -277,10 +278,10 @@ def deallocate_floating_ip(self, context, address,
LOG.exception(_("Failed to update usages deallocating "
"floating IP"))
- floating_ip_ref = objects.FloatingIP.deallocate(context, address)
- # floating_ip_ref will be None if concurrently another
+ rows_updated = objects.FloatingIP.deallocate(context, address)
+ # number of updated rows will be 0 if concurrently another
# API call has also deallocated the same floating ip
- if floating_ip_ref is None:
+ if not rows_updated:
if reservations:
QUOTAS.rollback(context, reservations, project_id=project_id)
else:
@@ -498,27 +499,22 @@ def get_floating_ip_by_address(self, context, address):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
- # NOTE(danms): Not converting to objects since it's not used
- return dict(self.db.floating_ip_get_by_address(context,
- address).iteritems())
+ return objects.FloatingIP.get_by_address(context, address)
def get_floating_ips_by_project(self, context):
"""Returns the floating IPs allocated to a project."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
- # NOTE(danms): Not converting to objects since it's not used
- ips = self.db.floating_ip_get_all_by_project(context,
+ return objects.FloatingIPList.get_by_project(context,
context.project_id)
- return [dict(ip.iteritems()) for ip in ips]
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""Returns the floating IPs associated with a fixed_address."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
- # NOTE(danms): Not converting to objects since it's not used
- floating_ips = self.db.floating_ip_get_by_fixed_address(context,
- fixed_address)
- return [floating_ip['address'] for floating_ip in floating_ips]
+ floating_ips = objects.FloatingIPList.get_by_fixed_address(
+ context, fixed_address)
+ return [str(floating_ip.address) for floating_ip in floating_ips]
def _is_stale_floating_ip_address(self, context, floating_ip):
try:
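
The `deallocate_floating_ip` change above switches from "did I get a row back" to "how many rows did my conditional update flip", which is the standard way to detect that a concurrent caller already freed the address. A rough in-memory sketch of the idiom, with the dict standing in for the database and the prints standing in for the quota commit/rollback:

    import threading

    _lock = threading.Lock()
    _table = {"1.2.3.4": {"project_id": "p1", "allocated": True}}

    def deallocate(address):
        """Return the number of rows flipped from allocated to free (0 or 1)."""
        with _lock:
            row = _table.get(address)
            if row and row["allocated"]:
                row["allocated"] = False
                return 1
            return 0

    rows_updated = deallocate("1.2.3.4")
    if not rows_updated:
        print("lost the race: roll back the quota reservation")
    else:
        print("freed the IP: commit the quota reservation")
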
diff --git a/nova/network/l3.py b/nova/network/l3.py
index 00f62ed971..ea247d0ad8 100644
--- a/nova/network/l3.py
+++ b/nova/network/l3.py
@@ -85,9 +85,10 @@ def initialize(self, **kwargs):
networks = kwargs.get('networks', None)
if not fixed_range and networks is not None:
for network in networks:
- self.initialize_network(network['cidr'])
- else:
- linux_net.init_host()
+ if network['enable_dhcp']:
+ is_ext = (network['dhcp_server'] is not None and
+ network['dhcp_server'] != network['gateway'])
+ self.initialize_network(network['cidr'], is_ext)
linux_net.ensure_metadata_ip()
linux_net.metadata_forward()
self.initialized = True
@@ -95,8 +96,8 @@ def initialize(self, **kwargs):
def is_initialized(self):
return self.initialized
- def initialize_network(self, cidr):
- linux_net.init_host(cidr)
+ def initialize_network(self, cidr, is_external):
+ linux_net.init_host(cidr, is_external)
def initialize_gateway(self, network_ref):
mac_address = utils.generate_mac_address()
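
The `is_ext` test introduced here treats a network as externally gatewayed when it has a DHCP server that is not the gateway itself. A small sketch of the predicate with hand-built network dicts (not real nova DB rows):

    def is_external(network):
        return (network['dhcp_server'] is not None and
                network['dhcp_server'] != network['gateway'])

    internal = {'dhcp_server': '10.0.0.1', 'gateway': '10.0.0.1'}
    external = {'dhcp_server': '192.168.1.10', 'gateway': '10.0.0.1'}
    assert not is_external(internal)  # dnsmasq listens on the gateway itself
    assert is_external(external)      # a non-gateway server hands out leases
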
diff --git a/nova/network/ldapdns.py b/nova/network/ldapdns.py
index 4d5bb0c453..3d5e4f33f9 100644
--- a/nova/network/ldapdns.py
+++ b/nova/network/ldapdns.py
@@ -23,8 +23,8 @@
from oslo.config import cfg
from nova import exception
+from nova.i18n import _
from nova.network import dns_driver
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
@@ -42,9 +42,9 @@
default='password',
help='Password for LDAP DNS',
secret=True),
     cfg.StrOpt('ldap_dns_soa_hostmaster',
                default='hostmaster@example.org',
                help='Hostmaster for LDAP DNS driver Statement of Authority'),

cfg.MultiStrOpt('ldap_dns_servers',
default=['dns.example.org'],
help='DNS Servers for LDAP DNS driver'),
@@ -156,7 +156,7 @@ def _soa(cls):
date = time.strftime('%Y%m%d%H%M%S')
soa = '%s %s %s %s %s %s %s' % (
CONF.ldap_dns_servers[0],
             CONF.ldap_dns_soa_hostmaster,
date,
CONF.ldap_dns_soa_refresh,
CONF.ldap_dns_soa_retry,
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index cc099801db..359a4f22b4 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -27,10 +27,10 @@
import six
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -666,29 +666,37 @@ def metadata_accept():
iptables_manager.apply()
-def add_snat_rule(ip_range):
+def add_snat_rule(ip_range, is_external=False):
if CONF.routing_source_ip:
- for dest_range in CONF.force_snat_range or ['0.0.0.0/0']:
+ if is_external:
+ if CONF.force_snat_range:
+ snat_range = CONF.force_snat_range
+ else:
+ snat_range = []
+ else:
+ snat_range = ['0.0.0.0/0']
+ for dest_range in snat_range:
rule = ('-s %s -d %s -j SNAT --to-source %s'
% (ip_range, dest_range, CONF.routing_source_ip))
- if CONF.public_interface:
+ if not is_external and CONF.public_interface:
rule += ' -o %s' % CONF.public_interface
iptables_manager.ipv4['nat'].add_rule('snat', rule)
iptables_manager.apply()
-def init_host(ip_range):
+def init_host(ip_range, is_external=False):
"""Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
- add_snat_rule(ip_range)
+ add_snat_rule(ip_range, is_external)
rules = []
- for snat_range in CONF.force_snat_range:
- rules.append('PREROUTING -p ipv4 --ip-src %s --ip-dst %s '
- '-j redirect --redirect-target ACCEPT' %
- (ip_range, snat_range))
+ if is_external:
+ for snat_range in CONF.force_snat_range:
+ rules.append('PREROUTING -p ipv4 --ip-src %s --ip-dst %s '
+ '-j redirect --redirect-target ACCEPT' %
+ (ip_range, snat_range))
if rules:
ensure_ebtables_rules(rules, 'nat')
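
The reworked `add_snat_rule()` narrows SNAT for externally gatewayed networks: they only get rules for explicitly forced destination ranges, and the `-o public_interface` restriction is dropped. A pure-function re-derivation of that selection, with made-up config values for illustration:

    def snat_rules(ip_range, routing_source_ip, force_snat_range,
                   public_interface, is_external=False):
        if not routing_source_ip:
            return []
        if is_external:
            dest_ranges = force_snat_range or []   # only explicitly forced ranges
        else:
            dest_ranges = ['0.0.0.0/0']            # default-SNAT everything
        rules = []
        for dest in dest_ranges:
            rule = '-s %s -d %s -j SNAT --to-source %s' % (
                ip_range, dest, routing_source_ip)
            if not is_external and public_interface:
                rule += ' -o %s' % public_interface
            rules.append(rule)
        return rules

    print(snat_rules('10.0.0.0/24', '203.0.113.1', ['192.168.0.0/16'], 'eth0'))
    print(snat_rules('10.0.0.0/24', '203.0.113.1', ['192.168.0.0/16'], 'eth0',
                     is_external=True))
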
@@ -967,30 +975,44 @@ def _remove_dhcp_mangle_rule(dev):
def get_dhcp_opts(context, network_ref):
"""Get network's hosts config in dhcp-opts format."""
+ gateway = network_ref['gateway']
+ # NOTE(vish): if we are in multi-host mode and we are not sharing
+ # addresses, then we actually need to hand out the
+ # dhcp server address as the gateway.
+ if network_ref['multi_host'] and not (network_ref['share_address'] or
+ CONF.share_dhcp_address):
+ gateway = network_ref['dhcp_server']
hosts = []
- host = None
- if network_ref['multi_host']:
- host = CONF.host
- fixedips = objects.FixedIPList.get_by_network(context, network_ref,
- host=host)
- if fixedips:
- instance_set = set([fixedip.instance_uuid for fixedip in fixedips])
- default_gw_vif = {}
- for instance_uuid in instance_set:
- vifs = objects.VirtualInterfaceList.get_by_instance_uuid(
- context, instance_uuid)
- if vifs:
- #offer a default gateway to the first virtual interface
- default_gw_vif[instance_uuid] = vifs[0].id
-
- for fixedip in fixedips:
- if fixedip.allocated:
- instance_uuid = fixedip.instance_uuid
- if instance_uuid in default_gw_vif:
- # we don't want default gateway for this fixed ip
- if (default_gw_vif[instance_uuid] !=
- fixedip.virtual_interface_id):
- hosts.append(_host_dhcp_opts(fixedip))
+ if CONF.use_single_default_gateway:
+ # NOTE(vish): this will have serious performance implications if we
+ # are not in multi_host mode.
+ host = None
+ if network_ref['multi_host']:
+ host = CONF.host
+ fixedips = objects.FixedIPList.get_by_network(context, network_ref,
+ host=host)
+ if fixedips:
+ instance_set = set([fixedip.instance_uuid for fixedip in fixedips])
+ default_gw_vif = {}
+ for instance_uuid in instance_set:
+ vifs = objects.VirtualInterfaceList.get_by_instance_uuid(
+ context, instance_uuid)
+ if vifs:
+ # offer a default gateway to the first virtual interface
+ default_gw_vif[instance_uuid] = vifs[0].id
+
+ for fixedip in fixedips:
+ if fixedip.allocated:
+ instance_uuid = fixedip.instance_uuid
+ if instance_uuid in default_gw_vif:
+ # we don't want default gateway for this fixed ip
+ if (default_gw_vif[instance_uuid] !=
+ fixedip.virtual_interface_id):
+ hosts.append(_host_dhcp_opts(fixedip))
+ else:
+ hosts.append(_host_dhcp_opts(fixedip, gateway))
+ else:
+ hosts.append(_host_dhcp_opts(None, gateway))
return '\n'.join(hosts)
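
The gateway selection hoisted to the top of `get_dhcp_opts()` can be read as a standalone rule: in multi-host mode without shared addresses, each host's dnsmasq must advertise its own `dhcp_server` address as the default route. A sketch, with `share_dhcp_address` standing in for `CONF.share_dhcp_address`:

    def effective_gateway(network, share_dhcp_address=False):
        if network['multi_host'] and not (network['share_address'] or
                                          share_dhcp_address):
            # Each host runs its own dnsmasq, so hand out that host's
            # dhcp_server address as the default gateway.
            return network['dhcp_server']
        return network['gateway']

    net = {'multi_host': True, 'share_address': False,
           'gateway': '10.0.0.1', 'dhcp_server': '10.0.0.2'}
    assert effective_gateway(net) == '10.0.0.2'
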
@@ -1043,12 +1065,9 @@ def restart_dhcp(context, dev, network_ref):
"""
conffile = _dhcp_file(dev, 'conf')
- if CONF.use_single_default_gateway:
- # NOTE(vish): this will have serious performance implications if we
- # are not in multi_host mode.
- optsfile = _dhcp_file(dev, 'opts')
- write_to_file(optsfile, get_dhcp_opts(context, network_ref))
- os.chmod(optsfile, 0o644)
+ optsfile = _dhcp_file(dev, 'opts')
+ write_to_file(optsfile, get_dhcp_opts(context, network_ref))
+ os.chmod(optsfile, 0o644)
_add_dhcp_mangle_rule(dev)
@@ -1081,6 +1100,7 @@ def restart_dhcp(context, dev, network_ref):
'--bind-interfaces',
'--conf-file=%s' % CONF.dnsmasq_config_file,
'--pid-file=%s' % _dhcp_file(dev, 'pid'),
+ '--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
'--dhcp-range=set:%s,%s,static,%s,%ss' %
@@ -1112,8 +1132,6 @@ def restart_dhcp(context, dev, network_ref):
cmd.append('--no-resolv')
for dns_server in dns_servers:
cmd.append('--server=%s' % dns_server)
- if CONF.use_single_default_gateway:
- cmd += ['--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts')]
_execute(*cmd, run_as_root=True)
@@ -1197,9 +1215,16 @@ def _host_dns(fixedip):
CONF.dhcp_domain)
-def _host_dhcp_opts(fixedip):
+def _host_dhcp_opts(fixedip=None, gateway=None):
"""Return an empty gateway option."""
- return '%s,%s' % (_host_dhcp_network(fixedip), 3)
+ values = []
+ if fixedip:
+ values.append(_host_dhcp_network(fixedip))
+ # NOTE(vish): 3 is the dhcp option for gateway.
+ values.append('3')
+ if gateway:
+ values.append('%s' % gateway)
+ return ','.join(values)
def _execute(*cmd, **kwargs):
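
The rewritten `_host_dhcp_opts()` can now emit three shapes of dnsmasq dhcp-opts lines instead of one. A stand-in that takes the host tag directly (the real code derives it via `_host_dhcp_network()`); tags and addresses below are illustrative:

    def host_dhcp_opts(tag=None, gateway=None):
        values = []
        if tag:
            values.append(tag)
        values.append('3')  # dnsmasq option 3 == default gateway
        if gateway:
            values.append(str(gateway))
        return ','.join(values)

    print(host_dhcp_opts('net-abc-0'))              # net-abc-0,3  (suppress gateway)
    print(host_dhcp_opts('net-abc-0', '10.0.0.2'))  # net-abc-0,3,10.0.0.2
    print(host_dhcp_opts(None, '10.0.0.2'))         # 3,10.0.0.2   (network-wide)
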
@@ -1545,7 +1570,7 @@ def ensure_bridge(bridge, interface, net_attrs=None, gateway=True,
out, err = _execute('brctl', 'addif', bridge, interface,
check_exit_code=False, run_as_root=True)
if (err and err != "device %s is already a member of a bridge; "
- "can't enslave it to bridge %s.\n" % (interface, bridge)):
+ "can't ensubordinate it to bridge %s.\n" % (interface, bridge)):
msg = _('Failed to add interface: %s') % err
raise exception.NovaException(msg)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index a85b6ba991..8becdfe53c 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -22,23 +22,6 @@
topologies. All of the network commands are issued to a subclass of
:class:`NetworkManager`.
-**Related Flags**
-
-:network_driver: Driver to use for network creation
-:flat_network_bridge: Bridge device for simple network instances
-:flat_interface: FlatDhcp will bridge into this interface if set
-:flat_network_dns: Dns for simple network
-:vlan_start: First VLAN for private networks
-:vpn_ip: Public IP for the cloudpipe VPN servers
-:vpn_start: First Vpn port for private networks
-:cnt_vpn_clients: Number of addresses reserved for vpn clients
-:network_size: Number of addresses in each private subnet
-:fixed_range: Fixed IP address block
-:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip
- is disassociated
-:create_unique_mac_address_attempts: Number of times to attempt creating
- a unique mac address
-
"""
import datetime
@@ -56,6 +39,7 @@
from nova import conductor
from nova import context
from nova import exception
+from nova.i18n import _
from nova import ipv6
from nova import manager
from nova.network import api as network_api
@@ -67,7 +51,6 @@
from nova.objects import base as obj_base
from nova.objects import quotas as quotas_obj
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
@@ -313,8 +296,10 @@ def _uses_shared_ip(network):
@utils.synchronized('get_dhcp')
def _get_dhcp_ip(self, context, network_ref, host=None):
"""Get the proper dhcp address to listen on."""
+ # NOTE(vish): If we are sharing the dhcp_address then we can just
+ # return the dhcp_server from the database.
if self._uses_shared_ip(network_ref):
- return network_ref['gateway']
+ return network_ref.get('dhcp_server') or network_ref['gateway']
if not host:
host = self.host
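
The shared-IP shortcut added to `_get_dhcp_ip()` now prefers the network's own `dhcp_server`, falling back to the gateway, instead of always returning the gateway. As a one-function sketch:

    def dhcp_listen_address(network_ref, uses_shared_ip):
        if uses_shared_ip:
            return network_ref.get('dhcp_server') or network_ref['gateway']
        return None  # fall through: look up a per-host fixed IP instead

    net = {'gateway': '10.0.0.1', 'dhcp_server': None}
    assert dhcp_listen_address(net, True) == '10.0.0.1'
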
@@ -494,7 +479,7 @@ def allocate_for_instance(self, context, **kwargs):
admin_context = context.elevated()
LOG.debug("Allocate network for instance", instance_uuid=instance_uuid,
context=context)
- networks = self._get_networks_for_instance(admin_context,
+ networks = self._get_networks_for_instance(context,
instance_uuid, project_id,
requested_networks=requested_networks)
networks_list = [self._get_network_dict(network)
@@ -503,8 +488,8 @@ def allocate_for_instance(self, context, **kwargs):
networks_list, context=context, instance_uuid=instance_uuid)
try:
- self._allocate_mac_addresses(context, instance_uuid, networks,
- macs)
+ self._allocate_mac_addresses(admin_context, instance_uuid,
+ networks, macs)
except Exception:
with excutils.save_and_reraise_exception():
# If we fail to allocate any one mac address, clean up all
@@ -520,8 +505,8 @@ def allocate_for_instance(self, context, **kwargs):
network_ids = [network['id'] for network in networks]
self.network_rpcapi.update_dns(context, network_ids)
- return self.get_instance_nw_info(context, instance_uuid, rxtx_factor,
- host)
+ return self.get_instance_nw_info(admin_context, instance_uuid,
+ rxtx_factor, host)
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating various network resources for an instance.
@@ -583,7 +568,7 @@ def get_instance_nw_info(self, context, instance_id, rxtx_factor,
where network = dict containing pertinent data from a network db object
and info = dict containing pertinent networking data
"""
         use_slave = kwargs.get('use_slave') or False
if not uuidutils.is_uuid_like(instance_id):
instance_id = instance_uuid
@@ -591,7 +576,7 @@ def get_instance_nw_info(self, context, instance_id, rxtx_factor,
LOG.debug('Get instance network info', instance_uuid=instance_uuid)
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(
             context, instance_uuid, use_slave=use_slave)
networks = {}
for vif in vifs:
@@ -871,28 +856,43 @@ def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
user_id=quota_user)
cleanup.append(functools.partial(quotas.rollback, context))
except exception.OverQuota:
- LOG.warn(_("Quota exceeded for %s, tried to allocate "
- "fixed IP"), context.project_id)
+ LOG.debug("Quota exceeded for %s, tried to allocate "
+ "fixed IP", context.project_id)
raise exception.FixedIpLimitExceeded()
try:
if network['cidr']:
address = kwargs.get('address', None)
if address:
+ LOG.debug('Associating instance with specified fixed IP '
+ '%(address)s in network %(network)s on subnet '
+ '%(cidr)s.' %
+ {'address': address, 'network': network['id'],
+ 'cidr': network['cidr']},
+ instance=instance)
fip = objects.FixedIP.associate(context,
str(address),
instance_id,
network['id'])
else:
+ LOG.debug('Associating instance with fixed IP from pool '
+ 'in network %(network)s on subnet %(cidr)s.' %
+ {'network': network['id'],
+ 'cidr': network['cidr']},
+ instance=instance)
fip = objects.FixedIP.associate_pool(
context.elevated(), network['id'], instance_id)
+ address = str(fip.address)
+
vif = objects.VirtualInterface.get_by_instance_and_network(
context, instance_id, network['id'])
fip.allocated = True
fip.virtual_interface_id = vif.id
fip.save()
- cleanup.append(fip.disassociate)
+ cleanup.append(functools.partial(fip.disassociate, context))
+ LOG.debug('Refreshing security group members for instance.',
+ instance=instance)
self._do_trigger_security_group_members_refresh_for_instance(
instance_id)
cleanup.append(functools.partial(
@@ -915,14 +915,23 @@ def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
self.instance_dns_manager.delete_entry,
instance_id, self.instance_dns_domain))
+ LOG.debug('Setting up network %(network)s on host %(host)s.' %
+ {'network': network['id'], 'host': self.host},
+ instance=instance)
self._setup_network_on_host(context, network)
cleanup.append(functools.partial(
self._teardown_network_on_host,
context, network))
quotas.commit(context)
- LOG.debug('Allocated fixed ip %s on network %s', address,
- network['uuid'], instance=instance)
+ if address is None:
+ # TODO(mriedem): should _setup_network_on_host return the addr?
+ LOG.debug('Fixed IP is setup on network %s but not returning '
+ 'the specific IP from the base network manager.',
+ network['uuid'], instance=instance)
+ else:
+ LOG.debug('Allocated fixed ip %s on network %s', address,
+ network['uuid'], instance=instance)
return address
except Exception:
@@ -1063,19 +1072,20 @@ def _convert_int_args(kwargs):
continue
kwargs[key] = int(value)
except ValueError:
- raise ValueError(_("%s must be an integer") % key)
+ raise exception.InvalidIntValue(key=key)
def create_networks(self, context,
label, cidr=None, multi_host=None, num_networks=None,
network_size=None, cidr_v6=None,
gateway=None, gateway_v6=None, bridge=None,
bridge_interface=None, dns1=None, dns2=None,
- fixed_cidr=None, **kwargs):
+ fixed_cidr=None, allowed_start=None,
+ allowed_end=None, **kwargs):
arg_names = ("label", "cidr", "multi_host", "num_networks",
"network_size", "cidr_v6",
"gateway", "gateway_v6", "bridge",
"bridge_interface", "dns1", "dns2",
- "fixed_cidr")
+ "fixed_cidr", "allowed_start", "allowed_end")
if 'mtu' not in kwargs:
kwargs['mtu'] = CONF.network_device_mtu
if 'dhcp_server' not in kwargs:
@@ -1095,7 +1105,7 @@ def create_networks(self, context,
# Size of "label" column in nova.networks is 255, hence the restriction
if len(label) > 255:
- raise ValueError(_("Maximum allowed length for 'label' is 255."))
+ raise exception.LabelTooLong()
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
raise exception.NetworkNotCreated(req="cidr or cidr_v6")
@@ -1108,10 +1118,22 @@ def create_networks(self, context,
if not kwargs[fld]:
raise exception.NetworkNotCreated(req=fld)
+ if kwargs["cidr_v6"]:
+ # NOTE(vish): just for validation
+ try:
+ netaddr.IPNetwork(kwargs["cidr_v6"])
+ except netaddr.AddrFormatError:
+ raise exception.InvalidCidr(cidr=kwargs["cidr_v6"])
+
+ if kwargs["cidr"]:
+ try:
+ fixnet = netaddr.IPNetwork(kwargs["cidr"])
+ except netaddr.AddrFormatError:
+ raise exception.InvalidCidr(cidr=kwargs["cidr"])
+
kwargs["num_networks"] = kwargs["num_networks"] or CONF.num_networks
if not kwargs["network_size"]:
if kwargs["cidr"]:
- fixnet = netaddr.IPNetwork(kwargs["cidr"])
each_subnet_size = fixnet.size / kwargs["num_networks"]
if each_subnet_size > CONF.network_size:
subnet = 32 - int(math.log(CONF.network_size, 2))
@@ -1135,17 +1157,32 @@ def create_networks(self, context,
kwargs["dns1"] = kwargs["dns1"] or CONF.flat_network_dns
if kwargs["fixed_cidr"]:
- kwargs["fixed_cidr"] = netaddr.IPNetwork(kwargs["fixed_cidr"])
+ try:
+ kwargs["fixed_cidr"] = netaddr.IPNetwork(kwargs["fixed_cidr"])
+ except netaddr.AddrFormatError:
+ raise exception.InvalidCidr(cidr=kwargs["fixed_cidr"])
LOG.debug('Create network: |%s|', kwargs)
return self._do_create_networks(context, **kwargs)
+ @staticmethod
+ def _index_of(subnet, ip):
+ try:
+ start = netaddr.IPAddress(ip)
+ except netaddr.AddrFormatError:
+ raise exception.InvalidAddress(address=ip)
+ index = start.value - subnet.value
+ if index < 0 or index >= subnet.size:
+ raise exception.AddressOutOfRange(address=ip, cidr=str(subnet))
+ return index
+
def _do_create_networks(self, context,
label, cidr, multi_host, num_networks,
network_size, cidr_v6, gateway, gateway_v6, bridge,
bridge_interface, dns1=None, dns2=None,
fixed_cidr=None, mtu=None, dhcp_server=None,
- enable_dhcp=None, share_address=None, **kwargs):
+ enable_dhcp=None, share_address=None,
+ allowed_start=None, allowed_end=None, **kwargs):
"""Create networks based on parameters."""
# NOTE(jkoelker): these are dummy values to make sure iter works
# TODO(tr3buchet): disallow carving up networks
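
The new `_index_of()` helper converts `allowed_start`/`allowed_end` addresses into reserved-IP counts at the bottom and top of the subnet. A sketch of the same arithmetic using the `netaddr` package the module already imports:

    import netaddr

    def index_of(subnet, ip):
        index = netaddr.IPAddress(ip).value - subnet.value
        if index < 0 or index >= subnet.size:
            raise ValueError('%s is outside %s' % (ip, subnet))
        return index

    subnet = netaddr.IPNetwork('10.0.0.0/24')
    bottom_reserved = index_of(subnet, '10.0.0.10')                 # 10
    top_reserved = subnet.size - 1 - index_of(subnet, '10.0.0.200') # 55
    print(bottom_reserved, top_reserved)
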
@@ -1200,13 +1237,12 @@ def find_next(subnet):
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
- raise exception.CidrConflict(_('cidr already in use'))
+ raise exception.CidrConflict(cidr=subnet,
+ other=subnet)
for used_subnet in used_subnets:
if subnet in used_subnet:
- msg = _('requested cidr (%(cidr)s) conflicts with '
- 'existing supernet (%(super)s)')
- raise exception.CidrConflict(
- msg % {'cidr': subnet, 'super': used_subnet})
+ raise exception.CidrConflict(cidr=subnet,
+ other=used_subnet)
if used_subnet in subnet:
next_subnet = find_next(subnet)
if next_subnet:
@@ -1214,11 +1250,8 @@ def find_next(subnet):
subnets_v4.append(next_subnet)
subnet = next_subnet
else:
- msg = _('requested cidr (%(cidr)s) conflicts '
- 'with existing smaller cidr '
- '(%(smaller)s)')
- raise exception.CidrConflict(
- msg % {'cidr': subnet, 'smaller': used_subnet})
+ raise exception.CidrConflict(cidr=subnet,
+ other=used_subnet)
networks = objects.NetworkList(context=context, objects=[])
subnets = itertools.izip_longest(subnets_v4, subnets_v6)
@@ -1241,17 +1274,32 @@ def find_next(subnet):
else:
net.label = label
+ bottom_reserved = self._bottom_reserved_ips
+ top_reserved = self._top_reserved_ips
extra_reserved = []
if cidr and subnet_v4:
+ current = subnet_v4[1]
+ if allowed_start:
+ val = self._index_of(subnet_v4, allowed_start)
+ current = netaddr.IPAddress(allowed_start)
+ bottom_reserved = val
+ if allowed_end:
+ val = self._index_of(subnet_v4, allowed_end)
+ top_reserved = subnet_v4.size - 1 - val
net.cidr = str(subnet_v4)
net.netmask = str(subnet_v4.netmask)
- net.gateway = gateway or str(subnet_v4[1])
net.broadcast = str(subnet_v4.broadcast)
- net.dhcp_start = str(subnet_v4[2])
+ if gateway:
+ net.gateway = gateway
+ else:
+ net.gateway = current
+ current += 1
if not dhcp_server:
dhcp_server = net.gateway
- if net.dhcp_start == dhcp_server:
- net.dhcp_start = str(subnet_v4[3])
+ net.dhcp_start = current
+ current += 1
+ if str(net.dhcp_start) == dhcp_server:
+ net.dhcp_start = current
net.dhcp_server = dhcp_server
extra_reserved.append(str(net.dhcp_server))
extra_reserved.append(str(net.gateway))
@@ -1277,8 +1325,9 @@ def find_next(subnet):
used_vlans.sort()
vlan = used_vlans[-1] + 1
- net.vpn_private_address = str(subnet_v4[2])
- net.dhcp_start = str(subnet_v4[3])
+ net.vpn_private_address = net.dhcp_start
+ extra_reserved.append(str(net.vpn_private_address))
+ net.dhcp_start = net.dhcp_start + 1
net.vlan = vlan
net.bridge = 'br%s' % vlan
@@ -1292,7 +1341,8 @@ def find_next(subnet):
if cidr and subnet_v4:
self._create_fixed_ips(context, net.id, fixed_cidr,
- extra_reserved)
+ extra_reserved, bottom_reserved,
+ top_reserved)
# NOTE(danms): Remove this in RPC API v2.0
return obj_base.obj_to_primitive(networks)
@@ -1308,8 +1358,7 @@ def delete_network(self, context, fixed_range, uuid,
LOG.debug('Delete network %s', network['uuid'])
if require_disassociated and network.project_id is not None:
- raise ValueError(_('Network must be disassociated from project %s'
- ' before delete') % network.project_id)
+ raise exception.NetworkHasProject(project_id=network.project_id)
network.destroy()
@property
@@ -1323,16 +1372,12 @@ def _top_reserved_ips(self): # pylint: disable=R0201
return 1 # broadcast
def _create_fixed_ips(self, context, network_id, fixed_cidr=None,
- extra_reserved=None):
+ extra_reserved=None, bottom_reserved=0,
+ top_reserved=0):
"""Create all fixed ips for network."""
network = self._get_network_by_id(context, network_id)
- # NOTE(vish): Should these be properties of the network as opposed
- # to properties of the manager class?
- bottom_reserved = self._bottom_reserved_ips
- top_reserved = self._top_reserved_ips
- if extra_reserved == None:
+ if extra_reserved is None:
extra_reserved = []
-
if not fixed_cidr:
fixed_cidr = netaddr.IPNetwork(network['cidr'])
num_ips = len(fixed_cidr)
@@ -1372,7 +1417,7 @@ def setup_networks_on_host(self, context, instance_id, host,
for vif in vifs:
network = objects.Network.get_by_id(context, vif.network_id)
if not network.multi_host:
- #NOTE (tr3buchet): if using multi_host, host is instance[host]
+ # NOTE (tr3buchet): if using multi_host, host is instance[host]
host = network['host']
if self.host == host or host is None:
# at this point i am the correct host, or host doesn't
@@ -1398,6 +1443,13 @@ def rpc_setup_network_on_host(self, context, network_id, teardown):
network = objects.Network.get_by_id(context, network_id)
call_func(context, network)
+ def _initialize_network(self, network):
+ if network.enable_dhcp:
+ is_ext = (network.dhcp_server is not None and
+ network.dhcp_server != network.gateway)
+ self.l3driver.initialize_network(network.cidr, is_ext)
+ self.l3driver.initialize_gateway(network)
+
def _setup_network_on_host(self, context, network):
"""Sets up network on this host."""
raise NotImplementedError()
@@ -1728,12 +1780,12 @@ def init_host(self):
def _setup_network_on_host(self, context, network):
"""Sets up network on this host."""
- network['dhcp_server'] = self._get_dhcp_ip(context, network)
+ network.dhcp_server = self._get_dhcp_ip(context, network)
- self.l3driver.initialize_network(network.get('cidr'))
- self.l3driver.initialize_gateway(network)
+ self._initialize_network(network)
- if not CONF.fake_network:
+ # NOTE(vish): if dhcp server is not set then don't dhcp
+ if not CONF.fake_network and network.enable_dhcp:
dev = self.driver.get_dev(network)
# NOTE(dprince): dhcp DB queries require elevated context
elevated = context.elevated()
@@ -1745,7 +1797,8 @@ def _setup_network_on_host(self, context, network):
network.save()
def _teardown_network_on_host(self, context, network):
- if not CONF.fake_network:
+ # NOTE(vish): if dhcp server is not set then don't dhcp
+ if not CONF.fake_network and network.enable_dhcp:
network['dhcp_server'] = self._get_dhcp_ip(context, network)
dev = self.driver.get_dev(network)
# NOTE(dprince): dhcp DB queries require elevated context
@@ -1919,8 +1972,9 @@ def _get_networks_for_instance(self, context, instance_id, project_id,
network_uuids = [uuid for (uuid, fixed_ip) in requested_networks]
networks = self._get_networks_by_uuids(context, network_uuids)
else:
- networks = objects.NetworkList.get_by_project(context,
- project_id)
+ # NOTE(vish): Allocates network on demand so requires admin.
+ networks = objects.NetworkList.get_by_project(
+ context.elevated(), project_id)
return networks
def create_networks(self, context, **kwargs):
@@ -1962,8 +2016,7 @@ def _setup_network_on_host(self, context, network):
address = network.vpn_public_address
network.dhcp_server = self._get_dhcp_ip(context, network)
- self.l3driver.initialize_network(network.get('cidr'))
- self.l3driver.initialize_gateway(network)
+ self._initialize_network(network)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
@@ -1975,8 +2028,9 @@ def _setup_network_on_host(self, context, network):
if not CONF.fake_network:
dev = self.driver.get_dev(network)
# NOTE(dprince): dhcp DB queries require elevated context
- elevated = context.elevated()
- self.driver.update_dhcp(elevated, dev, network)
+ if network.enable_dhcp:
+ elevated = context.elevated()
+ self.driver.update_dhcp(elevated, dev, network)
if CONF.use_ipv6:
self.driver.update_ra(context, dev, network)
gateway = utils.get_my_linklocal(dev)
@@ -1988,9 +2042,6 @@ def _teardown_network_on_host(self, context, network):
if not CONF.fake_network:
network['dhcp_server'] = self._get_dhcp_ip(context, network)
dev = self.driver.get_dev(network)
- # NOTE(dprince): dhcp DB queries require elevated context
- elevated = context.elevated()
- self.driver.update_dhcp(elevated, dev, network)
# NOTE(ethuleau): For multi hosted networks, if the network is no
# more used on this host and if VPN forwarding rule aren't handed
@@ -2001,7 +2052,8 @@ def _teardown_network_on_host(self, context, network):
not objects.Network.in_use_on_host(context, network['id'],
self.host)):
LOG.debug("Remove unused gateway %s", network['bridge'])
- self.driver.kill_dhcp(dev)
+ if network.enable_dhcp:
+ self.driver.kill_dhcp(dev)
self.l3driver.remove_gateway(network)
if not self._uses_shared_ip(network):
fip = objects.FixedIP.get_by_address(context,
@@ -2009,7 +2061,10 @@ def _teardown_network_on_host(self, context, network):
fip.allocated = False
fip.host = None
fip.save()
- else:
+ # NOTE(vish): if dhcp server is not set then don't dhcp
+ elif network.enable_dhcp:
+ # NOTE(dprince): dhcp DB queries require elevated context
+ elevated = context.elevated()
self.driver.update_dhcp(elevated, dev, network)
def _get_network_dict(self, network):
diff --git a/nova/network/minidns.py b/nova/network/minidns.py
index 6c1dce9ce3..2f9c388a5a 100644
--- a/nova/network/minidns.py
+++ b/nova/network/minidns.py
@@ -19,8 +19,8 @@
from oslo.config import cfg
from nova import exception
+from nova.i18n import _
from nova.network import dns_driver
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
CONF = cfg.CONF
diff --git a/nova/network/model.py b/nova/network/model.py
index 9febd55984..441dbf9cb8 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -20,7 +20,7 @@
import six
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
@@ -31,18 +31,21 @@ def ensure_string_keys(d):
# Constants for the 'vif_type' field in VIF class
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
+VIF_TYPE_DVS = 'dvs'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_MLNX_DIRECT = 'mlnx_direct'
VIF_TYPE_MIDONET = 'midonet'
+VIF_TYPE_VHOSTUSER = 'vhostuser'
VIF_TYPE_OTHER = 'other'
# Constants for dictionary keys in the 'vif_details' field in the VIF
# class
VIF_DETAIL_PORT_FILTER = 'port_filter'
VIF_DETAIL_OVS_HYBRID_PLUG = 'ovs_hybrid_plug'
+VIF_DETAILS_PHYSICAL_NETWORK = 'physical_network'
# Constants for the 'vif_model' values
VIF_MODEL_VIRTIO = 'virtio'
@@ -52,6 +55,7 @@ def ensure_string_keys(d):
VIF_MODEL_E1000 = 'e1000'
VIF_MODEL_E1000E = 'e1000e'
VIF_MODEL_NETFRONT = 'netfront'
+VIF_MODEL_SPAPR_VLAN = 'spapr-vlan'
# Constant for max length of network interface names
# eg 'bridge' in the Network class or 'devname' in
@@ -270,6 +274,7 @@ class VIF(Model):
def __init__(self, id=None, address=None, network=None, type=None,
details=None, devname=None, ovs_interfaceid=None,
qbh_params=None, qbg_params=None, active=False,
+ vhostuser_mode=None, vhostuser_path=None,
**kwargs):
super(VIF, self).__init__()
@@ -284,13 +289,15 @@ def __init__(self, id=None, address=None, network=None, type=None,
self['qbh_params'] = qbh_params
self['qbg_params'] = qbg_params
self['active'] = active
+ self['vhostuser_path'] = vhostuser_path
+ self['vhostuser_mode'] = vhostuser_mode
self._set_meta(kwargs)
def __eq__(self, other):
keys = ['id', 'address', 'network', 'type', 'details', 'devname',
'ovs_interfaceid', 'qbh_params', 'qbg_params',
- 'active']
+ 'active', 'vhostuser_path', 'vhostuser_mode']
return all(self[k] == other[k] for k in keys)
def __ne__(self, other):
@@ -344,6 +351,12 @@ def is_hybrid_plug_enabled(self):
def is_neutron_filtering_enabled(self):
return self['details'].get(VIF_DETAIL_PORT_FILTER, False)
+ def get_physical_network(self):
+ phy_network = self['network']['meta'].get('physical_network')
+ if not phy_network:
+ phy_network = self['details'].get(VIF_DETAILS_PHYSICAL_NETWORK)
+ return phy_network
+
@classmethod
def hydrate(cls, vif):
vif = cls(**ensure_string_keys(vif))
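
`VIF.get_physical_network()` added above performs a two-step lookup: network metadata first, then the `vif_details` fallback. A dict-level sketch with a hand-built VIF, not a hydrated nova model:

    def get_physical_network(vif):
        phy = vif['network']['meta'].get('physical_network')
        if not phy:
            phy = vif['details'].get('physical_network')
        return phy

    vif = {'network': {'meta': {}}, 'details': {'physical_network': 'physnet1'}}
    assert get_physical_network(vif) == 'physnet1'  # falls back to vif_details
    vif['network']['meta']['physical_network'] = 'physnet0'
    assert get_physical_network(vif) == 'physnet0'  # network metadata wins
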
diff --git a/nova/network/neutronv2/__init__.py b/nova/network/neutronv2/__init__.py
index 4a0b76adbb..e442ae8e44 100644
--- a/nova/network/neutronv2/__init__.py
+++ b/nova/network/neutronv2/__init__.py
@@ -49,7 +49,10 @@ def _get_client(token=None, admin=False):
}
if admin:
- params['username'] = CONF.neutron.admin_username
+ if CONF.neutron.admin_user_id:
+ params['user_id'] = CONF.neutron.admin_user_id
+ else:
+ params['username'] = CONF.neutron.admin_username
if CONF.neutron.admin_tenant_id:
params['tenant_id'] = CONF.neutron.admin_tenant_id
else:
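
The `_get_client()` change gives `admin_user_id` precedence over `admin_username`, mirroring the existing tenant-id-over-tenant-name rule (ids stay unique across Keystone V3 domains where names do not). A sketch of the precedence with a plain dict standing in for `CONF.neutron`:

    def admin_identity(conf):
        params = {}
        if conf.get('admin_user_id'):
            params['user_id'] = conf['admin_user_id']
        else:
            params['username'] = conf['admin_username']
        if conf.get('admin_tenant_id'):
            params['tenant_id'] = conf['admin_tenant_id']
        else:
            params['tenant_name'] = conf['admin_tenant_name']
        return params

    print(admin_identity({'admin_user_id': 'u-123', 'admin_tenant_name': 'svc'}))
    # {'user_id': 'u-123', 'tenant_name': 'svc'}
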
diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index 2cc2123f9f..5566eb329a 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -20,17 +20,19 @@
from neutronclient.common import exceptions as neutron_client_exc
from oslo.config import cfg
+from nova.api.openstack import extensions
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import conductor
from nova import exception
+from nova.i18n import _, _LE, _LW
from nova.network import base_api
from nova.network import model as network_model
from nova.network import neutronv2
from nova.network.neutronv2 import constants
from nova.network.security_group import openstack_driver
+from nova import objects
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
@@ -45,6 +47,8 @@
help='Timeout value for connecting to neutron in seconds',
deprecated_group='DEFAULT',
deprecated_name='neutron_url_timeout'),
+ cfg.StrOpt('admin_user_id',
+ help='User id for connecting to neutron in admin context'),
cfg.StrOpt('admin_username',
help='Username for connecting to neutron in admin context',
deprecated_group='DEFAULT',
@@ -60,9 +64,9 @@
deprecated_name='neutron_admin_tenant_id'),
cfg.StrOpt('admin_tenant_name',
help='Tenant name for connecting to neutron in admin context. '
- 'This option is mutually exclusive with '
- 'admin_tenant_id. Note that with Keystone V3 '
- 'tenant names are only unique within a domain.',
+ 'This option will be ignored if neutron_admin_tenant_id '
+ 'is set. Note that with Keystone V3 tenant names are '
+ 'only unique within a domain.',
deprecated_group='DEFAULT',
deprecated_name='neutron_admin_tenant_name'),
cfg.StrOpt('region_name',
@@ -104,6 +108,10 @@
'neutron client requests.',
deprecated_group='DEFAULT',
deprecated_name='neutron_ca_certificates_file'),
+ cfg.BoolOpt('allow_duplicate_networks',
+ default=False,
+ help='Allow an instance to have multiple vNICs attached to '
+ 'the same Neutron network.'),
]
CONF = cfg.CONF
@@ -113,6 +121,9 @@
CONF.import_opt('flat_injected', 'nova.network.manager')
LOG = logging.getLogger(__name__)
+soft_external_network_attach_authorize = extensions.soft_core_authorizer(
+ 'network', 'attach_external_network')
+
class API(base_api.NetworkAPI):
"""API for interacting with the neutron 2.x API."""
@@ -157,7 +168,7 @@ def _get_available_networks(self, context, project_id,
nets,
net_ids)
- if not context.is_admin:
+ if not soft_external_network_attach_authorize(context):
for net in nets:
# Perform this check here rather than in validate_networks to
# ensure the check is performed every time
@@ -185,6 +196,8 @@ def _create_port(self, port_client, instance, network_id, port_req_body,
:param dhcp_opts: Optional DHCP options.
:returns: ID of the created port.
:raises PortLimitExceeded: If neutron fails with an OverQuota error.
+ :raises NoMoreFixedIps: If neutron fails with
+ IpAddressGenerationFailure error.
"""
try:
if fixed_ip:
@@ -206,18 +219,31 @@ def _create_port(self, port_client, instance, network_id, port_req_body,
LOG.debug('Successfully created port: %s', port_id,
instance=instance)
return port_id
- except neutron_client_exc.NeutronClientException as e:
- # NOTE(mriedem): OverQuota in neutron is a 409
- if e.status_code == 409:
- LOG.warning(_('Neutron error: quota exceeded'))
- raise exception.PortLimitExceeded()
+ except neutron_client_exc.OverQuotaClient:
+ LOG.warning(_LW(
+ 'Neutron error: Port quota exceeded in tenant: %s'),
+ port_req_body['port']['tenant_id'], instance=instance)
+ raise exception.PortLimitExceeded()
+ except neutron_client_exc.IpAddressGenerationFailureClient:
+ LOG.warning(_LW('Neutron error: No more fixed IPs in network: %s'),
+ network_id, instance=instance)
+ raise exception.NoMoreFixedIps()
+ except neutron_client_exc.MacAddressInUseClient:
+ LOG.warning(_LW('Neutron error: MAC address %(mac)s is already '
+ 'in use on network %(network)s.') %
+ {'mac': mac_address, 'network': network_id},
+ instance=instance)
+ raise exception.PortInUse(port_id=mac_address)
+ except neutron_client_exc.NeutronClientException:
with excutils.save_and_reraise_exception():
- LOG.exception(_('Neutron error creating port on network %s'),
+ LOG.exception(_LE('Neutron error creating port on network %s'),
network_id, instance=instance)
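
The `_create_port` rewrite above replaces a status-code check with per-exception translation: each specific neutronclient error maps to a nova-level exception, and only the generic case re-raises with a logged traceback. A toy demonstration of the idiom, using local stand-in exception classes rather than the real ones:

    class OverQuotaClient(Exception): pass
    class IpAddressGenerationFailureClient(Exception): pass
    class PortLimitExceeded(Exception): pass
    class NoMoreFixedIps(Exception): pass

    def create_port(do_create):
        try:
            return do_create()
        except OverQuotaClient:
            raise PortLimitExceeded()          # tenant port quota is spent
        except IpAddressGenerationFailureClient:
            raise NoMoreFixedIps()             # subnet has no free addresses

    def fail():
        raise OverQuotaClient()

    try:
        create_port(fail)
    except PortLimitExceeded:
        print('translated to a nova-level error')
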
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocate network resources for the instance.
+ :param context: The request context.
+ :param instance: nova.objects.instance.Instance object.
:param requested_networks: optional value containing
network_id, fixed_ip, and port_id
:param security_groups: security groups to allocate for instance
@@ -244,15 +270,15 @@ def allocate_for_instance(self, context, instance, **kwargs):
available_macs = set(hypervisor_macs)
neutron = neutronv2.get_client(context)
LOG.debug('allocate_for_instance()', instance=instance)
- if not instance['project_id']:
+ if not instance.project_id:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
- reason=msg % instance['uuid'])
+ reason=msg % instance.uuid)
requested_networks = kwargs.get('requested_networks')
dhcp_opts = kwargs.get('dhcp_options', None)
ports = {}
- fixed_ips = {}
net_ids = []
+ ordered_networks = []
if requested_networks:
for network_id, fixed_ip, port_id in requested_networks:
if port_id:
@@ -262,7 +288,7 @@ def allocate_for_instance(self, context, instance, **kwargs):
if hypervisor_macs is not None:
if port['mac_address'] not in hypervisor_macs:
raise exception.PortNotUsable(port_id=port_id,
- instance=instance['uuid'])
+ instance=instance.uuid)
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
@@ -270,26 +296,37 @@ def allocate_for_instance(self, context, instance, **kwargs):
# discard rather than popping.
available_macs.discard(port['mac_address'])
network_id = port['network_id']
- ports[network_id] = port
- elif fixed_ip and network_id:
- fixed_ips[network_id] = fixed_ip
+ ports[port_id] = port
if network_id:
net_ids.append(network_id)
+ ordered_networks.append((network_id, fixed_ip, port_id))
- nets = self._get_available_networks(context, instance['project_id'],
+ nets = self._get_available_networks(context, instance.project_id,
net_ids)
-
if not nets:
- LOG.warn(_("No network configured!"), instance=instance)
+ LOG.warn(_LW("No network configured!"), instance=instance)
return network_model.NetworkInfo([])
+ # if this function is directly called without a requested_network param
+ # or if it is indirectly called through allocate_port_for_instance()
+ # with None params=(network_id=None, requested_ip=None, port_id=None):
+ if (not requested_networks
+ or requested_networks == [(None, None, None)]):
+ # bug/1267723 - if no network is requested and more
+ # than one is available then raise NetworkAmbiguous Exception
+ if len(nets) > 1:
+ msg = _("Multiple possible networks found, use a Network "
+ "ID to be more specific.")
+ raise exception.NetworkAmbiguous(msg)
+ ordered_networks.append((nets[0]['id'], None, None))
+
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
- search_opts = {'tenant_id': instance['project_id']}
+ search_opts = {'tenant_id': instance.project_id}
user_security_groups = neutron.list_security_groups(
**search_opts).get('security_groups')
@@ -321,7 +358,20 @@ def allocate_for_instance(self, context, instance, **kwargs):
touched_port_ids = []
created_port_ids = []
ports_in_requested_order = []
- for network in nets:
+ nets_in_requested_order = []
+ for network_id, fixed_ip, port_id in ordered_networks:
+ # Network lookup for available network_id
+ network = None
+ for net in nets:
+ if net['id'] == network_id:
+ network = net
+ break
+ # If network_id did not pass validate_networks() and is not
+ # available here, skip it safely rather than continuing with
+ # a None network.
+ else:
+ continue
+
+ nets_in_requested_order.append(network)
# If security groups are requested on an instance then the
# network must have a subnet associated with it. Some plugins
# implement the port-security extension which requires
@@ -334,25 +384,25 @@ def allocate_for_instance(self, context, instance, **kwargs):
raise exception.SecurityGroupCannotBeApplied()
network_id = network['id']
- zone = 'compute:%s' % instance['availability_zone']
- port_req_body = {'port': {'device_id': instance['uuid'],
+ zone = 'compute:%s' % instance.availability_zone
+ port_req_body = {'port': {'device_id': instance.uuid,
'device_owner': zone}}
try:
- port = ports.get(network_id)
self._populate_neutron_extension_values(context, instance,
port_req_body)
# Requires admin creds to set port bindings
port_client = (neutron if not
self._has_port_binding_extension(context) else
neutronv2.get_client(context, admin=True))
- if port:
+ if port_id:
+ port = ports[port_id]
port_client.update_port(port['id'], port_req_body)
touched_port_ids.append(port['id'])
ports_in_requested_order.append(port['id'])
else:
created_port = self._create_port(
port_client, instance, network_id,
- port_req_body, fixed_ips.get(network_id),
+ port_req_body, fixed_ip,
security_group_ids, available_macs, dhcp_opts)
created_port_ids.append(created_port)
ports_in_requested_order.append(created_port)
@@ -370,17 +420,18 @@ def allocate_for_instance(self, context, instance, **kwargs):
port_client = neutron
port_client.update_port(port_id, port_req_body)
except Exception:
- msg = _("Failed to update port %s")
+ msg = _LE("Failed to update port %s")
LOG.exception(msg, port_id)
for port_id in created_port_ids:
try:
neutron.delete_port(port_id)
except Exception:
- msg = _("Failed to delete port %s")
+ msg = _LE("Failed to delete port %s")
LOG.exception(msg, port_id)
- nw_info = self.get_instance_nw_info(context, instance, networks=nets,
+ nw_info = self.get_instance_nw_info(context, instance,
+ networks=nets_in_requested_order,
port_ids=ports_in_requested_order)
# NOTE(danms): Only return info about ports we created in this run.
# In the initial allocation case, this will be everything we created,
@@ -425,7 +476,7 @@ def _populate_neutron_extension_values(self, context, instance,
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug('deallocate_for_instance()', instance=instance)
- search_opts = {'device_id': instance['uuid']}
+ search_opts = {'device_id': instance.uuid}
neutron = neutronv2.get_client(context)
data = neutron.list_ports(**search_opts)
ports = [port['id'] for port in data.get('ports', [])]
@@ -448,10 +499,10 @@ def deallocate_for_instance(self, context, instance, **kwargs):
neutron.delete_port(port)
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
- LOG.warning(_("Port %s does not exist"), port)
+ LOG.warning(_LW("Port %s does not exist"), port)
else:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to delete neutron port %s"),
+ LOG.exception(_LE("Failed to delete neutron port %s"),
port)
# NOTE(arosen): This clears out the network_cache only if the instance
@@ -475,7 +526,7 @@ def deallocate_port_for_instance(self, context, instance, port_id):
try:
neutronv2.get_client(context).delete_port(port_id)
except Exception:
- LOG.exception(_("Failed to delete neutron port %s") %
+ LOG.exception(_LE("Failed to delete neutron port %s"),
port_id)
return self.get_instance_nw_info(context, instance)
@@ -489,13 +540,13 @@ def show_port(self, context, port_id):
return neutronv2.get_client(context).show_port(port_id)
def get_instance_nw_info(self, context, instance, networks=None,
port_ids=None, use_slave=False):
"""Return network information for specified instance
and update cache.
"""
# NOTE(geekinutah): It would be nice if use_slave had us call
# special APIs that pummeled slaves instead of
# the master. For now we just ignore this arg.
result = self._get_instance_nw_info(context, instance, networks,
port_ids)
base_api.update_instance_cache_with_nw_info(self, context, instance,
@@ -573,8 +624,8 @@ def add_fixed_ip_to_instance(self, context, instance, network_id):
port_req_body)
return self._get_instance_nw_info(context, instance)
except Exception as ex:
- msg = _("Unable to update port %(portid)s on subnet "
- "%(subnet_id)s with failure: %(exception)s")
+ msg = ("Unable to update port %(portid)s on subnet "
+ "%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex})
@@ -602,8 +653,8 @@ def remove_fixed_ip_from_instance(self, context, instance, address):
neutronv2.get_client(context).update_port(p['id'],
port_req_body)
except Exception as ex:
- msg = _("Unable to update port %(portid)s with"
- " failure: %(exception)s")
+ msg = ("Unable to update port %(portid)s with"
+ " failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex})
return self._get_instance_nw_info(context, instance)
@@ -648,7 +699,7 @@ def validate_networks(self, context, requested_networks, num_instances):
port = None
else:
with excutils.save_and_reraise_exception():
- LOG.exception(_("Failed to access port %s"),
+ LOG.exception(_LE("Failed to access port %s"),
port_id)
if not port:
raise exception.PortNotFound(port_id=port_id)
@@ -683,8 +734,9 @@ def validate_networks(self, context, requested_networks, num_instances):
address=fixed_ip,
instance_uuid=i_uuid)
- if net_id in instance_on_net_ids:
- raise exception.NetworkDuplicated(network_id=net_id)
+ if (not CONF.neutron.allow_duplicate_networks and
+ net_id in instance_on_net_ids):
+ raise exception.NetworkDuplicated(network_id=net_id)
instance_on_net_ids.append(net_id)
# Now check to see if all requested networks exist
@@ -702,10 +754,11 @@ def validate_networks(self, context, requested_networks, num_instances):
requested_netid_set = set(net_ids_requested)
returned_netid_set = set([net['id'] for net in nets])
lostid_set = requested_netid_set - returned_netid_set
- id_str = ''
- for _id in lostid_set:
- id_str = id_str and id_str + ', ' + _id or _id
- raise exception.NetworkNotFound(network_id=id_str)
+ if lostid_set:
+ id_str = ''
+ for _id in lostid_set:
+ id_str = id_str and id_str + ', ' + _id or _id
+ raise exception.NetworkNotFound(network_id=id_str)
# Note(PhilD): Ideally Nova would create all required ports as part of
# network validation, but port creation requires some details
@@ -796,7 +849,7 @@ def associate_floating_ip(self, context, instance,
instance_id=orig_instance_uuid)
LOG.info(_('re-assign floating IP %(address)s from '
'instance %(instance_id)s') % msg_dict)
- orig_instance = self.db.instance_get_by_uuid(context,
+ orig_instance = objects.Instance.get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
@@ -814,7 +867,10 @@ def get_all(self, context):
def get(self, context, network_uuid):
"""Get specific network for client."""
client = neutronv2.get_client(context)
- network = client.show_network(network_uuid).get('network') or {}
+ try:
+ network = client.show_network(network_uuid).get('network') or {}
+ except neutron_client_exc.NetworkNotFoundClient:
+ raise exception.NetworkNotFound(network_id=network_uuid)
network['label'] = network['name']
return network
@@ -877,7 +933,7 @@ def get_floating_ip(self, context, id):
raise exception.FloatingIpNotFound(id=id)
else:
with excutils.save_and_reraise_exception():
- LOG.exception(_('Unable to access floating IP %s'), id)
+ LOG.exception(_LE('Unable to access floating IP %s'), id)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
@@ -891,10 +947,12 @@ def _get_floating_ip_pools(self, client, project_id=None):
return data['networks']
def get_floating_ip_pools(self, context):
- """Return floating ip pools."""
+ """Return floating ip pool names."""
client = neutronv2.get_client(context)
pools = self._get_floating_ip_pools(client)
- return [{'name': n['name'] or n['id']} for n in pools]
+ # Note(salv-orlando): Return a list of names to be consistent with
+ # nova.network.api.get_floating_ip_pools
+ return [n['name'] or n['id'] for n in pools]
def _format_floating_ip_model(self, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
@@ -1009,8 +1067,8 @@ def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
if e.status_code == 404:
return []
with excutils.save_and_reraise_exception():
- LOG.exception(_('Unable to access floating IP %(fixed_ip)s '
- 'for port %(port_id)s'),
+ LOG.exception(_LE('Unable to access floating IP %(fixed_ip)s '
+ 'for port %(port_id)s'),
{'fixed_ip': fixed_ip, 'port_id': port})
return data['floatingips']
@@ -1068,7 +1126,7 @@ def migrate_instance_finish(self, context, instance, migration):
neutron.update_port(p['id'], port_req_body)
except Exception:
with excutils.save_and_reraise_exception():
- msg = _("Unable to update host of port %s")
+ msg = _LE("Unable to update host of port %s")
LOG.exception(msg, p['id'])
def add_network_to_project(self, context, project_id, network_uuid=None):
@@ -1104,9 +1162,9 @@ def _nw_info_build_network(self, port, networks, subnets):
break
else:
tenant_id = port['tenant_id']
- LOG.warning(_("Network %(id)s not matched with the tenants "
- "network! The ports tenant %(tenant_id)s will be "
- "used."),
+ LOG.warning(_LW("Network %(id)s not matched with the tenants "
+ "network! The ports tenant %(tenant_id)s will be "
+ "used."),
{'id': port['network_id'], 'tenant_id': tenant_id})
bridge = None
@@ -1122,8 +1180,15 @@ def _nw_info_build_network(self, port, networks, subnets):
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = "brq" + port['network_id']
should_create_bridge = True
+ elif vif_type == network_model.VIF_TYPE_DVS:
+ if network_name is None:
+ bridge = port['network_id']
+ else:
+ bridge = '%s-%s' % (network_name, port['network_id'])
- if bridge is not None:
+ # Prune the bridge name if necessary. For the DVS this is not done
+ # as the bridge is a '<network_name>-<network_id>' pair.
+ if bridge is not None and vif_type != network_model.VIF_TYPE_DVS:
bridge = bridge[:network_model.NIC_NAME_LEN]
network = network_model.Network(
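
The hunks above replace status-code sniffing on NeutronClientException with the typed exception classes that python-neutronclient raises. A minimal standalone sketch of the same mapping, assuming only python-neutronclient is installed; the RuntimeError stand-ins are illustrative, since Nova raises its own exception classes:

    from neutronclient.common import exceptions as neutron_client_exc

    def create_port(client, network_id, tenant_id):
        # client is an already-authenticated neutronclient v2.0 Client
        body = {'port': {'network_id': network_id, 'tenant_id': tenant_id}}
        try:
            return client.create_port(body)['port']['id']
        except neutron_client_exc.OverQuotaClient:
            # Mapped to exception.PortLimitExceeded in the hunk above
            raise RuntimeError('port quota exceeded in tenant %s' % tenant_id)
        except neutron_client_exc.IpAddressGenerationFailureClient:
            # Mapped to exception.NoMoreFixedIps in the hunk above
            raise RuntimeError('no more fixed IPs in network %s' % network_id)
        except neutron_client_exc.MacAddressInUseClient:
            # Mapped to exception.PortInUse in the hunk above
            raise RuntimeError('MAC already in use on network %s' % network_id)
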
diff --git a/nova/network/nova_ipam_lib.py b/nova/network/nova_ipam_lib.py
index 58afe93d23..f49904fca5 100644
--- a/nova/network/nova_ipam_lib.py
+++ b/nova/network/nova_ipam_lib.py
@@ -45,18 +45,20 @@ def get_subnets_by_net_id(self, context, tenant_id, net_id, _vif_id=None):
'network_id': n.uuid,
'cidr': n.cidr,
'gateway': n.gateway,
+ 'dhcp_server': getattr(n, 'dhcp_server'),
'broadcast': n.broadcast,
'netmask': n.netmask,
'version': 4,
'dns1': n.dns1,
'dns2': n.dns2}
- #TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4.
- # this is probably bad as there is no way to add v6
- # dns to nova
+ # TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4.
+ # this is probably bad as there is no way to add v6
+ # dns to nova
subnet_v6 = {
'network_id': n.uuid,
'cidr': n.cidr_v6,
'gateway': n.gateway_v6,
+ 'dhcp_server': None,
'broadcast': None,
'netmask': n.netmask_v6,
'version': 6,
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index d8c0392e59..99034aeb90 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -46,39 +46,41 @@ class NetworkAPI(object):
API version history:
- 1.0 - Initial version.
- 1.1 - Adds migrate_instance_[start|finish]
- 1.2 - Make migrate_instance_[start|finish] a little more flexible
- 1.3 - Adds fanout cast update_dns for multi_host networks
- 1.4 - Add get_backdoor_port()
- 1.5 - Adds associate
- 1.6 - Adds instance_uuid to _{dis,}associate_floating_ip
- 1.7 - Adds method get_floating_ip_pools to replace get_floating_pools
- 1.8 - Adds macs to allocate_for_instance
- 1.9 - Adds rxtx_factor to [add|remove]_fixed_ip, removes instance_uuid
- from allocate_for_instance and instance_get_nw_info
+ * 1.0 - Initial version.
+ * 1.1 - Adds migrate_instance_[start|finish]
+ * 1.2 - Make migrate_instance_[start|finish] a little more flexible
+ * 1.3 - Adds fanout cast update_dns for multi_host networks
+ * 1.4 - Add get_backdoor_port()
+ * 1.5 - Adds associate
+ * 1.6 - Adds instance_uuid to _{dis,}associate_floating_ip
+ * 1.7 - Adds method get_floating_ip_pools to replace get_floating_pools
+ * 1.8 - Adds macs to allocate_for_instance
+ * 1.9 - Adds rxtx_factor to [add|remove]_fixed_ip, removes
+ instance_uuid from allocate_for_instance and
+ instance_get_nw_info
... Grizzly supports message version 1.9. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.9.
- 1.10- Adds (optional) requested_networks to deallocate_for_instance
+ * 1.10 - Adds (optional) requested_networks to deallocate_for_instance
... Havana supports message version 1.10. So, any changes to existing
methods in 1.x after that point should be done such that they can
handle the version_cap being set to 1.10.
- NOTE: remove unused method get_vifs_by_instance()
- NOTE: remove unused method get_vif_by_mac_address()
- NOTE: remove unused method get_network()
- NOTE: remove unused method get_all_networks()
- 1.11 - Add instance to deallocate_for_instance(). Remove instance_id,
- project_id, and host.
- 1.12 - Add instance to deallocate_fixed_ip()
+ * NOTE: remove unused method get_vifs_by_instance()
+ * NOTE: remove unused method get_vif_by_mac_address()
+ * NOTE: remove unused method get_network()
+ * NOTE: remove unused method get_all_networks()
+ * 1.11 - Add instance to deallocate_for_instance().
+ Remove instance_id, project_id, and host.
+ * 1.12 - Add instance to deallocate_fixed_ip()
... Icehouse supports message version 1.12. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.12.
+
'''
VERSION_ALIASES = {
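
The reformatted history above is what the version-cap machinery keys off: VERSION_ALIASES maps a release name from the operator's upgrade-level configuration to the highest message version that release understands. A minimal sketch of the resolution step; the alias values are assumptions taken from the history notes above (Grizzly 1.9, Havana 1.10, Icehouse 1.12):

    VERSION_ALIASES = {'grizzly': '1.9', 'havana': '1.10', 'icehouse': '1.12'}

    def resolve_version_cap(upgrade_level, latest='1.12'):
        # No setting: speak the latest version. A known alias: pin to
        # that release's version. Anything else is taken literally.
        if not upgrade_level:
            return latest
        return VERSION_ALIASES.get(upgrade_level, upgrade_level)

    assert resolve_version_cap('icehouse') == '1.12'
    assert resolve_version_cap('1.10') == '1.10'
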
diff --git a/nova/network/security_group/neutron_driver.py b/nova/network/security_group/neutron_driver.py
index 2c07a2dc37..179bc6f5b2 100644
--- a/nova/network/security_group/neutron_driver.py
+++ b/nova/network/security_group/neutron_driver.py
@@ -23,11 +23,11 @@
from nova.compute import api as compute_api
from nova import exception
+from nova.i18n import _
from nova.network import neutronv2
from nova.network.security_group import security_group_base
from nova import objects
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import utils
@@ -505,3 +505,23 @@ def populate_security_groups(self, instance, security_groups):
# in the nova database if using the neutron driver
instance['security_groups'] = objects.SecurityGroupList()
instance['security_groups'].objects = []
+
+ def get_default_rule(self, context, id):
+ msg = _("Network driver does not support this function.")
+ raise exc.HTTPNotImplemented(explanation=msg)
+
+ def get_all_default_rules(self, context):
+ msg = _("Network driver does not support this function.")
+ raise exc.HTTPNotImplemented(explanation=msg)
+
+ def add_default_rules(self, context, vals):
+ msg = _("Network driver does not support this function.")
+ raise exc.HTTPNotImplemented(explanation=msg)
+
+ def remove_default_rules(self, context, rule_ids):
+ msg = _("Network driver does not support this function.")
+ raise exc.HTTPNotImplemented(explanation=msg)
+
+ def default_rule_exists(self, context, values):
+ msg = _("Network driver does not support this function.")
+ raise exc.HTTPNotImplemented(explanation=msg)
diff --git a/nova/network/security_group/security_group_base.py b/nova/network/security_group/security_group_base.py
index 6710b2d2af..5b9edc198c 100644
--- a/nova/network/security_group/security_group_base.py
+++ b/nova/network/security_group/security_group_base.py
@@ -22,7 +22,7 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import utils
CONF = cfg.CONF
@@ -86,11 +86,11 @@ def _new_ingress_rule(ip_protocol, from_port, to_port,
to_port = int(to_port)
except ValueError:
if ip_protocol.upper() == 'ICMP':
- raise exception.InvalidInput(reason="Type and"
- " Code must be integers for ICMP protocol type")
+ raise exception.InvalidInput(reason=_("Type and"
+ " Code must be integers for ICMP protocol type"))
else:
- raise exception.InvalidInput(reason="To and From ports "
- "must be integers")
+ raise exception.InvalidInput(reason=_("To and From ports "
+ "must be integers"))
if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
raise exception.InvalidIpProtocol(protocol=ip_protocol)
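
The hunk above only wraps the error strings in _() for translation; the validation itself is unchanged. A standalone sketch of that check, with ValueError standing in for exception.InvalidInput:

    def coerce_ports(ip_protocol, from_port, to_port):
        try:
            return int(from_port), int(to_port)
        except ValueError:
            if ip_protocol.upper() == 'ICMP':
                raise ValueError('Type and Code must be integers for '
                                 'ICMP protocol type')
            raise ValueError('To and From ports must be integers')

    assert coerce_ports('tcp', '22', '22') == (22, 22)
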
diff --git a/nova/notifications.py b/nova/notifications.py
index f48542edfb..f0e302a920 100644
--- a/nova/notifications.py
+++ b/nova/notifications.py
@@ -25,13 +25,13 @@
from nova.compute import flavors
import nova.context
from nova import db
+from nova.i18n import _
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.objects import base as obj_base
from nova.openstack.common import context as common_context
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log
from nova.openstack.common import timeutils
from nova import rpc
@@ -319,10 +319,15 @@ def info_from_instance(context, instance_ref, network_info,
"""Get detailed instance information for an instance which is common to all
notifications.
- :param network_info: network_info provided if not None
- :param system_metadata: system_metadata DB entries for the instance,
- if not None. *NOTE*: Currently unused here in trunk, but needed for
- potential custom modifications.
+ :param:network_info: network_info provided if not None
+ :param:system_metadata: system_metadata DB entries for the instance,
+ if not None
+
+ .. note::
+
+ Currently unused here in trunk, but needed for potential custom
+ modifications.
+
"""
def null_safe_str(s):
diff --git a/nova/objects/aggregate.py b/nova/objects/aggregate.py
index 383f6b51cd..0d58cb85ee 100644
--- a/nova/objects/aggregate.py
+++ b/nova/objects/aggregate.py
@@ -151,7 +151,8 @@ class AggregateList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added key argument to get_by_host()
# Aggregate <= version 1.1
- VERSION = '1.1'
+ # Version 1.2: Added get_by_metadata_key
+ VERSION = '1.2'
fields = {
'objects': fields.ListOfObjectsField('Aggregate'),
@@ -160,8 +161,21 @@ class AggregateList(base.ObjectListBase, base.NovaObject):
'1.0': '1.1',
'1.1': '1.1',
# NOTE(danms): Aggregate was at 1.1 before we added this
+ '1.2': '1.1',
}
+ @classmethod
+ def _filter_db_aggregates(cls, db_aggregates, hosts):
+ if not isinstance(hosts, set):
+ hosts = set(hosts)
+ filtered_aggregates = []
+ for db_aggregate in db_aggregates:
+ for host in db_aggregate['hosts']:
+ if host in hosts:
+ filtered_aggregates.append(db_aggregate)
+ break
+ return filtered_aggregates
+
@base.remotable_classmethod
def get_all(cls, context):
db_aggregates = db.aggregate_get_all(context)
@@ -173,3 +187,11 @@ def get_by_host(cls, context, host, key=None):
db_aggregates = db.aggregate_get_by_host(context, host, key=key)
return base.obj_make_list(context, cls(context), objects.Aggregate,
db_aggregates)
+
+ @base.remotable_classmethod
+ def get_by_metadata_key(cls, context, key, hosts=None):
+ db_aggregates = db.aggregate_get_by_metadata_key(context, key=key)
+ if hosts:
+ db_aggregates = cls._filter_db_aggregates(db_aggregates, hosts)
+ return base.obj_make_list(context, cls(context), objects.Aggregate,
+ db_aggregates)
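
get_by_metadata_key() post-filters in Python rather than in SQL: _filter_db_aggregates() keeps an aggregate as soon as any of its hosts appears in the requested set. A standalone sketch of that predicate, with plain dicts standing in for DB aggregate rows:

    def filter_aggregates_by_hosts(db_aggregates, hosts):
        hosts = set(hosts)  # mirrors the isinstance(hosts, set) guard above
        return [agg for agg in db_aggregates
                if any(host in hosts for host in agg['hosts'])]

    rows = [{'id': 1, 'hosts': ['c1', 'c2']}, {'id': 2, 'hosts': ['c3']}]
    assert [a['id'] for a in filter_aggregates_by_hosts(rows, ['c2'])] == [1]
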
diff --git a/nova/objects/base.py b/nova/objects/base.py
index 79d07b2a0d..666e82f80d 100644
--- a/nova/objects/base.py
+++ b/nova/objects/base.py
@@ -24,9 +24,9 @@
from nova import context
from nova import exception
+from nova.i18n import _, _LE
from nova import objects
from nova.objects import fields
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import versionutils
@@ -83,8 +83,7 @@ def setter(self, value, name=name, field=field):
return setattr(self, attrname, field_value)
except Exception:
attr = "%s.%s" % (self.obj_name(), name)
- LOG.exception(_('Error setting %(attr)s') %
- {'attr': attr})
+ LOG.exception(_LE('Error setting %(attr)s'), {'attr': attr})
raise
setattr(cls, name, property(getter, setter))
@@ -188,7 +187,13 @@ def wrapper(self, *args, **kwargs):
for key, value in updates.iteritems():
if key in self.fields:
field = self.fields[key]
- self[key] = field.from_primitive(self, key, value)
+ # NOTE(ndipanov): Since NovaObjectSerializer will have
+ # deserialized any object fields into objects already,
+ # we do not try to deserialize them again here.
+ if isinstance(value, NovaObject):
+ self[key] = value
+ else:
+ self[key] = field.from_primitive(self, key, value)
self.obj_reset_changes()
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
@@ -335,13 +340,25 @@ def obj_make_compatible(self, primitive, target_version):
This is responsible for taking the primitive representation of
an object and making it suitable for the given target_version.
This may mean converting the format of object attributes, removing
- attributes that have been added since the target version, etc.
+ attributes that have been added since the target version, etc. In
+ general:
+
+ - If a new version of an object adds a field, this routine
+ should remove it for older versions.
+ - If a new version changed or restricted the format of a field, this
+ should convert it back to something a client knowing only of the
+ older version will tolerate.
+ - If an object that this object depends on is bumped, then this
+ object should also take a version bump. Then, this routine should
+ backlevel the dependent object (by calling its obj_make_compatible())
+ if the requested version of this object is older than the version
+ where the new dependent object was added.
:param:primitive: The result of self.obj_to_primitive()
:param:target_version: The version string requested by the recipient
- of the object.
- :param:raises: nova.exception.UnsupportedObjectError if conversion
- is not possible for some reason.
+ of the object
+ :raises: nova.exception.UnsupportedObjectError if conversion
+ is not possible for some reason
"""
pass
@@ -616,15 +633,19 @@ def _process_iterable(self, context, action_fn, values):
items from values having had action applied.
"""
iterable = values.__class__
- if iterable == set:
+ if issubclass(iterable, dict):
+ return iterable(**dict((k, action_fn(context, v))
+ for k, v in six.iteritems(values)))
+ else:
# NOTE(danms): A set can't have an unhashable value inside, such as
# a dict. Convert sets to tuples, which is fine, since we can't
# send them over RPC anyway.
- iterable = tuple
- return iterable([action_fn(context, value) for value in values])
+ if iterable == set:
+ iterable = tuple
+ return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
- if isinstance(entity, (tuple, list, set)):
+ if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
@@ -635,7 +656,7 @@ def serialize_entity(self, context, entity):
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = self._process_object(context, entity)
- elif isinstance(entity, (tuple, list, set)):
+ elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
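
The serializer change above teaches _process_iterable() to walk dicts (processing values, leaving keys alone) in addition to tuples, lists, and sets; sets still come back as tuples because a processed value may be unhashable. A simplified standalone sketch of the dispatch (the real method threads a context and uses six.iteritems):

    def process_iterable(action_fn, values):
        iterable = values.__class__
        if issubclass(iterable, dict):
            return iterable((k, action_fn(v)) for k, v in values.items())
        if iterable == set:
            iterable = tuple  # processed values may not be hashable
        return iterable(action_fn(v) for v in values)

    assert process_iterable(str, {'a': 1}) == {'a': '1'}
    assert process_iterable(str, {1, 2}) in (('1', '2'), ('2', '1'))
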
diff --git a/nova/objects/block_device.py b/nova/objects/block_device.py
index 40d06e927e..e3cb27cafa 100644
--- a/nova/objects/block_device.py
+++ b/nova/objects/block_device.py
@@ -17,10 +17,10 @@
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.objects import base
from nova.objects import fields
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
@@ -188,7 +188,7 @@ def obj_load_attr(self, attrname):
class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: BlockDeviceMapping <= version 1.1
# Version 1.2: Added use_slave to get_by_instance_uuid
VERSION = '1.2'
fields = {
@@ -201,9 +201,9 @@ class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject):
}
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
db_bdms = db.block_device_mapping_get_all_by_instance(
context, instance_uuid, use_slave=use_slave)
return base.obj_make_list(
context, cls(), objects.BlockDeviceMapping, db_bdms or [])
diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py
index 3171816e32..6ed291ac18 100644
--- a/nova/objects/compute_node.py
+++ b/nova/objects/compute_node.py
@@ -18,6 +18,7 @@
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import jsonutils
+from nova import utils
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
@@ -26,7 +27,8 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
# Version 1.2: String attributes updated to support unicode
# Version 1.3: Added stats field
# Version 1.4: Added host ip field
- VERSION = '1.4'
+ # Version 1.5: Added numa_topology field
+ VERSION = '1.5'
fields = {
'id': fields.IntegerField(read_only=True),
@@ -49,11 +51,13 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
'metrics': fields.StringField(nullable=True),
'stats': fields.DictOfNullableStringsField(nullable=True),
'host_ip': fields.IPAddressField(nullable=True),
+ 'numa_topology': fields.StringField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
- target_version = (int(target_version.split('.')[0]),
- int(target_version.split('.')[1]))
+ target_version = utils.convert_version_to_tuple(target_version)
+ if target_version < (1, 5) and 'numa_topology' in primitive:
+ del primitive['numa_topology']
if target_version < (1, 4) and 'host_ip' in primitive:
del primitive['host_ip']
if target_version < (1, 3) and 'stats' in primitive:
@@ -137,7 +141,8 @@ class ComputeNodeList(base.ObjectListBase, base.NovaObject):
# Version 1.1 ComputeNode version 1.3
# Version 1.2 Add get_by_service()
# Version 1.3 ComputeNode version 1.4
- VERSION = '1.3'
+ # Version 1.4 ComputeNode version 1.5
+ VERSION = '1.4'
fields = {
'objects': fields.ListOfObjectsField('ComputeNode'),
}
@@ -147,6 +152,7 @@ class ComputeNodeList(base.ObjectListBase, base.NovaObject):
'1.1': '1.3',
'1.2': '1.3',
'1.3': '1.4',
+ '1.4': '1.5',
}
@base.remotable_classmethod
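
obj_make_compatible() now shares utils.convert_version_to_tuple() instead of hand-splitting the version string, and strips numa_topology for readers older than 1.5. A standalone sketch of the backport rule:

    def convert_version_to_tuple(version):
        # Same idea as nova.utils.convert_version_to_tuple: '1.5' -> (1, 5)
        return tuple(int(part) for part in version.split('.'))

    def make_compatible(primitive, target_version):
        target = convert_version_to_tuple(target_version)
        if target < (1, 5):
            primitive.pop('numa_topology', None)  # field added in 1.5
        if target < (1, 4):
            primitive.pop('host_ip', None)        # field added in 1.4
        return primitive

    assert 'numa_topology' not in make_compatible(
        {'numa_topology': '{}', 'host_ip': '10.0.0.1'}, '1.4')
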
diff --git a/nova/objects/ec2.py b/nova/objects/ec2.py
index 7642620cb7..7556bd57c9 100644
--- a/nova/objects/ec2.py
+++ b/nova/objects/ec2.py
@@ -92,3 +92,79 @@ def get_by_id(cls, context, ec2_id):
db_vmap = db.ec2_volume_get_by_id(context, ec2_id)
if db_vmap:
return cls._from_db_object(context, cls(context), db_vmap)
+
+
+class EC2SnapshotMapping(base.NovaPersistentObject, base.NovaObject):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'id': fields.IntegerField(read_only=True),
+ 'uuid': fields.UUIDField(),
+ }
+
+ @staticmethod
+ def _from_db_object(context, smap, db_smap):
+ for field in smap.fields:
+ smap[field] = db_smap[field]
+ smap._context = context
+ smap.obj_reset_changes()
+ return smap
+
+ @base.remotable
+ def create(self, context):
+ if self.obj_attr_is_set('id'):
+ raise exception.ObjectActionError(action='create',
+ reason='already created')
+ db_smap = db.ec2_snapshot_create(context, self.uuid)
+ self._from_db_object(context, self, db_smap)
+
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, snapshot_uuid):
+ db_smap = db.ec2_snapshot_get_by_uuid(context, snapshot_uuid)
+ if db_smap:
+ return cls._from_db_object(context, cls(context), db_smap)
+
+ @base.remotable_classmethod
+ def get_by_id(cls, context, ec2_id):
+ db_smap = db.ec2_snapshot_get_by_ec2_id(context, ec2_id)
+ if db_smap:
+ return cls._from_db_object(context, cls(context), db_smap)
+
+
+class S3ImageMapping(base.NovaPersistentObject, base.NovaObject):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'id': fields.IntegerField(read_only=True),
+ 'uuid': fields.UUIDField(),
+ }
+
+ @staticmethod
+ def _from_db_object(context, s3imap, db_s3imap):
+ for field in s3imap.fields:
+ s3imap[field] = db_s3imap[field]
+ s3imap._context = context
+ s3imap.obj_reset_changes()
+ return s3imap
+
+ @base.remotable
+ def create(self, context):
+ if self.obj_attr_is_set('id'):
+ raise exception.ObjectActionError(action='create',
+ reason='already created')
+ db_s3imap = db.s3_image_create(context, self.uuid)
+ self._from_db_object(context, self, db_s3imap)
+
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, s3_image_uuid):
+ db_s3imap = db.s3_image_get_by_uuid(context, s3_image_uuid)
+ if db_s3imap:
+ return cls._from_db_object(context, cls(context), db_s3imap)
+
+ @base.remotable_classmethod
+ def get_by_id(cls, context, s3_id):
+ db_s3imap = db.s3_image_get(context, s3_id)
+ if db_s3imap:
+ return cls._from_db_object(context, cls(context), db_s3imap)
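
Both new mapping objects follow the same shape: create() refuses to run twice (the read-only id must still be unset), and the get_by_* classmethods return None on a miss because they only convert a row the DB layer actually found. An illustrative flow, where ctxt and image_uuid are placeholders for a real request context and image UUID:

    s3imap = S3ImageMapping(ctxt)  # ctxt, image_uuid: placeholders
    s3imap.uuid = image_uuid
    s3imap.create(ctxt)            # ObjectActionError if 'id' is already set

    found = S3ImageMapping.get_by_id(ctxt, s3imap.id)
    assert found is None or found.uuid == image_uuid
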
diff --git a/nova/objects/fields.py b/nova/objects/fields.py
index 57936a28e9..372e7e600c 100644
--- a/nova/objects/fields.py
+++ b/nova/objects/fields.py
@@ -19,8 +19,8 @@
import netaddr
import six
+from nova.i18n import _
from nova.network import model as network_model
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
@@ -405,10 +405,10 @@ def coerce(self, obj, attr, value):
raise ValueError(_('A dict is required here'))
for key, element in value.items():
if not isinstance(key, six.string_types):
- #NOTE(guohliu) In order to keep compatibility with python3
- #we need to use six.string_types rather than basestring here,
- #since six.string_types is a tuple, so we need to pass the
- #real type in.
+ # NOTE(guohliu) In order to keep compatibility with python3
+ # we need to use six.string_types rather than basestring here,
+ # since six.string_types is a tuple, so we need to pass the
+ # real type in.
raise KeyTypeError(six.string_types[0], key)
value[key] = self._element_type.coerce(
obj, '%s["%s"]' % (attr, key), element)
@@ -434,6 +434,30 @@ def stringify(self, value):
for key, val in sorted(value.items())]))
+class Set(CompoundFieldType):
+ def coerce(self, obj, attr, value):
+ if not isinstance(value, set):
+ raise ValueError(_('A set is required here'))
+
+ coerced = set()
+ for element in value:
+ coerced.add(self._element_type.coerce(
+ obj, '%s["%s"]' % (attr, element), element))
+ return coerced
+
+ def to_primitive(self, obj, attr, value):
+ return tuple(
+ self._element_type.to_primitive(obj, attr, x) for x in value)
+
+ def from_primitive(self, obj, attr, value):
+ return set([self._element_type.from_primitive(obj, attr, x)
+ for x in value])
+
+ def stringify(self, value):
+ return 'set([%s])' % (
+ ','.join([self._element_type.stringify(x) for x in value]))
+
+
class Object(FieldType):
def __init__(self, obj_name, **kwargs):
self._obj_name = obj_name
@@ -458,6 +482,10 @@ def to_primitive(obj, attr, value):
def from_primitive(obj, attr, value):
# FIXME(danms): Avoid circular import from base.py
from nova.objects import base as obj_base
+ # NOTE (ndipanov): If they already got hydrated by the serializer, just
+ # pass them back unchanged
+ if isinstance(value, obj_base.NovaObject):
+ return value
return obj_base.NovaObject.obj_from_primitive(value, obj._context)
def describe(self):
@@ -571,6 +599,14 @@ class ListOfStringsField(AutoTypedField):
AUTO_TYPE = List(String())
+class SetOfIntegersField(AutoTypedField):
+ AUTO_TYPE = Set(Integer())
+
+
+class ListOfDictOfNullableStringsField(AutoTypedField):
+ AUTO_TYPE = List(Dict(String(), nullable=True))
+
+
class ObjectField(AutoTypedField):
def __init__(self, objtype, **kwargs):
self.AUTO_TYPE = Object(objtype)
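
The new Set compound type mirrors List and Dict: coerce() validates and element-coerces, to_primitive() serializes as a tuple (sets do not survive the RPC wire format), and from_primitive() rebuilds the set. A standalone sketch of that round trip for SetOfIntegersField's element handling:

    class IntSet(object):
        def coerce(self, value):
            if not isinstance(value, set):
                raise ValueError('A set is required here')
            return set(int(x) for x in value)

        def to_primitive(self, value):
            return tuple(value)        # tuple on the wire

        def from_primitive(self, value):
            return set(value)          # set again on arrival

    f = IntSet()
    assert f.from_primitive(f.to_primitive(f.coerce({1, 2}))) == {1, 2}
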
diff --git a/nova/objects/fixed_ip.py b/nova/objects/fixed_ip.py
index 0ceaf38f67..889047b4ea 100644
--- a/nova/objects/fixed_ip.py
+++ b/nova/objects/fixed_ip.py
@@ -94,8 +94,9 @@ def get_by_address(cls, context, address, expected_attrs=None):
@obj_base.remotable_classmethod
def get_by_floating_address(cls, context, address):
- db_fixedip = db.fixed_ip_get_by_floating_address(context, address)
- return cls._from_db_object(context, cls(context), db_fixedip)
+ db_fixedip = db.fixed_ip_get_by_floating_address(context, str(address))
+ if db_fixedip is not None:
+ return cls._from_db_object(context, cls(context), db_fixedip)
@obj_base.remotable_classmethod
def get_by_network_and_host(cls, context, network_id, host):
diff --git a/nova/objects/floating_ip.py b/nova/objects/floating_ip.py
index d74ba424e7..d65c25ce35 100644
--- a/nova/objects/floating_ip.py
+++ b/nova/objects/floating_ip.py
@@ -45,7 +45,8 @@ def _from_db_object(context, floatingip, db_floatingip,
for field in floatingip.fields:
if field not in FLOATING_IP_OPTIONAL_ATTRS:
floatingip[field] = db_floatingip[field]
- if 'fixed_ip' in expected_attrs:
+ if ('fixed_ip' in expected_attrs and
+ db_floatingip['fixed_ip'] is not None):
floatingip.fixed_ip = objects.FixedIP._from_db_object(
context, objects.FixedIP(context), db_floatingip['fixed_ip'])
floatingip._context = context
@@ -75,7 +76,7 @@ def get_by_id(cls, context, id):
@obj_base.remotable_classmethod
def get_by_address(cls, context, address):
- db_floatingip = db.floating_ip_get_by_address(context, address)
+ db_floatingip = db.floating_ip_get_by_address(context, str(address))
return cls._from_db_object(context, cls(context), db_floatingip)
@obj_base.remotable_classmethod
@@ -90,8 +91,8 @@ def allocate_address(cls, context, project_id, pool, auto_assigned=False):
@obj_base.remotable_classmethod
def associate(cls, context, floating_address, fixed_address, host):
db_fixed = db.floating_ip_fixed_ip_associate(context,
- floating_address,
- fixed_address,
+ str(floating_address),
+ str(fixed_address),
host)
if db_fixed is None:
return None
@@ -106,15 +107,15 @@ def associate(cls, context, floating_address, fixed_address, host):
@obj_base.remotable_classmethod
def deallocate(cls, context, address):
- db.floating_ip_deallocate(context, address)
+ return db.floating_ip_deallocate(context, str(address))
@obj_base.remotable_classmethod
def destroy(cls, context, address):
- db.floating_ip_destroy(context, address)
+ db.floating_ip_destroy(context, str(address))
@obj_base.remotable_classmethod
def disassociate(cls, context, address):
- db_fixed = db.floating_ip_disassociate(context, address)
+ db_fixed = db.floating_ip_disassociate(context, str(address))
return cls(context=context, address=address,
fixed_ip_id=db_fixed['id'],
@@ -136,6 +137,14 @@ def save(self, context):
if 'address' in updates:
raise exception.ObjectActionError(action='save',
reason='address is not mutable')
+ if 'fixed_ip_id' in updates:
+ reason = 'fixed_ip_id is not mutable'
+ raise exception.ObjectActionError(action='save', reason=reason)
+
+ # NOTE(danms): Make sure we don't pass the calculated fixed_ip
+ # relationship to the DB update method
+ updates.pop('fixed_ip', None)
+
db_floatingip = db.floating_ip_update(context, str(self.address),
updates)
self._from_db_object(context, self, db_floatingip)
@@ -172,8 +181,8 @@ def get_by_project(cls, context, project_id):
@obj_base.remotable_classmethod
def get_by_fixed_address(cls, context, fixed_address):
- db_floatingips = db.floating_ip_get_by_fixed_address(context,
- fixed_address)
+ db_floatingips = db.floating_ip_get_by_fixed_address(
+ context, str(fixed_address))
return obj_base.obj_make_list(context, cls(context),
objects.FloatingIP, db_floatingips)
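
The str() calls added throughout floating_ip.py exist because object fields hold netaddr values while the DB API expects plain strings; passing an IPAddress through unconverted can break query filters. A two-line illustration:

    import netaddr

    address = netaddr.IPAddress('172.24.4.10')
    assert str(address) == '172.24.4.10'  # what the db.* calls now receive
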
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index bd046304b5..320a13d665 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -14,15 +14,14 @@
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
-from nova import compute
from nova.compute import flavors
from nova import db
from nova import exception
+from nova.i18n import _LE
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
@@ -69,7 +68,7 @@ class Instance(base.NovaPersistentObject, base.NovaObject):
# Version 1.7: String attributes updated to support unicode
# Version 1.8: 'security_groups' and 'pci_devices' cannot be None
# Version 1.9: Make uuid a non-None real string
# Version 1.10: Added use_slave to refresh and get_by_uuid
# Version 1.11: Update instance from database during destroy
# Version 1.12: Added ephemeral_key_uuid
# Version 1.13: Added delete_metadata_key()
@@ -196,8 +195,7 @@ def _obj_from_primitive(cls, context, objver, primitive):
return self
def obj_make_compatible(self, primitive, target_version):
- target_version = (int(target_version.split('.')[0]),
- int(target_version.split('.')[1]))
+ target_version = utils.convert_version_to_tuple(target_version)
unicode_attributes = ['user_id', 'project_id', 'image_ref',
'kernel_id', 'ramdisk_id', 'hostname',
'key_name', 'key_data', 'host', 'node',
@@ -275,11 +273,6 @@ def _from_db_object(context, instance, db_inst, expected_attrs=None):
objects.InstanceFault.get_latest_for_instance(
context, instance.uuid))
- if 'pci_devices' in expected_attrs:
- pci_devices = base.obj_make_list(
- context, objects.PciDeviceList(context),
- objects.PciDevice, db_inst['pci_devices'])
- instance['pci_devices'] = pci_devices
if 'info_cache' in expected_attrs:
if db_inst['info_cache'] is None:
instance.info_cache = None
@@ -291,6 +284,15 @@ def _from_db_object(context, instance, db_inst, expected_attrs=None):
instance.info_cache._from_db_object(context,
instance.info_cache,
db_inst['info_cache'])
+
+ # TODO(danms): If we are updating these on a backlevel instance,
+ # we'll end up sending back new versions of these objects (see
# above note for new info_caches)
+ if 'pci_devices' in expected_attrs:
+ pci_devices = base.obj_make_list(
+ context, objects.PciDeviceList(context),
+ objects.PciDevice, db_inst['pci_devices'])
+ instance['pci_devices'] = pci_devices
if 'security_groups' in expected_attrs:
sec_groups = base.obj_make_list(
context, objects.SecurityGroupList(context),
@@ -302,13 +304,13 @@ def _from_db_object(context, instance, db_inst, expected_attrs=None):
return instance
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@@ -390,13 +392,15 @@ def save(self, context, expected_vm_state=None,
self.what_changed(). If expected_task_state is provided,
it will be checked against the in-database copy of the
instance before updates are made.
- :param context: Security context
- :param expected_task_state: Optional tuple of valid task states
- for the instance to be in.
- :param expected_vm_state: Optional tuple of valid vm states
- for the instance to be in.
+
+ :param:context: Security context
+ :param:expected_task_state: Optional tuple of valid task states
+ for the instance to be in
+ :param:expected_vm_state: Optional tuple of valid vm states
+ for the instance to be in
:param admin_state_reset: True if admin API is forcing setting
- of task_state/vm_state.
+ of task_state/vm_state
+
"""
cell_type = cells_opts.get_cell_type()
@@ -429,7 +433,7 @@ def _handle_cell_update_from_api():
try:
getattr(self, '_save_%s' % field)(context)
except AttributeError:
- LOG.exception(_('No save handler for %s') % field,
+ LOG.exception(_LE('No save handler for %s'), field,
instance=self)
elif field in changes:
updates[field] = self[field]
@@ -460,6 +464,10 @@ def _handle_cell_update_from_api():
expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
if self.obj_attr_is_set(attr)]
+ if 'pci_devices' in expected_attrs:
+ # NOTE(danms): We don't refresh pci_devices on save right now
+ expected_attrs.remove('pci_devices')
+
# NOTE(alaski): We need to pull system_metadata for the
# notification.send_update() below. If we don't there's a KeyError
# when it tries to extract the flavor.
@@ -475,17 +483,18 @@ def _handle_cell_update_from_api():
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_at_top(context, inst_ref)
- self._from_db_object(context, self, inst_ref, expected_attrs)
+ self._from_db_object(context, self, inst_ref,
+ expected_attrs=expected_attrs)
notifications.send_update(context, old_ref, inst_ref)
self.obj_reset_changes()
@base.remotable
def refresh(self, context, use_slave=False):
extra = [field for field in INSTANCE_OPTIONAL_ATTRS
if self.obj_attr_is_set(field)]
current = self.__class__.get_by_uuid(context, uuid=self.uuid,
expected_attrs=extra,
use_slave=use_slave)
# NOTE(danms): We orphan the instance copy so we do not unexpectedly
# trigger a lazy-load (which would mean we failed to calculate the
# expected_attrs properly)
@@ -501,6 +510,23 @@ def refresh(self, context, use_slave=False):
self[field] = current[field]
self.obj_reset_changes()
+ def _load_generic(self, attrname):
+ instance = self.__class__.get_by_uuid(self._context,
+ uuid=self.uuid,
+ expected_attrs=[attrname])
+
+ # NOTE(danms): Never allow us to recursively-load
+ if instance.obj_attr_is_set(attrname):
+ self[attrname] = instance[attrname]
+ else:
+ raise exception.ObjectActionError(
+ action='obj_load_attr',
+ reason='loading %s requires recursion' % attrname)
+
+ def _load_fault(self):
+ self.fault = objects.InstanceFault.get_latest_for_instance(
+ self._context, self.uuid)
+
def obj_load_attr(self, attrname):
if attrname not in INSTANCE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
@@ -516,17 +542,13 @@ def obj_load_attr(self, attrname):
'uuid': self.uuid,
})
# FIXME(comstud): This should be optimized to only load the attr.
- instance = self.__class__.get_by_uuid(self._context,
- uuid=self.uuid,
- expected_attrs=[attrname])
-
- # NOTE(danms): Never allow us to recursively-load
- if instance.obj_attr_is_set(attrname):
- self[attrname] = instance[attrname]
+ if attrname == 'fault':
+ # NOTE(danms): We handle fault differently here so that we
+ # can be more efficient
+ self._load_fault()
else:
- raise exception.ObjectActionError(
- action='obj_load_attr',
- reason='loading %s requires recursion' % attrname)
+ self._load_generic(attrname)
+ self.obj_reset_changes([attrname])
def get_flavor(self, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
@@ -568,20 +590,6 @@ def delete_metadata_key(self, context, key):
self.obj_reset_changes(['metadata'])
-def add_image_ref(context, instance):
- """Helper method to add image_ref to instance object."""
- if not instance['image_ref']:
- compute_api = compute.API()
- bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
- context, instance['uuid'])
- if compute_api.is_volume_backed_instance(context, instance, bdms):
- props = bdms.root_metadata(
- context, compute_api.image_api,
- compute_api.volume_api)
- instance['image_ref'] = props['image_id']
- return instance
-
-
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
get_fault = expected_attrs and 'fault' in expected_attrs
inst_faults = {}
@@ -609,14 +617,15 @@ def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
class InstanceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added use_slave to get_by_host
# Instance <= version 1.9
# Version 1.2: Instance <= version 1.11
# Version 1.3: Added use_slave to get_by_filters
# Version 1.4: Instance <= version 1.12
# Version 1.5: Added method get_active_by_window_joined.
# Version 1.6: Instance <= version 1.13
- VERSION = '1.6'
+ # Version 1.7: Added use_slave to get_active_by_window_joined
+ VERSION = '1.7'
fields = {
'objects': fields.ListOfObjectsField('Instance'),
@@ -629,24 +638,25 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
'1.4': '1.12',
'1.5': '1.12',
'1.6': '1.13',
+ '1.7': '1.13',
}
@base.remotable_classmethod
def get_by_filters(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
marker=None, expected_attrs=None, use_slave=False):
db_inst_list = db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, limit=limit, marker=marker,
columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
db_inst_list = db.instance_get_all_by_host(
context, host, columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@@ -676,7 +686,8 @@ def get_hung_in_rebooting(cls, context, reboot_window,
@base.remotable_classmethod
def _get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
- expected_attrs=None):
+ expected_attrs=None,
+ use_slave=False):
# NOTE(mriedem): We need to convert the begin/end timestamp strings
# to timezone-aware datetime objects for the DB API call.
begin = timeutils.parse_isotime(begin)
@@ -692,17 +703,20 @@ def _get_active_by_window_joined(cls, context, begin, end=None,
@classmethod
def get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
- expected_attrs=None):
+ expected_attrs=None,
+ use_slave=False):
"""Get instances and joins active during a certain time window.
- :param context: nova request context
- :param begin: datetime for the start of the time window
- :param end: datetime for the end of the time window
- :param project_id: used to filter instances by project
- :param host: used to filter instances on a given compute host
- :param expected_attrs: list of related fields that can be joined
+ :param:context: nova request context
+ :param:begin: datetime for the start of the time window
+ :param:end: datetime for the end of the time window
+ :param:project_id: used to filter instances by project
+ :param:host: used to filter instances on a given compute host
+ :param:expected_attrs: list of related fields that can be joined
in the database layer when querying for instances
+ :param:use_slave: if True, ship this query off to a DB slave
:returns: InstanceList
+
"""
# NOTE(mriedem): We have to convert the datetime objects to string
# primitives for the remote call.
@@ -710,7 +724,8 @@ def get_active_by_window_joined(cls, context, begin, end=None,
end = timeutils.isotime(end) if end else None
return cls._get_active_by_window_joined(context, begin, end,
project_id, host,
- expected_attrs)
+ expected_attrs,
+ use_slave=use_slave)
@base.remotable_classmethod
def get_by_security_group_id(cls, context, security_group_id):
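
The obj_load_attr() split above sends 'fault' through a targeted query while every other optional attribute reloads the instance with that attribute joined; the recursion guard in _load_generic() exists because a reload that still leaves the attribute unset would otherwise lazy-load forever. A sketch of the guard, with fetch and a plain dict standing in for get_by_uuid() and the instance object:

    def load_generic(fetch, attrname):
        # fetch(expected_attrs) stands in for Instance.get_by_uuid()
        instance = fetch(expected_attrs=[attrname])
        if attrname in instance:
            return instance[attrname]
        raise RuntimeError('loading %s requires recursion' % attrname)

    assert load_generic(lambda expected_attrs: {'fault': None},
                        'fault') is None
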
diff --git a/nova/objects/instance_fault.py b/nova/objects/instance_fault.py
index 9be7a4e81e..7c2b7a1fd8 100644
--- a/nova/objects/instance_fault.py
+++ b/nova/objects/instance_fault.py
@@ -18,10 +18,10 @@
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
+from nova.i18n import _LE
from nova import objects
from nova.objects import base
from nova.objects import fields
-from nova.openstack.common.gettextutils import _LE
from nova.openstack.common import log as logging
diff --git a/nova/objects/instance_group.py b/nova/objects/instance_group.py
index 5e8ea18e17..842c0ac4a7 100644
--- a/nova/objects/instance_group.py
+++ b/nova/objects/instance_group.py
@@ -18,6 +18,7 @@
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import uuidutils
+from nova import utils
class InstanceGroup(base.NovaPersistentObject, base.NovaObject):
@@ -28,7 +29,8 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject):
# Version 1.4: Add add_members()
# Version 1.5: Add get_hosts()
# Version 1.6: Add get_by_name()
- VERSION = '1.6'
+ # Version 1.7: Deprecate metadetails
+ VERSION = '1.7'
fields = {
'id': fields.IntegerField(),
@@ -40,10 +42,16 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject):
'name': fields.StringField(nullable=True),
'policies': fields.ListOfStringsField(nullable=True),
- 'metadetails': fields.DictOfStringsField(nullable=True),
'members': fields.ListOfStringsField(nullable=True),
}
+ def obj_make_compatible(self, primitive, target_version):
+ target_version = utils.convert_version_to_tuple(target_version)
+ if target_version < (1, 7):
+ # NOTE(danms): Before 1.7, we had an always-empty
+ # metadetails property
+ primitive['metadetails'] = {}
+
@staticmethod
def _from_db_object(context, instance_group, db_inst):
"""Method to help with migration to objects.
@@ -95,11 +103,6 @@ def save(self, context):
if not updates:
return
- metadata = None
- if 'metadetails' in updates:
- metadata = updates.pop('metadetails')
- updates.update({'metadata': metadata})
-
db.instance_group_update(context, self.uuid, updates)
db_inst = db.instance_group_get(context, self.uuid)
self._from_db_object(context, self, db_inst)
@@ -122,11 +125,9 @@ def create(self, context):
updates.pop('id', None)
policies = updates.pop('policies', None)
members = updates.pop('members', None)
- metadetails = updates.pop('metadetails', None)
db_inst = db.instance_group_create(context, updates,
policies=policies,
- metadata=metadetails,
members=members)
self._from_db_object(context, self, db_inst)
@@ -165,6 +166,8 @@ class InstanceGroupList(base.ObjectListBase, base.NovaObject):
# InstanceGroup <= version 1.3
# Version 1.1: InstanceGroup <= version 1.4
# Version 1.2: InstanceGroup <= version 1.5
+ # Version 1.3: InstanceGroup <= version 1.6
+ # Version 1.4: InstanceGroup <= version 1.7
-    VERSION = '1.2'
+    VERSION = '1.4'
fields = {
@@ -176,6 +179,7 @@ class InstanceGroupList(base.ObjectListBase, base.NovaObject):
'1.1': '1.4',
'1.2': '1.5',
'1.3': '1.6',
+ '1.4': '1.7',
}
@base.remotable_classmethod
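
Deprecating metadetails removes the field as of 1.7, so obj_make_compatible() has to reinstate it for older readers; unlike the compute-node case above, this backport adds a key rather than deleting one. A standalone sketch:

    def make_compatible(primitive, target_version):
        major, minor = (int(x) for x in target_version.split('.'))
        if (major, minor) < (1, 7):
            primitive['metadetails'] = {}  # always-empty before 1.7
        return primitive

    assert make_compatible({'name': 'grp'}, '1.6')['metadetails'] == {}
    assert 'metadetails' not in make_compatible({'name': 'grp'}, '1.7')
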
diff --git a/nova/objects/instance_info_cache.py b/nova/objects/instance_info_cache.py
index 1c6f57b123..9d7011937f 100644
--- a/nova/objects/instance_info_cache.py
+++ b/nova/objects/instance_info_cache.py
@@ -16,9 +16,9 @@
from nova.cells import rpcapi as cells_rpcapi
from nova import db
from nova import exception
+from nova.i18n import _LE
from nova.objects import base
from nova.objects import fields
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -79,8 +79,8 @@ def _info_cache_cells_update(ctxt, info_cache):
try:
cells_api.instance_info_cache_update_at_top(ctxt, info_cache)
except Exception:
- LOG.exception(_("Failed to notify cells of instance info "
- "cache update"))
+ LOG.exception(_LE("Failed to notify cells of instance info "
+ "cache update"))
@base.remotable
def save(self, context, update_cells=True):
diff --git a/nova/objects/migration.py b/nova/objects/migration.py
index 3679198e4a..d9bbb3d5f6 100644
--- a/nova/objects/migration.py
+++ b/nova/objects/migration.py
@@ -81,7 +81,7 @@ def instance(self):
class MigrationList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Migration <= 1.1
# Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute
VERSION = '1.1'
fields = {
@@ -95,9 +95,9 @@ class MigrationList(base.ObjectListBase, base.NovaObject):
@base.remotable_classmethod
def get_unconfirmed_by_dest_compute(cls, context, confirm_window,
dest_compute, use_slave=False):
db_migrations = db.migration_get_unconfirmed_by_dest_compute(
context, confirm_window, dest_compute, use_slave=use_slave)
return base.obj_make_list(context, cls(context), objects.Migration,
db_migrations)
diff --git a/nova/objects/network.py b/nova/objects/network.py
index 829ff51834..98c09d1c86 100644
--- a/nova/objects/network.py
+++ b/nova/objects/network.py
@@ -20,6 +20,7 @@
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
+from nova import utils
network_opts = [
cfg.BoolOpt('share_dhcp_address',
@@ -97,7 +98,7 @@ def _convert_legacy_ipv6_netmask(netmask):
'or integral prefix' % netmask)
def obj_make_compatible(self, primitive, target_version):
- target_version = tuple(int(x) for x in target_version.split('.'))
+ target_version = utils.convert_version_to_tuple(target_version)
if target_version < (1, 2):
if 'mtu' in primitive:
del primitive['mtu']
diff --git a/nova/objects/pci_device.py b/nova/objects/pci_device.py
index b6fa5a6435..32caadeebd 100644
--- a/nova/objects/pci_device.py
+++ b/nova/objects/pci_device.py
@@ -54,12 +54,14 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
the device object is changed to deleted state and no longer synced with
the DB.
- Filed notes:
- 'dev_id':
- Hypervisor's identification for the device, the string format
- is hypervisor specific
- 'extra_info':
- Device-specific properties like PF address, switch ip address etc.
+ Field notes::
+
+ | 'dev_id':
+ | Hypervisor's identification for the device, the string format
+ | is hypervisor specific
+ | 'extra_info':
+ | Device-specific properties like PF address, switch ip address etc.
+
"""
# Version 1.0: Initial version
diff --git a/nova/objects/quotas.py b/nova/objects/quotas.py
index 288c7ed668..2731c53d7e 100644
--- a/nova/objects/quotas.py
+++ b/nova/objects/quotas.py
@@ -13,6 +13,7 @@
# under the License.
+from nova import db
from nova.objects import base
from nova.objects import fields
from nova import quota
@@ -39,6 +40,10 @@ def ids_from_security_group(context, security_group):
class Quotas(base.NovaObject):
+ # Version 1.0: initial version
+    # Version 1.0: Initial version
+ VERSION = '1.1'
+
fields = {
'reservations': fields.ListOfStringsField(nullable=True),
'project_id': fields.StringField(nullable=True),
@@ -106,6 +111,20 @@ def rollback(self, context=None):
self.reservations = None
self.obj_reset_changes()
+ @base.remotable_classmethod
+ def create_limit(cls, context, project_id, resource, limit, user_id=None):
+ # NOTE(danms,comstud): Quotas likely needs an overhaul and currently
+ # doesn't map very well to objects. Since there is quite a bit of
+ # logic in the db api layer for this, just pass this through for now.
+ db.quota_create(context, project_id, resource, limit, user_id=user_id)
+
+ @base.remotable_classmethod
+ def update_limit(cls, context, project_id, resource, limit, user_id=None):
+ # NOTE(danms,comstud): Quotas likely needs an overhaul and currently
+ # doesn't map very well to objects. Since there is quite a bit of
+ # logic in the db api layer for this, just pass this through for now.
+ db.quota_update(context, project_id, resource, limit, user_id=user_id)
+
class QuotasNoOp(Quotas):
def reserve(context, expire=None, project_id=None, user_id=None,
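
A minimal usage sketch for the new pass-through limit calls, assuming an
admin context and a reachable database (the project name is illustrative)::

    from nova import context
    from nova.objects import quotas

    ctxt = context.get_admin_context()
    # Cap 'instances' for one project at 20, then raise the cap to 40.
    quotas.Quotas.create_limit(ctxt, 'demo-project', 'instances', 20)
    quotas.Quotas.update_limit(ctxt, 'demo-project', 'instances', 40)
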
diff --git a/nova/objects/security_group_rule.py b/nova/objects/security_group_rule.py
index a131a7173f..ac27b48a8e 100644
--- a/nova/objects/security_group_rule.py
+++ b/nova/objects/security_group_rule.py
@@ -13,6 +13,7 @@
# under the License.
from nova import db
+from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
@@ -22,10 +23,11 @@
class SecurityGroupRule(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
- VERSION = '1.0'
+ # Version 1.1: Added create() and set id as read_only
+ VERSION = '1.1'
fields = {
- 'id': fields.IntegerField(),
+ 'id': fields.IntegerField(read_only=True),
'protocol': fields.StringField(nullable=True),
'from_port': fields.IntegerField(nullable=True),
'to_port': fields.IntegerField(nullable=True),
@@ -54,6 +56,21 @@ def _from_db_object(context, rule, db_rule, expected_attrs=None):
rule.obj_reset_changes()
return rule
+ @base.remotable
+ def create(self, context):
+ if self.obj_attr_is_set('id'):
+ raise exception.ObjectActionError(action='create',
+ reason='already created')
+ updates = self.obj_get_changes()
+ parent_group = updates.pop('parent_group', None)
+ if parent_group:
+ updates['parent_group_id'] = parent_group.id
+ grantee_group = updates.pop('grantee_group', None)
+ if grantee_group:
+ updates['group_id'] = grantee_group.id
+ db_rule = db.security_group_rule_create(context, updates)
+ self._from_db_object(context, self, db_rule)
+
@base.remotable_classmethod
def get_by_id(cls, context, rule_id):
db_rule = db.security_group_rule_get(context, rule_id)
@@ -64,8 +81,10 @@ class SecurityGroupRuleList(base.ObjectListBase, base.NovaObject):
fields = {
'objects': fields.ListOfObjectsField('SecurityGroupRule'),
}
+ VERSION = '1.1'
child_versions = {
'1.0': '1.0',
+ '1.1': '1.1',
}
@base.remotable_classmethod
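
A rough sketch of driving the new create(); field names beyond those shown
here (cidr, parent_group, grantee_group) are assumptions from the full
object definition::

    from nova import context
    from nova.objects import security_group_rule

    ctxt = context.get_admin_context()
    rule = security_group_rule.SecurityGroupRule(context=ctxt)
    rule.protocol = 'tcp'
    rule.from_port = 22
    rule.to_port = 22
    # A real rule would also set parent_group (and usually cidr or
    # grantee_group) before saving.
    rule.create(ctxt)
    # Calling create() again raises ObjectActionError, since 'id' is
    # now set and read-only.
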
diff --git a/nova/objects/service.py b/nova/objects/service.py
index 00fb8ac683..6d53930c43 100644
--- a/nova/objects/service.py
+++ b/nova/objects/service.py
@@ -19,6 +19,7 @@
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import log as logging
+from nova import utils
LOG = logging.getLogger(__name__)
@@ -28,7 +29,8 @@ class Service(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added compute_node nested object
# Version 1.2: String attributes updated to support unicode
- VERSION = '1.2'
+ # Version 1.3: ComputeNode version 1.5
+ VERSION = '1.3'
fields = {
'id': fields.IntegerField(read_only=True),
@@ -42,6 +44,13 @@ class Service(base.NovaPersistentObject, base.NovaObject):
'compute_node': fields.ObjectField('ComputeNode'),
}
+ def obj_make_compatible(self, primitive, target_version):
+ target_version = utils.convert_version_to_tuple(target_version)
+ if target_version < (1, 3) and 'compute_node' in primitive:
+            self.compute_node.obj_make_compatible(
+                primitive['compute_node']['nova_object.data'], '1.4')
+            primitive['compute_node']['nova_object.version'] = '1.4'
+
@staticmethod
def _do_compute_node(context, service, db_service):
try:
@@ -128,7 +137,8 @@ def destroy(self, context):
class ServiceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Service <= version 1.2
- VERSION = '1.0'
+    # Version 1.1: Service version 1.3
+ VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('Service'),
@@ -136,6 +146,7 @@ class ServiceList(base.ObjectListBase, base.NovaObject):
child_versions = {
'1.0': '1.2',
# NOTE(danms): Service was at 1.2 before we added this
+ '1.1': '1.3',
}
@base.remotable_classmethod
diff --git a/nova/objects/virtual_interface.py b/nova/objects/virtual_interface.py
index 51cd24c352..b6671b3bcf 100644
--- a/nova/objects/virtual_interface.py
+++ b/nova/objects/virtual_interface.py
@@ -95,8 +95,8 @@ def get_all(cls, context):
objects.VirtualInterface, db_vifs)
@base.remotable_classmethod
    def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
db_vifs = db.virtual_interface_get_by_instance(context, instance_uuid,
                                                       use_slave=use_slave)
return base.obj_make_list(context, cls(context),
objects.VirtualInterface, db_vifs)
diff --git a/nova/openstack/common/context.py b/nova/openstack/common/context.py
index 09019ee384..b612db7140 100644
--- a/nova/openstack/common/context.py
+++ b/nova/openstack/common/context.py
@@ -25,7 +25,7 @@
def generate_request_id():
- return 'req-%s' % str(uuid.uuid4())
+ return b'req-' + str(uuid.uuid4()).encode('ascii')
class RequestContext(object):
@@ -77,6 +77,21 @@ def to_dict(self):
'instance_uuid': self.instance_uuid,
'user_identity': user_idt}
+ @classmethod
+ def from_dict(cls, ctx):
+ return cls(
+ auth_token=ctx.get("auth_token"),
+ user=ctx.get("user"),
+ tenant=ctx.get("tenant"),
+ domain=ctx.get("domain"),
+ user_domain=ctx.get("user_domain"),
+ project_domain=ctx.get("project_domain"),
+ is_admin=ctx.get("is_admin", False),
+ read_only=ctx.get("read_only", False),
+ show_deleted=ctx.get("show_deleted", False),
+ request_id=ctx.get("request_id"),
+ instance_uuid=ctx.get("instance_uuid"))
+
def get_admin_context(show_deleted=False):
context = RequestContext(None,
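
The new from_dict() mirrors to_dict(), so a context can round-trip through
a serialized payload; a small sketch::

    from nova.openstack.common import context

    ctx = context.RequestContext(user='demo', tenant='demo-tenant')
    payload = ctx.to_dict()        # e.g. attached to an RPC message
    restored = context.RequestContext.from_dict(payload)
    assert restored.tenant == 'demo-tenant'
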
diff --git a/nova/openstack/common/db/sqlalchemy/migration.py b/nova/openstack/common/db/sqlalchemy/migration.py
index 1d6ac34942..b9dd2851ad 100644
--- a/nova/openstack/common/db/sqlalchemy/migration.py
+++ b/nova/openstack/common/db/sqlalchemy/migration.py
@@ -64,7 +64,7 @@ def _get_unique_constraints(self, table):
data = table.metadata.bind.execute(
"""SELECT sql
               FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
diff --git a/nova/openstack/common/db/sqlalchemy/utils.py b/nova/openstack/common/db/sqlalchemy/utils.py
index 9b7008fb39..02d8cf4848 100644
--- a/nova/openstack/common/db/sqlalchemy/utils.py
+++ b/nova/openstack/common/db/sqlalchemy/utils.py
@@ -29,7 +29,6 @@
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
-from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy import String
@@ -184,8 +183,9 @@ def _project_filter(query, db_model, context, project_only):
if request_context.is_user_context(context) and project_only:
if project_only == 'allow_none':
is_none = None
- query = query.filter(or_(db_model.project_id == context.project_id,
- db_model.project_id == is_none))
+ query = query.filter(sqlalchemy.sql.or_(
+ db_model.project_id == context.project_id,
+ db_model.project_id == is_none))
else:
query = query.filter(db_model.project_id == context.project_id)
diff --git a/nova/openstack/common/fileutils.py b/nova/openstack/common/fileutils.py
index 16050a138f..12ae198303 100644
--- a/nova/openstack/common/fileutils.py
+++ b/nova/openstack/common/fileutils.py
@@ -50,8 +50,8 @@ def read_cached_file(filename, force_reload=False):
"""
global _FILE_CACHE
- if force_reload and filename in _FILE_CACHE:
- del _FILE_CACHE[filename]
+ if force_reload:
+ delete_cached_file(filename)
reloaded = False
mtime = os.path.getmtime(filename)
@@ -66,6 +66,17 @@ def read_cached_file(filename, force_reload=False):
return (reloaded, cache_info['data'])
+def delete_cached_file(filename):
+ """Delete cached file if present.
+
+ :param filename: filename to delete
+ """
+ global _FILE_CACHE
+
+ if filename in _FILE_CACHE:
+ del _FILE_CACHE[filename]
+
+
def delete_if_exists(path, remove=os.unlink):
"""Delete a file, but ignore file not found error.
diff --git a/nova/openstack/common/gettextutils.py b/nova/openstack/common/gettextutils.py
index dcb53582a1..b120c0b04c 100644
--- a/nova/openstack/common/gettextutils.py
+++ b/nova/openstack/common/gettextutils.py
@@ -23,7 +23,6 @@
"""
import copy
-import functools
import gettext
import locale
from logging import handlers
@@ -42,7 +41,7 @@ class TranslatorFactory(object):
"""Create translator functions
"""
- def __init__(self, domain, lazy=False, localedir=None):
+ def __init__(self, domain, localedir=None):
"""Establish a set of translation functions for the domain.
:param domain: Name of translation domain,
@@ -55,7 +54,6 @@ def __init__(self, domain, lazy=False, localedir=None):
:type localedir: str
"""
self.domain = domain
- self.lazy = lazy
if localedir is None:
localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
self.localedir = localedir
@@ -75,16 +73,19 @@ def _make_translation_func(self, domain=None):
"""
if domain is None:
domain = self.domain
- if self.lazy:
- return functools.partial(Message, domain=domain)
- t = gettext.translation(
- domain,
- localedir=self.localedir,
- fallback=True,
- )
- if six.PY3:
- return t.gettext
- return t.ugettext
+ t = gettext.translation(domain,
+ localedir=self.localedir,
+ fallback=True)
+ # Use the appropriate method of the translation object based
+ # on the python version.
+ m = t.gettext if six.PY3 else t.ugettext
+
+ def f(msg):
+ """oslo.i18n.gettextutils translation function."""
+ if USE_LAZY:
+ return Message(msg, domain=domain)
+ return m(msg)
+ return f
@property
def primary(self):
@@ -147,19 +148,11 @@ def enable_lazy():
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
- # FIXME(dhellmann): This function will be removed in oslo.i18n,
- # because the TranslatorFactory makes it superfluous.
- global _, _LI, _LW, _LE, _LC, USE_LAZY
- tf = TranslatorFactory('nova', lazy=True)
- _ = tf.primary
- _LI = tf.log_info
- _LW = tf.log_warning
- _LE = tf.log_error
- _LC = tf.log_critical
+ global USE_LAZY
USE_LAZY = True
-def install(domain, lazy=False):
+def install(domain):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
@@ -170,26 +163,14 @@ def install(domain, lazy=False):
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
+ Note that to enable lazy translation, enable_lazy must be
+ called.
+
:param domain: the translation domain
- :param lazy: indicates whether or not to install the lazy _() function.
- The lazy _() introduces a way to do deferred translation
- of messages by installing a _ that builds Message objects,
- instead of strings, which can then be lazily translated into
- any available locale.
"""
- if lazy:
- from six import moves
- tf = TranslatorFactory(domain, lazy=True)
- moves.builtins.__dict__['_'] = tf.primary
- else:
- localedir = '%s_LOCALEDIR' % domain.upper()
- if six.PY3:
- gettext.install(domain,
- localedir=os.environ.get(localedir))
- else:
- gettext.install(domain,
- localedir=os.environ.get(localedir),
- unicode=True)
+ from six import moves
+ tf = TranslatorFactory(domain)
+ moves.builtins.__dict__['_'] = tf.primary
class Message(six.text_type):
@@ -350,9 +331,9 @@ def get_available_languages(domain):
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
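
Laziness is now decided per call via USE_LAZY rather than baked into the
factory, so a service opts in globally; a sketch::

    from nova.openstack.common import gettextutils

    # Install _() into builtins for the 'nova' domain, then defer
    # translation: _() now returns Message objects that are translated
    # when rendered, not when created.
    gettextutils.install('nova')
    gettextutils.enable_lazy()
    msg = _('Compute node is down')   # a Message, not a plain string
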
diff --git a/nova/openstack/common/jsonutils.py b/nova/openstack/common/jsonutils.py
index 650c983281..a201b8c817 100644
--- a/nova/openstack/common/jsonutils.py
+++ b/nova/openstack/common/jsonutils.py
@@ -168,6 +168,10 @@ def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
+def dump(obj, fp, *args, **kwargs):
+ return json.dump(obj, fp, *args, **kwargs)
+
+
def loads(s, encoding='utf-8', **kwargs):
return json.loads(strutils.safe_decode(s, encoding), **kwargs)
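
The new dump() keeps file-object serialization behind the jsonutils facade,
matching the existing dumps()/loads() wrappers; a sketch (path
illustrative)::

    from nova.openstack.common import jsonutils

    with open('/tmp/spec.json', 'w') as fp:
        jsonutils.dump({'vendor_id': '8086', 'product_id': '154d'}, fp)
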
diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
index 354af01e25..62c1de3358 100644
--- a/nova/openstack/common/log.py
+++ b/nova/openstack/common/log.py
@@ -33,7 +33,7 @@
import logging.config
import logging.handlers
import os
-import re
+import socket
import sys
import traceback
@@ -41,34 +41,19 @@
import six
from six import moves
+_PY26 = sys.version_info[0:2] == (2, 6)
+
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import local
+# NOTE(flaper87): Pls, remove when graduating this module
+# from the incubator.
+from nova.openstack.common.strutils import mask_password # noqa
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
-_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
-
-# NOTE(ldbragst): Let's build a list of regex objects using the list of
-# _SANITIZE_KEYS we already have. This way, we only have to add the new key
-# to the list of _SANITIZE_KEYS and we can generate regular expressions
-# for XML and JSON automatically.
-_SANITIZE_PATTERNS = []
-_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
- r'(<%(key)s>).*?(%(key)s>)',
- r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
- r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
- r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
- '.*?([\'"])',
- r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']
-
-for key in _SANITIZE_KEYS:
- for pattern in _FORMAT_PATTERNS:
- reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
- _SANITIZE_PATTERNS.append(reg_ex)
-
common_cli_opts = [
cfg.BoolOpt('debug',
@@ -138,6 +123,13 @@
help='Log output to standard error.')
]
+DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN',
+ 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO',
+ 'oslo.messaging=INFO', 'iso8601=WARN',
+ 'requests.packages.urllib3.connectionpool=WARN',
+ 'urllib3.connectionpool=WARN', 'websocket=WARN',
+ "keystonemiddleware=WARN", "routes.middleware=WARN"]
+
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
@@ -156,17 +148,7 @@
'%(instance)s',
help='Prefix each line of exception output with this format.'),
cfg.ListOpt('default_log_levels',
- default=[
- 'amqp=WARN',
- 'amqplib=WARN',
- 'boto=WARN',
- 'qpid=WARN',
- 'sqlalchemy=WARN',
- 'suds=INFO',
- 'oslo.messaging=INFO',
- 'iso8601=WARN',
- 'requests.packages.urllib3.connectionpool=WARN'
- ],
+ default=DEFAULT_LOG_LEVELS,
help='List of logger=LEVEL pairs.'),
cfg.BoolOpt('publish_errors',
default=False,
@@ -244,45 +226,20 @@ def _get_log_file_path(binary=None):
return None
-def mask_password(message, secret="***"):
- """Replace password with 'secret' in message.
-
- :param message: The string which includes security information.
- :param secret: value with which to replace passwords.
- :returns: The unicode value of message with the password fields masked.
-
- For example:
-
- >>> mask_password("'adminPass' : 'aaaaa'")
- "'adminPass' : '***'"
- >>> mask_password("'admin_pass' : 'aaaaa'")
- "'admin_pass' : '***'"
- >>> mask_password('"password" : "aaaaa"')
- '"password" : "***"'
- >>> mask_password("'original_password' : 'aaaaa'")
- "'original_password' : '***'"
- >>> mask_password("u'original_password' : u'aaaaa'")
- "u'original_password' : u'***'"
- """
- message = six.text_type(message)
-
- # NOTE(ldbragst): Check to see if anything in message contains any key
- # specified in _SANITIZE_KEYS, if not then just return the message since
- # we don't have to mask any passwords.
- if not any(key in message for key in _SANITIZE_KEYS):
- return message
-
- secret = r'\g<1>' + secret + r'\g<2>'
- for pattern in _SANITIZE_PATTERNS:
- message = re.sub(pattern, secret, message)
- return message
-
-
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
+ def isEnabledFor(self, level):
+ if _PY26:
+ # This method was added in python 2.7 (and it does the exact
+ # same logic, so we need to do the exact same logic so that
+ # python 2.6 has this capability as well).
+ return self.logger.isEnabledFor(level)
+ else:
+ return super(BaseLoggerAdapter, self).isEnabledFor(level)
+
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
@@ -295,6 +252,11 @@ def __init__(self, name='unknown', version='unknown'):
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
+ if six.PY3:
+ # In Python 3, the code fails because the 'manager' attribute
+ # cannot be found when using a LoggerAdapter as the
+ # underlying logger. Work around this issue.
+ self._logger.manager = self._logger.logger.manager
return self._logger
@@ -340,11 +302,10 @@ def deprecated(self, msg, *args, **kwargs):
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
- # NOTE(mrodden): catch any Message/other object and
- # coerce to unicode before they can get
- # to the python logging and possibly
- # cause string encoding trouble
- if not isinstance(msg, six.string_types):
+ # NOTE(jecarey): If msg is not unicode, coerce it into unicode
+ # before it can get to the python logging and
+ # possibly cause string encoding trouble
+ if not isinstance(msg, six.text_type):
msg = six.text_type(msg)
if 'extra' not in kwargs:
@@ -448,7 +409,7 @@ def _load_log_config(log_config_append):
try:
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
- except moves.configparser.Error as exc:
+ except (moves.configparser.Error, KeyError) as exc:
raise LogConfigError(log_config_append, six.text_type(exc))
@@ -461,9 +422,20 @@ def setup(product_name, version='unknown'):
sys.excepthook = _create_logging_excepthook(product_name)
-def set_defaults(logging_context_format_string):
- cfg.set_defaults(
- log_opts, logging_context_format_string=logging_context_format_string)
+def set_defaults(logging_context_format_string=None,
+ default_log_levels=None):
+    # Just in case the caller is not setting the
+    # default_log_levels. This is insurance because
+    # we introduced the default_log_levels parameter
+    # later in a backwards-incompatible change
+ if default_log_levels is not None:
+ cfg.set_defaults(
+ log_opts,
+ default_log_levels=default_log_levels)
+ if logging_context_format_string is not None:
+ cfg.set_defaults(
+ log_opts,
+ logging_context_format_string=logging_context_format_string)
def _find_facility_from_conf():
@@ -512,18 +484,6 @@ def _setup_logging_from_conf(project, version):
for handler in log_root.handlers:
log_root.removeHandler(handler)
- if CONF.use_syslog:
- facility = _find_facility_from_conf()
- # TODO(bogdando) use the format provided by RFCSysLogHandler
- # after existing syslog format deprecation in J
- if CONF.use_syslog_rfc_format:
- syslog = RFCSysLogHandler(address='/dev/log',
- facility=facility)
- else:
- syslog = logging.handlers.SysLogHandler(address='/dev/log',
- facility=facility)
- log_root.addHandler(syslog)
-
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
@@ -582,6 +542,20 @@ def _setup_logging_from_conf(project, version):
else:
logger.setLevel(level_name)
+ if CONF.use_syslog:
+ try:
+ facility = _find_facility_from_conf()
+ # TODO(bogdando) use the format provided by RFCSysLogHandler
+ # after existing syslog format deprecation in J
+ if CONF.use_syslog_rfc_format:
+ syslog = RFCSysLogHandler(facility=facility)
+ else:
+ syslog = logging.handlers.SysLogHandler(facility=facility)
+ log_root.addHandler(syslog)
+ except socket.error:
+            log_root.error('Unable to add syslog handler. Verify that '
+                           'syslog is running.')
+
_loggers = {}
@@ -651,6 +625,12 @@ def __init__(self, *args, **kwargs):
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
+ # NOTE(jecarey): If msg is not unicode, coerce it into unicode
+ # before it can get to the python logging and
+ # possibly cause string encoding trouble
+ if not isinstance(record.msg, six.text_type):
+ record.msg = six.text_type(record.msg)
+
# store project info
record.project = self.project
record.version = self.version
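
Consumers can now override the shipped logger levels independently of the
context format string; a sketch::

    from nova.openstack.common import log as logging

    # Extend the shared defaults with one project-specific override; the
    # logging_context_format_string default is left alone (None).
    logging.set_defaults(
        default_log_levels=(logging.DEFAULT_LOG_LEVELS +
                            ['glanceclient=WARN']))
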
diff --git a/nova/openstack/common/loopingcall.py b/nova/openstack/common/loopingcall.py
index d072d24ada..82411af926 100644
--- a/nova/openstack/common/loopingcall.py
+++ b/nova/openstack/common/loopingcall.py
@@ -16,31 +16,36 @@
# under the License.
import sys
+import time
from eventlet import event
from eventlet import greenthread
from nova.openstack.common.gettextutils import _LE, _LW
from nova.openstack.common import log as logging
-from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
+# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
+# with time.time() called in the standard logging module
+# during unittests.
+_ts = lambda: time.time()
+
class LoopingCallDone(Exception):
- """Exception to break out and stop a LoopingCall.
+ """Exception to break out and stop a LoopingCallBase.
- The poll-function passed to LoopingCall can raise this exception to
+ The poll-function passed to LoopingCallBase can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
- this return-value will be returned by LoopingCall.wait()
+ this return-value will be returned by LoopingCallBase.wait()
"""
def __init__(self, retvalue=True):
- """:param retvalue: Value that LoopingCall.wait() should return."""
+ """:param retvalue: Value that LoopingCallBase.wait() should return."""
self.retvalue = retvalue
@@ -72,16 +77,17 @@ def _inner():
try:
while self._running:
- start = timeutils.utcnow()
+ start = _ts()
self.f(*self.args, **self.kw)
- end = timeutils.utcnow()
+ end = _ts()
if not self._running:
break
- delay = interval - timeutils.delta_seconds(start, end)
- if delay <= 0:
- LOG.warn(_LW('task run outlasted interval by %s sec') %
- -delay)
- greenthread.sleep(delay if delay > 0 else 0)
+ delay = end - start - interval
+ if delay > 0:
+ LOG.warn(_LW('task %(func_name)s run outlasted '
+ 'interval by %(delay).2f sec'),
+ {'func_name': repr(self.f), 'delay': delay})
+ greenthread.sleep(-delay if delay < 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
@@ -98,11 +104,6 @@ def _inner():
return self.done
-# TODO(mikal): this class name is deprecated in Havana and should be removed
-# in the I release
-LoopingCall = FixedIntervalLoopingCall
-
-
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
@@ -126,8 +127,9 @@ def _inner():
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
- LOG.debug('Dynamic looping call sleeping for %.02f '
- 'seconds', idle)
+ LOG.debug('Dynamic looping call %(func_name)s sleeping '
+ 'for %(idle).02f seconds',
+ {'func_name': repr(self.f), 'idle': idle})
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
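
With the deprecated LoopingCall alias removed, callers name
FixedIntervalLoopingCall directly; a sketch of the usual poll-until-done
idiom::

    from nova.openstack.common import loopingcall

    def _poll_ready():
        # ... check some condition; when satisfied, stop the loop and
        # hand a value back to wait().
        raise loopingcall.LoopingCallDone(retvalue='ready')

    timer = loopingcall.FixedIntervalLoopingCall(_poll_ready)
    result = timer.start(interval=0.5).wait()   # 'ready'
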
diff --git a/nova/openstack/common/memorycache.py b/nova/openstack/common/memorycache.py
index 313a8c14fb..5e16363eaa 100644
--- a/nova/openstack/common/memorycache.py
+++ b/nova/openstack/common/memorycache.py
@@ -22,7 +22,6 @@
memcache_opts = [
cfg.ListOpt('memcached_servers',
- default=None,
help='Memcached servers or None for in process cache.'),
]
@@ -36,11 +35,8 @@ def get_client(memcached_servers=None):
if not memcached_servers:
memcached_servers = CONF.memcached_servers
if memcached_servers:
- try:
- import memcache
- client_cls = memcache.Client
- except ImportError:
- pass
+ import memcache
+ client_cls = memcache.Client
return client_cls(memcached_servers, debug=0)
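
Note the behavioral shift: with memcached_servers configured and
python-memcached missing, get_client() now raises ImportError instead of
silently handing back the in-process cache. A sketch of the fallback path,
assuming the in-process Client mirrors the memcache.Client get/set API as
it does in the incubator copy::

    from nova.openstack.common import memorycache

    cache = memorycache.get_client()   # no servers: in-process fallback
    cache.set('greeting', 'hello', time=60)
    assert cache.get('greeting') == 'hello'
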
diff --git a/nova/openstack/common/network_utils.py b/nova/openstack/common/network_utils.py
index 88f4c9f638..331edcb411 100644
--- a/nova/openstack/common/network_utils.py
+++ b/nova/openstack/common/network_utils.py
@@ -113,16 +113,15 @@ def set_tcp_keepalive(sock, tcp_keepalive=True,
This function configures tcp keepalive parameters if users wish to do
so.
- :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are
- not sure, this should be True, and default values will be used.
- :param tcp_keepidle: time to wait before starting to send keepalive probes
+    :param tcp_keepalive: Boolean, turn on or off tcp_keepalive. If users are
+        not sure, this should be True, and default values will be used
+    :param tcp_keepidle: time to wait before starting to send keepalive probes
+    :param tcp_keepalive_interval: time between successive probes, once the
+        initial wait time is over
+    :param tcp_keepalive_count: number of probes to send before the connection
+        is killed
- :param tcp_keepalive_interval: time between successive probes, once the
- initial wait time is over
-
- :param tcp_keepalive_count: number of probes to send before the connection
- is killed
"""
# NOTE(praneshp): Despite keepalive being a tcp concept, the level is
diff --git a/nova/openstack/common/report/report.py b/nova/openstack/common/report/report.py
index 7fca30c777..730ab4ac0c 100644
--- a/nova/openstack/common/report/report.py
+++ b/nova/openstack/common/report/report.py
@@ -89,9 +89,9 @@ class ReportSection(object):
:func:`BasicReport.add_section`
:param view: the top-level view for this section
- :param generator: the generator for this section
- (any callable object which takes
- no parameters and returns a data model)
+    :param generator: the generator for this section, which can be
+                      any callable object that takes
+                      no parameters and returns a data model
"""
def __init__(self, view, generator):
diff --git a/nova/openstack/common/sslutils.py b/nova/openstack/common/sslutils.py
index a18e7fd051..00e6173d07 100644
--- a/nova/openstack/common/sslutils.py
+++ b/nova/openstack/common/sslutils.py
@@ -22,15 +22,12 @@
ssl_opts = [
cfg.StrOpt('ca_file',
- default=None,
help="CA certificate file to use to verify "
"connecting clients."),
cfg.StrOpt('cert_file',
- default=None,
help="Certificate file to use when starting "
"the server securely."),
cfg.StrOpt('key_file',
- default=None,
help="Private key file to use when starting "
"the server securely."),
]
diff --git a/nova/openstack/common/strutils.py b/nova/openstack/common/strutils.py
index 3d98260b1d..b75eb85354 100644
--- a/nova/openstack/common/strutils.py
+++ b/nova/openstack/common/strutils.py
@@ -50,6 +50,28 @@
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
+# NOTE(flaper87): The following 3 globals are used by `mask_password`
+_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
+
+# NOTE(ldbragst): Let's build a list of regex objects using the list of
+# _SANITIZE_KEYS we already have. This way, we only have to add the new key
+# to the list of _SANITIZE_KEYS and we can generate regular expressions
+# for XML and JSON automatically.
+_SANITIZE_PATTERNS = []
+_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
+ r'(<%(key)s>).*?(%(key)s>)',
+ r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
+ r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
+ r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
+ '.*?([\'"])',
+ r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']
+
+for key in _SANITIZE_KEYS:
+ for pattern in _FORMAT_PATTERNS:
+ reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
+ _SANITIZE_PATTERNS.append(reg_ex)
+
+
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
@@ -237,3 +259,37 @@ def to_slug(value, incoming=None, errors="strict"):
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
+
+
+def mask_password(message, secret="***"):
+ """Replace password with 'secret' in message.
+
+ :param message: The string which includes security information.
+ :param secret: value with which to replace passwords.
+ :returns: The unicode value of message with the password fields masked.
+
+ For example:
+
+ >>> mask_password("'adminPass' : 'aaaaa'")
+ "'adminPass' : '***'"
+ >>> mask_password("'admin_pass' : 'aaaaa'")
+ "'admin_pass' : '***'"
+ >>> mask_password('"password" : "aaaaa"')
+ '"password" : "***"'
+ >>> mask_password("'original_password' : 'aaaaa'")
+ "'original_password' : '***'"
+ >>> mask_password("u'original_password' : u'aaaaa'")
+ "u'original_password' : u'***'"
+ """
+ message = six.text_type(message)
+
+ # NOTE(ldbragst): Check to see if anything in message contains any key
+ # specified in _SANITIZE_KEYS, if not then just return the message since
+ # we don't have to mask any passwords.
+ if not any(key in message for key in _SANITIZE_KEYS):
+ return message
+
+ secret = r'\g<1>' + secret + r'\g<2>'
+ for pattern in _SANITIZE_PATTERNS:
+ message = re.sub(pattern, secret, message)
+ return message
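
Call sites that imported mask_password from the log module keep working via
the re-export added to log.py, but new code should target strutils; a
sketch::

    from nova.openstack.common import strutils

    raw = "body: {'adminPass' : 'Sup3rS3cret'}"
    print(strutils.mask_password(raw))
    # body: {'adminPass' : '***'}
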
diff --git a/nova/openstack/common/systemd.py b/nova/openstack/common/systemd.py
index 4fa0c62790..5628d54f00 100644
--- a/nova/openstack/common/systemd.py
+++ b/nova/openstack/common/systemd.py
@@ -50,14 +50,16 @@ def _sd_notify(unset_env, msg):
def notify():
"""Send notification to Systemd that service is ready.
+
For details see
- http://www.freedesktop.org/software/systemd/man/sd_notify.html
+ http://www.freedesktop.org/software/systemd/man/sd_notify.html
"""
_sd_notify(False, 'READY=1')
def notify_once():
"""Send notification once to Systemd that service is ready.
+
Systemd sets NOTIFY_SOCKET environment variable with the name of the
socket listening for notifications from services.
This method removes the NOTIFY_SOCKET environment variable to ensure
@@ -75,7 +77,7 @@ def onready(notify_socket, timeout):
:type timeout: float
:returns: 0 service ready
1 service not ready
- 2 timeout occured
+ 2 timeout occurred
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(timeout)
diff --git a/nova/openstack/common/versionutils.py b/nova/openstack/common/versionutils.py
index 86e196140d..1facce7726 100644
--- a/nova/openstack/common/versionutils.py
+++ b/nova/openstack/common/versionutils.py
@@ -18,6 +18,7 @@
"""
import functools
+
import pkg_resources
from nova.openstack.common.gettextutils import _
@@ -52,18 +53,34 @@ class deprecated(object):
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
... def c(): pass
+ 4. Specifying the deprecated functionality will not be removed:
+ >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
+ ... def d(): pass
+
+ 5. Specifying a replacement, deprecated functionality will not be removed:
+ >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
+ ... def e(): pass
+
"""
+ # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
+ # expected we maintain a gap between Bexar and Folsom in this list.
+ BEXAR = 'B'
FOLSOM = 'F'
GRIZZLY = 'G'
HAVANA = 'H'
ICEHOUSE = 'I'
+ JUNO = 'J'
_RELEASES = {
+ # NOTE(morganfainberg): Bexar is used for unit test purposes, it is
+ # expected we maintain a gap between Bexar and Folsom in this list.
+ 'B': 'Bexar',
'F': 'Folsom',
'G': 'Grizzly',
'H': 'Havana',
'I': 'Icehouse',
+ 'J': 'Juno',
}
_deprecated_msg_with_alternative = _(
@@ -74,6 +91,12 @@ class deprecated(object):
'%(what)s is deprecated as of %(as_of)s and may be '
'removed in %(remove_in)s. It will not be superseded.')
+ _deprecated_msg_with_alternative_no_removal = _(
+ '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
+
+ _deprecated_msg_with_no_alternative_no_removal = _(
+ '%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
+
def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
"""Initialize decorator
@@ -119,9 +142,19 @@ def _build_message(self):
if self.in_favor_of:
details['in_favor_of'] = self.in_favor_of
- msg = self._deprecated_msg_with_alternative
+ if self.remove_in > 0:
+ msg = self._deprecated_msg_with_alternative
+ else:
+ # There are no plans to remove this function, but it is
+ # now deprecated.
+ msg = self._deprecated_msg_with_alternative_no_removal
else:
- msg = self._deprecated_msg_no_alternative
+ if self.remove_in > 0:
+ msg = self._deprecated_msg_no_alternative
+ else:
+ # There are no plans to remove this function, but it is
+ # now deprecated.
+ msg = self._deprecated_msg_with_no_alternative_no_removal
return msg, details
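
A sketch of the new no-removal messages (the decorated name and the
replacement are illustrative)::

    from nova.openstack.common import versionutils

    @versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
                             in_favor_of='objects.Service.get_by_host()',
                             remove_in=0)
    def service_get_legacy(host):
        """Kept indefinitely; callers now see the no-removal warning."""
        pass
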
diff --git a/nova/pci/pci_devspec.py b/nova/pci/pci_devspec.py
new file mode 100755
index 0000000000..a03cd80b9a
--- /dev/null
+++ b/nova/pci/pci_devspec.py
@@ -0,0 +1,181 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ast
+import re
+
+from nova import exception
+from nova.openstack.common import jsonutils
+from nova.pci import pci_utils
+
+MAX_VENDOR_ID = 0xFFFF
+MAX_PRODUCT_ID = 0xFFFF
+MAX_FUNC = 0x7
+MAX_DOMAIN = 0xFFFF
+MAX_BUS = 0xFF
+MAX_SLOT = 0x1F
+ANY = '*'
+VIRTFN_RE = re.compile("virtfn\d+")
+
+
+def get_value(v):
+ return ast.literal_eval("0x" + v)
+
+
+def get_pci_dev_info(pci_obj, property, max, hex_value):
+ a = getattr(pci_obj, property)
+ if a == ANY:
+ return
+ v = get_value(a)
+ if v > max:
+ raise exception.PciConfigInvalidWhitelist(
+            reason="invalid %s %s" % (property, a))
+ setattr(pci_obj, property, hex_value % v)
+
+
+class PciAddress(object):
+ """Manages the address fields of the whitelist.
+
+ This class checks the address fields of the pci_passthrough_whitelist
+ configuration option, validating the address fields.
+    Example configs are::
+
+        | pci_passthrough_whitelist = {"address":"*:0a:00.*",
+        |                              "physical_network":"physnet1"}
+        | pci_passthrough_whitelist = {"address":":0a:00.",
+        |                              "physical_network":"physnet1"}
+
+    This class validates the address fields, checks for wildcards, and
+    inserts wildcards where a field is left blank.
+ """
+ def __init__(self, pci_addr, is_physical_function):
+ self.domain = ANY
+ self.bus = ANY
+ self.slot = ANY
+ self.func = ANY
+ self.is_physical_function = is_physical_function
+ self._init_address_fields(pci_addr)
+
+ def _check_physical_function(self):
+ if ANY in (self.domain, self.bus, self.slot, self.func):
+ return
+ self.is_physical_function = pci_utils.is_physical_function(self)
+
+ def _init_address_fields(self, pci_addr):
+ if self.is_physical_function:
+ (self.domain, self.bus, self.slot,
+ self.func) = pci_utils.get_pci_address_fields(pci_addr)
+ return
+ dbs, sep, func = pci_addr.partition('.')
+ if func:
+ fstr = func.strip()
+ if fstr != ANY:
+ try:
+ f = get_value(fstr)
+ except SyntaxError:
+ raise exception.PciDeviceWrongAddressFormat(
+ address=pci_addr)
+ if f > MAX_FUNC:
+ raise exception.PciDeviceInvalidAddressField(
+ address=pci_addr, field="function")
+ self.func = "%1x" % f
+ if dbs:
+ dbs_fields = dbs.split(':')
+ if len(dbs_fields) > 3:
+ raise exception.PciDeviceWrongAddressFormat(address=pci_addr)
+            # If we got a partial address like ":00.", we need to turn this
+ # into a domain of ANY, a bus of ANY, and a slot of 00. This code
+ # allows the address bus and/or domain to be left off
+ dbs_all = [ANY for x in range(3 - len(dbs_fields))]
+ dbs_all.extend(dbs_fields)
+ dbs_checked = [s.strip() or ANY for s in dbs_all]
+ self.domain, self.bus, self.slot = dbs_checked
+ get_pci_dev_info(self, 'domain', MAX_DOMAIN, '%04x')
+ get_pci_dev_info(self, 'bus', MAX_BUS, '%02x')
+ get_pci_dev_info(self, 'slot', MAX_SLOT, '%02x')
+ self._check_physical_function()
+
+ def match(self, pci_addr, pci_phys_addr):
+        # Assume this is called with pci_addr and pci_phys_addr from
+        # libvirt; no attempt is made to verify that pci_addr is a VF
+        # of pci_phys_addr.
+ if self.is_physical_function:
+ if not pci_phys_addr:
+ return False
+ domain, bus, slot, func = (
+ pci_utils.get_pci_address_fields(pci_phys_addr))
+ return (self.domain == domain and self.bus == bus and
+ self.slot == slot and self.func == func)
+ else:
+ domain, bus, slot, func = (
+ pci_utils.get_pci_address_fields(pci_addr))
+ conditions = [
+ self.domain in (ANY, domain),
+ self.bus in (ANY, bus),
+ self.slot in (ANY, slot),
+ self.func in (ANY, func)
+ ]
+ return all(conditions)
+
+
+class PciDeviceSpec(object):
+ def __init__(self, dev_spec):
+ self.dev_spec = dev_spec
+ self._init_dev_details()
+ self.dev_count = 0
+
+ def _init_dev_details(self):
+ details = jsonutils.loads(self.dev_spec)
+ self.vendor_id = details.pop("vendor_id", ANY)
+ self.product_id = details.pop("product_id", ANY)
+ self.address = details.pop("address", None)
+ self.dev_name = details.pop("devname", None)
+
+ self.vendor_id = self.vendor_id.strip()
+ get_pci_dev_info(self, 'vendor_id', MAX_VENDOR_ID, '%04x')
+ get_pci_dev_info(self, 'product_id', MAX_PRODUCT_ID, '%04x')
+
+ pf = False
+ if self.address and self.dev_name:
+ raise exception.PciDeviceInvalidDeviceName()
+ if not self.address:
+ if self.dev_name:
+ self.address, pf = pci_utils.get_function_by_ifname(
+ self.dev_name)
+ if not self.address:
+ raise exception.PciDeviceNotFoundById(id=self.dev_name)
+ else:
+ self.address = "*:*:*.*"
+
+ self.address = PciAddress(self.address, pf)
+ self.tags = details
+
+ def match(self, dev_dict):
+ conditions = [
+ self.vendor_id in (ANY, dev_dict['vendor_id']),
+ self.product_id in (ANY, dev_dict['product_id']),
+ self.address.match(dev_dict['address'],
+ dev_dict.get('phys_function'))
+ ]
+ return all(conditions)
+
+ def match_pci_obj(self, pci_obj):
+ if pci_obj.extra_info:
+ phy_func = pci_obj.extra_info.get('phys_function')
+ else:
+ phy_func = None
+ return self.match({'vendor_id': pci_obj.vendor_id,
+ 'product_id': pci_obj.product_id,
+ 'address': pci_obj.address,
+ 'phys_function': phy_func})
+
+ def get_tags(self):
+ return self.tags
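
A sketch of whitelist matching with the new classes (vendor/product IDs and
the address are illustrative)::

    from nova.pci import pci_devspec

    spec = pci_devspec.PciDeviceSpec(
        '{"vendor_id": "8086", "product_id": "154d",'
        ' "address": "*:0a:00.*"}')
    # Any function on bus 0a, slot 00 of this device type matches.
    assert spec.match({'vendor_id': '8086',
                       'product_id': '154d',
                       'address': '0000:0a:00.1',
                       'phys_function': None})
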
diff --git a/nova/pci/pci_manager.py b/nova/pci/pci_manager.py
index bdbad0d745..a56ab42152 100644
--- a/nova/pci/pci_manager.py
+++ b/nova/pci/pci_manager.py
@@ -20,14 +20,12 @@
from nova.compute import vm_states
from nova import context
from nova import exception
-from nova.objects import instance
-from nova.objects import pci_device as pci_device_obj
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova import objects
from nova.openstack.common import log as logging
from nova.pci import pci_device
from nova.pci import pci_request
from nova.pci import pci_stats
-from nova.pci import pci_utils
LOG = logging.getLogger(__name__)
@@ -58,8 +56,7 @@ def __init__(self, node_id=None):
self.stats = pci_stats.PciDeviceStats()
if node_id:
self.pci_devs = list(
- pci_device_obj.PciDeviceList.get_by_compute_node(
- context, node_id))
+ objects.PciDeviceList.get_by_compute_node(context, node_id))
else:
self.pci_devs = []
self._initial_instance_usage()
@@ -76,46 +73,6 @@ def _initial_instance_usage(self):
elif dev['status'] == 'available':
self.stats.add_device(dev)
- def _filter_devices_for_spec(self, request_spec, pci_devs):
- return [p for p in pci_devs
- if pci_utils.pci_device_prop_match(p, request_spec)]
-
- def _get_free_devices_for_request(self, pci_request, pci_devs):
- count = pci_request.get('count', 1)
- spec = pci_request.get('spec', [])
- devs = self._filter_devices_for_spec(spec, pci_devs)
- if len(devs) < count:
- return None
- else:
- return devs[:count]
-
- @property
- def free_devs(self):
- return [dev for dev in self.pci_devs if dev.status == 'available']
-
- def get_free_devices_for_requests(self, pci_requests):
- """Select free pci devices for requests
-
- Pci_requests is a list of pci_request dictionaries. Each dictionary
- has three keys:
- count: number of pci devices required, default 1
- spec: the pci properties that the devices should meet
- alias_name: alias the pci_request is translated from, optional
-
- If any single pci_request cannot find any free devices, then the
- entire request list will fail.
- """
- alloc = []
-
- for request in pci_requests:
- available = self._get_free_devices_for_request(
- request,
- [p for p in self.free_devs if p not in alloc])
- if not available:
- return []
- alloc.extend(available)
- return alloc
-
@property
def all_devs(self):
return self.pci_devs
@@ -164,7 +121,7 @@ def set_hvdevs(self, devices):
else:
# Note(yjiang5): no need to update stats if an assigned
# device is hot removed.
- self.stats.consume_device(existed)
+ self.stats.remove_device(existed)
else:
new_value = next((dev for dev in devices if
dev['address'] == existed['address']))
@@ -189,7 +146,7 @@ def set_hvdevs(self, devices):
for dev in [dev for dev in devices if
dev['address'] in new_addrs - exist_addrs]:
dev['compute_node_id'] = self.node_id
- dev_obj = pci_device_obj.PciDevice.create(dev)
+ dev_obj = objects.PciDevice.create(dev)
self.pci_devs.append(dev_obj)
self.stats.add_device(dev_obj)
@@ -198,12 +155,11 @@ def _claim_instance(self, instance, prefix=''):
instance, prefix)
if not pci_requests:
return None
- devs = self.get_free_devices_for_requests(pci_requests)
+ devs = self.stats.consume_requests(pci_requests)
if not devs:
raise exception.PciDeviceRequestFailed(pci_requests)
for dev in devs:
pci_device.claim(dev, instance)
- self.stats.consume_device(dev)
return devs
def _allocate_instance(self, instance, devs):
@@ -314,9 +270,9 @@ def set_compute_node_id(self, node_id):
def get_instance_pci_devs(inst):
"""Get the devices assigned to the instances."""
- if isinstance(inst, instance.Instance):
+ if isinstance(inst, objects.Instance):
return inst.pci_devices
else:
ctxt = context.get_admin_context()
- return pci_device_obj.PciDeviceList.get_by_instance_uuid(
+ return objects.PciDeviceList.get_by_instance_uuid(
ctxt, inst['uuid'])
diff --git a/nova/pci/pci_request.py b/nova/pci/pci_request.py
index d07dfbd96d..cb031e5cc1 100644
--- a/nova/pci/pci_request.py
+++ b/nova/pci/pci_request.py
@@ -13,23 +13,27 @@
# License for the specific language governing permissions and limitations
# under the License.
-""" Example of a PCI alias:
- pci_alias = '{
- "name": "QuicAssist",
- "product_id": "0443",
- "vendor_id": "8086",
- "device_type": "ACCEL",
- }'
-
- Aliases with the same name and the same device_type are OR operation:
- pci_alias = '{
- "name": "QuicAssist",
- "product_id": "0442",
- "vendor_id": "8086",
- "device_type": "ACCEL",
- }'
+""" Example of a PCI alias::
+
+ | pci_alias = '{
+ | "name": "QuicAssist",
+ | "product_id": "0443",
+ | "vendor_id": "8086",
+ | "device_type": "ACCEL",
+ | }'
+
+ Aliases with the same name and the same device_type are OR operation::
+
+ | pci_alias = '{
+ | "name": "QuicAssist",
+ | "product_id": "0442",
+ | "vendor_id": "8086",
+ | "device_type": "ACCEL",
+ | }'
+
These 2 aliases define a device request meaning: vendor_id is "8086" and
product id is "0442" or "0443".
+
"""
import copy
@@ -159,18 +163,20 @@ def get_pci_requests_from_flavor(flavor):
optional 'alias_name' is the corresponding alias definition name.
Example:
- Assume alias configuration is:
- {'vendor_id':'8086',
- 'device_id':'1502',
- 'name':'alias_1'}
+ Assume alias configuration is::
+
+ | {'vendor_id':'8086',
+ | 'device_id':'1502',
+ | 'name':'alias_1'}
The flavor extra specs includes: 'pci_passthrough:alias': 'alias_1:2'.
- The returned pci_requests are:
- pci_requests = [{'count':2,
- 'specs': [{'vendor_id':'8086',
- 'device_id':'1502'}],
- 'alias_name': 'alias_1'}]
+ The returned pci_requests are::
+
+ | pci_requests = [{'count':2,
+ | 'specs': [{'vendor_id':'8086',
+ | 'device_id':'1502'}],
+ | 'alias_name': 'alias_1'}]
:param flavor: the flavor to be checked
:returns: a list of pci requests
diff --git a/nova/pci/pci_stats.py b/nova/pci/pci_stats.py
index ae9454253f..e83ec22528 100644
--- a/nova/pci/pci_stats.py
+++ b/nova/pci/pci_stats.py
@@ -17,6 +17,7 @@
import copy
from nova import exception
+from nova.i18n import _LE
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.pci import pci_utils
@@ -70,8 +71,10 @@ def add_device(self, dev):
if not pool:
pool = dict((k, dev.get(k)) for k in self.pool_keys)
pool['count'] = 0
+ pool['devices'] = []
self.pools.append(pool)
pool['count'] += 1
+ pool['devices'].append(dev)
@staticmethod
def _decrease_pool_count(pool_list, pool, count=1):
@@ -87,14 +90,56 @@ def _decrease_pool_count(pool_list, pool, count=1):
pool_list.remove(pool)
return count
- def consume_device(self, dev):
+ def remove_device(self, dev):
"""Remove one device from the first pool that it matches."""
pool = self._get_first_pool(dev)
if not pool:
raise exception.PciDevicePoolEmpty(
compute_node_id=dev.compute_node_id, address=dev.address)
+ pool['devices'].remove(dev)
self._decrease_pool_count(self.pools, pool)
+ def get_free_devs(self):
+ free_devs = []
+ for pool in self.pools:
+ free_devs.extend(pool['devices'])
+ return free_devs
+
+ def consume_requests(self, pci_requests):
+ alloc_devices = []
+ for request in pci_requests:
+ count = request.get('count', 1)
+ spec = request.get('spec', [])
+ # For now, keep the same algorithm as during scheduling:
+ # a spec may be able to match multiple pools.
+ pools = self._filter_pools_for_spec(self.pools, spec)
+ # Failed to allocate the required number of devices
+ # Return the devices already allocated back to their pools
+ if sum([pool['count'] for pool in pools]) < count:
+ LOG.error(_LE("Failed to allocate PCI devices for instance."
+ " Unassigning devices back to pools."
+ " This should not happen, since the scheduler"
+ " should have accurate information, and allocation"
+ " during claims is controlled via a hold"
+ " on the compute node semaphore"))
+ for d in range(len(alloc_devices)):
+ self.add_device(alloc_devices.pop())
+ raise exception.PciDeviceRequestFailed(requests=pci_requests)
+
+ for pool in pools:
+ if pool['count'] >= count:
+ num_alloc = count
+ else:
+ num_alloc = pool['count']
+ count -= num_alloc
+ pool['count'] -= num_alloc
+ for d in range(num_alloc):
+ pci_dev = pool['devices'].pop()
+ alloc_devices.append(pci_dev)
+ if count == 0:
+ break
+ return alloc_devices
+
@staticmethod
def _filter_pools_for_spec(pools, request_specs):
return [pool for pool in pools
@@ -134,7 +179,12 @@ def apply_requests(self, requests):
raise exception.PciDeviceRequestFailed(requests=requests)
def __iter__(self):
- return iter(self.pools)
+ # 'devices' shouldn't be part of stats
+ pools = []
+ for pool in self.pools:
+ tmp = dict((k, v) for k, v in pool.iteritems() if k != 'devices')
+ pools.append(tmp)
+ return iter(pools)
def clear(self):
"""Clear all the stats maintained."""
diff --git a/nova/pci/pci_utils.py b/nova/pci/pci_utils.py
index a9282ff15e..fbdec9effc 100644
--- a/nova/pci/pci_utils.py
+++ b/nova/pci/pci_utils.py
@@ -15,10 +15,14 @@
# under the License.
+import os
import re
from nova import exception
+from nova.i18n import _LE
+from nova.openstack.common import log as logging
+LOG = logging.getLogger(__name__)
PCI_VENDOR_PATTERN = "^(hex{4})$".replace("hex", "[\da-fA-F]")
_PCI_ADDRESS_PATTERN = ("^(hex{4}):(hex{2}):(hex{2}).(oct{1})$".
@@ -26,6 +30,8 @@
replace("oct", "[0-7]"))
_PCI_ADDRESS_REGEX = re.compile(_PCI_ADDRESS_PATTERN)
+_VIRTFN_RE = re.compile("virtfn\d+")
+
def pci_device_prop_match(pci_dev, specs):
"""Check if the pci_dev meet spec requirement
@@ -53,3 +59,52 @@ def parse_address(address):
if not m:
raise exception.PciDeviceWrongAddressFormat(address=address)
return m.groups()
+
+
+def get_pci_address_fields(pci_addr):
+ dbs, sep, func = pci_addr.partition('.')
+ domain, bus, slot = dbs.split(':')
+ return (domain, bus, slot, func)
+
+
+def get_function_by_ifname(ifname):
+    """Given the device name, returns the PCI address of the device,
+    and True if the address is a physical function.
+    """
+ try:
+ dev_path = "/sys/class/net/%s/device" % ifname
+ dev_info = os.listdir(dev_path)
+        for dev_file in dev_info:
+            if _VIRTFN_RE.match(dev_file):
+                return os.readlink(dev_path).strip("./"), True
+        else:
+            # for/else: only reached when no virtfn* entry matched,
+            # i.e. the device is not a physical function.
+            return os.readlink(dev_path).strip("./"), False
+ except Exception:
+ LOG.error(_LE("PCI device %s not found") % ifname)
+ return None, False
+
+
+def is_physical_function(PciAddress):
+ dev_path = "/sys/bus/pci/devices/%(d)s:%(b)s:%(s)s.%(f)s/" % {
+ "d": PciAddress.domain, "b": PciAddress.bus,
+ "s": PciAddress.slot, "f": PciAddress.func}
+ try:
+ dev_info = os.listdir(dev_path)
+        for dev_file in dev_info:
+            if _VIRTFN_RE.match(dev_file):
+                return True
+        else:
+            return False
+ except Exception:
+ LOG.error(_LE("PCI device %s not found") % dev_path)
+ return False
+
+
+def get_ifname_by_pci_address(pci_addr):
+ dev_path = "/sys/bus/pci/devices/%s/net" % (pci_addr)
+ try:
+ dev_info = os.listdir(dev_path)
+ return dev_info.pop()
+ except Exception:
+ LOG.error(_LE("PCI device %s not found") % pci_addr)
+ return None
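
The address helpers split a canonical PCI address into its fields; a
standalone sketch::

    from nova.pci import pci_utils

    fields = pci_utils.get_pci_address_fields('0000:0a:00.1')
    assert fields == ('0000', '0a', '00', '1')   # domain, bus, slot, func
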
diff --git a/nova/quota.py b/nova/quota.py
index bd3d364b8a..b98282ecb1 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -23,8 +23,8 @@
from nova import db
from nova import exception
-from nova.objects import keypair as keypair_obj
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova import objects
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -421,6 +421,7 @@ def limit_check(self, context, resources, values, project_id=None,
is admin and admin wants to impact on
common user.
"""
+ _valid_method_call_check_resources(values, 'check')
# Ensure no value is less than zero
unders = [key for key, val in values.items() if val < 0]
@@ -502,6 +503,7 @@ def reserve(self, context, resources, deltas, expire=None,
is admin and admin wants to impact on
common user.
"""
+ _valid_method_call_check_resources(deltas, 'reserve')
# Set up the reservation expiration
if expire is None:
@@ -994,6 +996,7 @@ def default(self):
class ReservableResource(BaseResource):
"""Describe a reservable resource."""
+ valid_method = 'reserve'
def __init__(self, name, sync, flag=None):
"""Initializes a ReservableResource.
@@ -1031,8 +1034,7 @@ def __init__(self, name, sync, flag=None):
class AbsoluteResource(BaseResource):
"""Describe a non-reservable resource."""
-
- pass
+ valid_method = 'check'
class CountableResource(AbsoluteResource):
@@ -1096,6 +1098,10 @@ def _driver(self):
def __contains__(self, resource):
return resource in self._resources
+ def __getitem__(self, key):
+ if key in self._resources:
+ return self._resources[key]
+
def register_resource(self, resource):
"""Register a resource."""
@@ -1405,6 +1411,11 @@ def resources(self):
return sorted(self._resources.keys())
+def _keypair_get_count_by_user(*args, **kwargs):
+ """Helper method to avoid referencing objects.KeyPairList on import."""
+ return objects.KeyPairList.get_count_by_user(*args, **kwargs)
+
+
QUOTAS = QuotaEngine()
@@ -1426,9 +1437,25 @@ def resources(self):
CountableResource('security_group_rules',
db.security_group_rule_count_by_group,
'quota_security_group_rules'),
- CountableResource('key_pairs', keypair_obj.KeyPairList.get_count_by_user,
+ CountableResource('key_pairs', _keypair_get_count_by_user,
'quota_key_pairs'),
]
QUOTAS.register_resources(resources)
+
+
+def _valid_method_call_check_resource(name, method):
+ if name not in QUOTAS:
+ raise exception.InvalidQuotaMethodUsage(method=method, res=name)
+ res = QUOTAS[name]
+
+ if res.valid_method != method:
+ raise exception.InvalidQuotaMethodUsage(method=method, res=name)
+
+
+def _valid_method_call_check_resources(resource, method):
+ """A method to check whether the resource can use the quota method."""
+
+ for name in resource.keys():
+ _valid_method_call_check_resource(name, method)
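
With valid_method recorded on every resource, reservable and checkable
quotas can no longer be mixed up; a sketch (resource names follow the
registrations above)::

    from nova import context
    from nova import quota

    ctxt = context.get_admin_context()
    # Absolute resources go through limit_check()...
    quota.QUOTAS.limit_check(ctxt, metadata_items=10)
    # ...reservable ones through reserve(); swapping the two now raises
    # exception.InvalidQuotaMethodUsage.
    reservations = quota.QUOTAS.reserve(ctxt, instances=1, cores=1, ram=512)
    quota.QUOTAS.commit(ctxt, reservations)
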
diff --git a/nova/safe_utils.py b/nova/safe_utils.py
index a6d2734733..ce9499bf80 100644
--- a/nova/safe_utils.py
+++ b/nova/safe_utils.py
@@ -30,7 +30,7 @@ def getcallargs(function, *args, **kwargs):
keyed_args.update(kwargs)
- #NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
+ # NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
# argnames but not in args or kwargs. Uses 'in' rather than '==' because
# some tests use 'self2'.
if 'self' in argnames[0] or 'cls' == argnames[0]:
diff --git a/nova/scheduler/baremetal_host_manager.py b/nova/scheduler/baremetal_host_manager.py
index be59575d59..2869af506b 100644
--- a/nova/scheduler/baremetal_host_manager.py
+++ b/nova/scheduler/baremetal_host_manager.py
@@ -18,57 +18,25 @@
Manage hosts in the current zone.
"""
-from nova.openstack.common import jsonutils
+import nova.scheduler.base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
-class BaremetalNodeState(host_manager.HostState):
+class BaremetalNodeState(bbhm.BaseBaremetalNodeState):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
+ pass
- def update_from_compute_node(self, compute):
- """Update information about a host from its compute_node info."""
- all_ram_mb = compute['memory_mb']
- free_disk_mb = compute['free_disk_gb'] * 1024
- free_ram_mb = compute['free_ram_mb']
-
- self.free_ram_mb = free_ram_mb
- self.total_usable_ram_mb = all_ram_mb
- self.free_disk_mb = free_disk_mb
- self.vcpus_total = compute['vcpus']
- self.vcpus_used = compute['vcpus_used']
-
- stats = compute.get('stats', '{}')
- self.stats = jsonutils.loads(stats)
-
- def consume_from_instance(self, instance):
- self.free_ram_mb = 0
- self.free_disk_mb = 0
- self.vcpus_used = self.vcpus_total
-
-
-def new_host_state(self, host, node, **kwargs):
- """Returns an instance of BaremetalNodeState or HostState according to
- compute['cpu_info']. If 'cpu_info' equals 'baremetal cpu', it returns an
- instance of BaremetalNodeState. If not, returns an instance of HostState.
- """
- compute = kwargs.get('compute')
-
- if compute and compute.get('cpu_info') == 'baremetal cpu':
- return BaremetalNodeState(host, node, **kwargs)
- else:
- return host_manager.HostState(host, node, **kwargs)
-
-
-class BaremetalHostManager(host_manager.HostManager):
+class BaremetalHostManager(bbhm.BaseBaremetalHostManager):
"""Bare-Metal HostManager class."""
- # Override.
- # Yes, this is not a class, and it is OK
- host_state_cls = new_host_state
-
- def __init__(self):
- super(BaremetalHostManager, self).__init__()
+ def host_state_cls(self, host, node, **kwargs):
+ """Factory function/property to create a new HostState."""
+ compute = kwargs.get('compute')
+ if compute and compute.get('cpu_info') == 'baremetal cpu':
+ return BaremetalNodeState(host, node, **kwargs)
+ else:
+ return host_manager.HostState(host, node, **kwargs)
diff --git a/nova/scheduler/base_baremetal_host_manager.py b/nova/scheduler/base_baremetal_host_manager.py
new file mode 100644
index 0000000000..99baba117b
--- /dev/null
+++ b/nova/scheduler/base_baremetal_host_manager.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Manage hosts in the current zone.
+"""
+
+from nova.openstack.common import jsonutils
+from nova.scheduler import host_manager
+
+
+class BaseBaremetalNodeState(host_manager.HostState):
+ """Mutable and immutable information tracked for a host.
+ This is an attempt to remove the ad-hoc data structures
+ previously used and lock down access.
+ """
+
+ def update_from_compute_node(self, compute):
+ """Update information about a host from its compute_node info."""
+ self.vcpus_total = compute['vcpus']
+ self.vcpus_used = compute['vcpus_used']
+
+ self.free_ram_mb = compute['free_ram_mb']
+ self.total_usable_ram_mb = compute['memory_mb']
+ self.free_disk_mb = compute['free_disk_gb'] * 1024
+
+ stats = compute.get('stats', '{}')
+ self.stats = jsonutils.loads(stats)
+
+ def consume_from_instance(self, instance):
+ """Consume nodes entire resources regardless of instance request."""
+ self.free_ram_mb = 0
+ self.free_disk_mb = 0
+ self.vcpus_used = self.vcpus_total
+
+
+class BaseBaremetalHostManager(host_manager.HostManager):
+ """Base class for Baremetal and Ironic HostManager classes."""
+
+ def host_state_cls(self, host, node, **kwargs):
+ """Factory function to create a new HostState. May be overridden
+ in subclasses to extend functionality.
+ """
+ return BaseBaremetalNodeState(host, node, **kwargs)
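
update_from_compute_node() converts free disk from GB to MB, and consume_from_instance() zeroes the node out regardless of the flavor, since a baremetal node hosts at most one instance. The accounting in isolation, over a hand-made compute record:

    # Illustrative accounting only; mirrors BaseBaremetalNodeState above.
    compute = {'vcpus': 8, 'vcpus_used': 0, 'memory_mb': 16384,
               'free_ram_mb': 16384, 'free_disk_gb': 100, 'stats': '{}'}

    free_disk_mb = compute['free_disk_gb'] * 1024  # GB -> MB conversion
    assert free_disk_mb == 102400

    # After consume_from_instance the node is fully used, whatever the
    # instance actually requested:
    free_ram_mb, free_disk_mb, vcpus_used = 0, 0, compute['vcpus']
    assert vcpus_used == 8
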
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index bb04eb9c3d..65a24030c0 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -25,7 +25,7 @@
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.scheduler import driver
CONF = cfg.CONF
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index cb1942262f..d1a2fd4245 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -27,8 +27,8 @@
from nova.compute import vm_states
from nova import db
from nova import exception
+from nova.i18n import _, _LW
from nova import notifications
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -52,7 +52,11 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec):
send notifications.
"""
- if not isinstance(ex, exception.NoValidHost):
+ if isinstance(ex, exception.NoValidHost):
+ LOG.warning(_LW("NoValidHost exception with message: \'%s\'"),
+ ex.format_message().strip(),
+ instance_uuid=instance_uuid)
+ else:
LOG.exception(_("Exception during scheduler.run_instance"))
state = vm_states.ERROR.upper()
LOG.warning(_('Setting instance to %s state.'), state,
diff --git a/nova/scheduler/filter_scheduler.py b/nova/scheduler/filter_scheduler.py
index 96883b08c8..b71a257141 100644
--- a/nova/scheduler/filter_scheduler.py
+++ b/nova/scheduler/filter_scheduler.py
@@ -25,8 +25,8 @@
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
-from nova.objects import instance_group as instance_group_obj
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova import objects
from nova.openstack.common import log as logging
from nova.pci import pci_request
from nova import rpc
@@ -61,6 +61,10 @@ def __init__(self, *args, **kwargs):
self.options = scheduler_options.SchedulerOptions()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.notifier = rpc.get_notifier('scheduler')
+ self._supports_affinity = scheduler_utils.validate_filter(
+ 'ServerGroupAffinityFilter')
+ self._supports_anti_affinity = scheduler_utils.validate_filter(
+ 'ServerGroupAntiAffinityFilter')
# NOTE(alaski): Remove this method when the scheduler rpc interface is
# bumped to 4.x as it is no longer used.
@@ -136,6 +140,9 @@ def schedule_run_instance(self, context, request_spec,
def select_destinations(self, context, request_spec, filter_properties):
"""Selects a filtered set of hosts and nodes."""
+ self.notifier.info(context, 'scheduler.select_destinations.start',
+ dict(request_spec=request_spec))
+
num_instances = request_spec['num_instances']
selected_hosts = self._schedule(context, request_spec,
filter_properties)
@@ -146,6 +153,9 @@ def select_destinations(self, context, request_spec, filter_properties):
dests = [dict(host=host.obj.host, nodename=host.obj.nodename,
limits=host.obj.limits) for host in selected_hosts]
+
+ self.notifier.info(context, 'scheduler.select_destinations.end',
+ dict(request_spec=request_spec))
return dests
def _provision_resource(self, context, weighed_host, request_spec,
@@ -202,16 +212,24 @@ def populate_filter_properties(self, request_spec, filter_properties):
if pci_requests:
filter_properties['pci_requests'] = pci_requests
- @staticmethod
- def _setup_instance_group(context, filter_properties):
+ def _setup_instance_group(self, context, filter_properties):
update_group_hosts = False
scheduler_hints = filter_properties.get('scheduler_hints') or {}
group_hint = scheduler_hints.get('group', None)
if group_hint:
- group = instance_group_obj.InstanceGroup.get_by_hint(context,
- group_hint)
+ group = objects.InstanceGroup.get_by_hint(context, group_hint)
policies = set(('anti-affinity', 'affinity'))
if any((policy in policies) for policy in group.policies):
+ if ('affinity' in group.policies and
+ not self._supports_affinity):
+ msg = _("ServerGroupAffinityFilter not configured")
+ LOG.error(msg)
+ raise exception.NoValidHost(reason=msg)
+ if ('anti-affinity' in group.policies and
+ not self._supports_anti_affinity):
+ msg = _("ServerGroupAntiAffinityFilter not configured")
+ LOG.error(msg)
+ raise exception.NoValidHost(reason=msg)
update_group_hosts = True
filter_properties.setdefault('group_hosts', set())
user_hosts = set(filter_properties['group_hosts'])
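
With the two validate_filter() results cached at __init__ time, _setup_instance_group() can now refuse to schedule a group whose policy has no matching filter enabled, instead of silently ignoring the policy. The guard, sketched without the surrounding scheduler machinery (NoValidHost here is a local stand-in for nova.exception.NoValidHost):

    class NoValidHost(Exception):
        pass

    def check_group_policies(policies, enabled_filters):
        # Mirrors the added checks in _setup_instance_group above.
        if ('affinity' in policies and
                'ServerGroupAffinityFilter' not in enabled_filters):
            raise NoValidHost("ServerGroupAffinityFilter not configured")
        if ('anti-affinity' in policies and
                'ServerGroupAntiAffinityFilter' not in enabled_filters):
            raise NoValidHost("ServerGroupAntiAffinityFilter not configured")

    check_group_policies(['affinity'], ['ServerGroupAffinityFilter'])  # ok
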
diff --git a/nova/scheduler/filters/availability_zone_filter.py b/nova/scheduler/filters/availability_zone_filter.py
index 390aabb34d..1452febd29 100644
--- a/nova/scheduler/filters/availability_zone_filter.py
+++ b/nova/scheduler/filters/availability_zone_filter.py
@@ -16,8 +16,11 @@
from oslo.config import cfg
from nova import db
+from nova.openstack.common import log as logging
from nova.scheduler import filters
+LOG = logging.getLogger(__name__)
+
CONF = cfg.CONF
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
@@ -38,13 +41,25 @@ def host_passes(self, host_state, filter_properties):
props = spec.get('instance_properties', {})
availability_zone = props.get('availability_zone')
- if availability_zone:
- context = filter_properties['context']
- metadata = db.aggregate_metadata_get_by_host(
- context, host_state.host, key='availability_zone')
- if 'availability_zone' in metadata:
- return availability_zone in metadata['availability_zone']
- else:
- return availability_zone == CONF.default_availability_zone
+ if not availability_zone:
+ return True
+
+ context = filter_properties['context']
+ metadata = db.aggregate_metadata_get_by_host(
+ context, host_state.host, key='availability_zone')
+
+ if 'availability_zone' in metadata:
+ hosts_passes = availability_zone in metadata['availability_zone']
+ host_az = metadata['availability_zone']
+ else:
+ hosts_passes = availability_zone == CONF.default_availability_zone
+ host_az = CONF.default_availability_zone
+
+ if not hosts_passes:
+ LOG.debug("Availability Zone '%(az)s' requested. "
+ "%(host_state)s has AZs: %(host_az)s",
+ {'host_state': host_state,
+ 'az': availability_zone,
+ 'host_az': host_az})
- return True
+ return hosts_passes
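
The restructured filter computes the host's effective availability zone once, so the debug message on rejection can show both sides. The lookup-with-fallback reduces to a few lines; this sketch fakes the aggregate metadata where the real filter queries the database, and 'nova' stands in for CONF.default_availability_zone:

    DEFAULT_AZ = 'nova'  # stand-in for CONF.default_availability_zone

    def az_passes(requested_az, metadata):
        if not requested_az:
            return True
        if 'availability_zone' in metadata:
            return requested_az in metadata['availability_zone']
        return requested_az == DEFAULT_AZ

    assert az_passes('az1', {'availability_zone': set(['az1', 'az2'])})
    assert not az_passes('az1', {})  # no aggregate AZ: default applies
    assert az_passes(None, {})       # nothing requested: always passes
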
diff --git a/nova/scheduler/filters/compute_capabilities_filter.py b/nova/scheduler/filters/compute_capabilities_filter.py
index 178ca49363..ac68d509fb 100644
--- a/nova/scheduler/filters/compute_capabilities_filter.py
+++ b/nova/scheduler/filters/compute_capabilities_filter.py
@@ -13,6 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import six
+
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
@@ -45,6 +48,11 @@ def _satisfies_extra_specs(self, host_state, instance_type):
cap = host_state
for index in range(0, len(scope)):
try:
+ if isinstance(cap, six.string_types):
+ try:
+ cap = jsonutils.loads(cap)
+ except ValueError:
+ return False
if not isinstance(cap, dict):
if getattr(cap, scope[index], None) is None:
# If can't find, check stats dict
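
Host capabilities may now arrive as JSON-encoded strings, so each step of the scope walk first decodes string values and treats undecodable ones as a failed match. The added branch in miniature, with the stdlib json module standing in for jsonutils and str for six.string_types:

    import json  # stand-in for nova.openstack.common.jsonutils

    def resolve_cap(cap):
        if isinstance(cap, str):  # six.string_types in the real filter
            try:
                cap = json.loads(cap)
            except ValueError:
                return None  # the filter returns False at this point
        return cap

    assert resolve_cap('{"nested": 1}') == {'nested': 1}
    assert resolve_cap('not json') is None
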
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index 94aa2490bb..9b7022401d 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -15,7 +15,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova import servicegroup
diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py
index 45d97b9ba7..d588a22dfd 100644
--- a/nova/scheduler/filters/core_filter.py
+++ b/nova/scheduler/filters/core_filter.py
@@ -17,7 +17,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _LW
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import utils
@@ -61,7 +61,17 @@ def host_passes(self, host_state, filter_properties):
if vcpus_total > 0:
host_state.limits['vcpu'] = vcpus_total
- return (vcpus_total - host_state.vcpus_used) >= instance_vcpus
+ free_vcpus = vcpus_total - host_state.vcpus_used
+ if free_vcpus < instance_vcpus:
+ LOG.debug("%(host_state)s does not have %(instance_vcpus)d "
+ "usable vcpus, it only has %(free_vcpus)d usable "
+ "vcpus",
+ {'host_state': host_state,
+ 'instance_vcpus': instance_vcpus,
+ 'free_vcpus': free_vcpus})
+ return False
+
+ return True
class CoreFilter(BaseCoreFilter):
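
The pass/fail arithmetic is unchanged; the rewrite only splits it out so the shortfall can be logged. In isolation (vcpus_total is already scaled by the CPU allocation ratio before this point):

    def core_passes(vcpus_total, vcpus_used, instance_vcpus):
        free_vcpus = vcpus_total - vcpus_used
        return free_vcpus >= instance_vcpus

    assert core_passes(16, 12, 4)      # exactly enough free vcpus
    assert not core_passes(16, 13, 4)  # one vcpu short: logged and rejected
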
diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py
index b9c4004013..a16a3d2094 100644
--- a/nova/scheduler/filters/disk_filter.py
+++ b/nova/scheduler/filters/disk_filter.py
@@ -15,8 +15,10 @@
from oslo.config import cfg
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.scheduler import filters
+from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
@@ -30,6 +32,9 @@
class DiskFilter(filters.BaseHostFilter):
"""Disk Filter with over subscription flag."""
+ def _get_disk_allocation_ratio(self, host_state, filter_properties):
+ return CONF.disk_allocation_ratio
+
def host_passes(self, host_state, filter_properties):
"""Filter based on disk usage."""
instance_type = filter_properties.get('instance_type')
@@ -40,7 +45,10 @@ def host_passes(self, host_state, filter_properties):
free_disk_mb = host_state.free_disk_mb
total_usable_disk_mb = host_state.total_usable_disk_gb * 1024
- disk_mb_limit = total_usable_disk_mb * CONF.disk_allocation_ratio
+ disk_allocation_ratio = self._get_disk_allocation_ratio(
+ host_state, filter_properties)
+
+ disk_mb_limit = total_usable_disk_mb * disk_allocation_ratio
used_disk_mb = total_usable_disk_mb - free_disk_mb
usable_disk_mb = disk_mb_limit - used_disk_mb
@@ -55,3 +63,28 @@ def host_passes(self, host_state, filter_properties):
disk_gb_limit = disk_mb_limit / 1024
host_state.limits['disk_gb'] = disk_gb_limit
return True
+
+
+class AggregateDiskFilter(DiskFilter):
+ """AggregateDiskFilter with per-aggregate disk allocation ratio flag.
+
+ Fall back to global disk_allocation_ratio if no per-aggregate setting
+ found.
+ """
+
+ def _get_disk_allocation_ratio(self, host_state, filter_properties):
+ # TODO(uni): DB query in filter is a performance hit, especially for
+ # system with lots of hosts. Will need a general solution here to fix
+ # all filters with aggregate DB call things.
+ aggregate_vals = utils.aggregate_values_from_db(
+ filter_properties['context'],
+ host_state.host,
+ 'disk_allocation_ratio')
+ try:
+ ratio = utils.validate_num_values(
+ aggregate_vals, CONF.disk_allocation_ratio, cast_to=float)
+ except ValueError as e:
+ LOG.warn(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
+ ratio = CONF.disk_allocation_ratio
+
+ return ratio
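
AggregateDiskFilter overrides only the ratio lookup; host_passes() is inherited unchanged from DiskFilter. A loose model of the fallback behaviour (this simplifies validate_num_values to "first parsable value wins", which is an assumption about that helper, and 1.0 stands in for CONF.disk_allocation_ratio):

    GLOBAL_RATIO = 1.0  # stand-in for CONF.disk_allocation_ratio

    def effective_disk_ratio(aggregate_vals):
        # Simplified: take the first value that parses as a float,
        # otherwise fall back to the global ratio.
        for val in aggregate_vals:
            try:
                return float(val)
            except (TypeError, ValueError):
                continue
        return GLOBAL_RATIO

    assert effective_disk_ratio(['1.5']) == 1.5
    assert effective_disk_ratio(['bogus']) == GLOBAL_RATIO
    assert effective_disk_ratio([]) == GLOBAL_RATIO
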
diff --git a/nova/scheduler/filters/exact_core_filter.py b/nova/scheduler/filters/exact_core_filter.py
new file mode 100644
index 0000000000..fbe718ebf8
--- /dev/null
+++ b/nova/scheduler/filters/exact_core_filter.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2014 OpenStack Foundation
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.i18n import _
+from nova.openstack.common import log as logging
+from nova.scheduler import filters
+
+LOG = logging.getLogger(__name__)
+
+
+class ExactCoreFilter(filters.BaseHostFilter):
+ """Exact Core Filter."""
+
+ def host_passes(self, host_state, filter_properties):
+ """Return True if host has the exact number of CPU cores."""
+ instance_type = filter_properties.get('instance_type')
+ if not instance_type:
+ return True
+
+ if not host_state.vcpus_total:
+ # Fail safe
+ LOG.warning(_("VCPUs not set; assuming CPU collection broken"))
+ return False
+
+ required_vcpus = instance_type['vcpus']
+ usable_vcpus = host_state.vcpus_total - host_state.vcpus_used
+
+ if required_vcpus != usable_vcpus:
+ LOG.debug("%(host_state)s does not have exactly "
+ "%(requested_vcpus)s cores of usable vcpu, it has "
+ "%(usable_vcpus)s.",
+ {'host_state': host_state,
+ 'requested_vcpus': required_vcpus,
+ 'usable_vcpus': usable_vcpus})
+ return False
+
+ return True
diff --git a/nova/scheduler/filters/exact_disk_filter.py b/nova/scheduler/filters/exact_disk_filter.py
new file mode 100644
index 0000000000..543eb4c75e
--- /dev/null
+++ b/nova/scheduler/filters/exact_disk_filter.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.openstack.common import log as logging
+from nova.scheduler import filters
+
+LOG = logging.getLogger(__name__)
+
+
+class ExactDiskFilter(filters.BaseHostFilter):
+ """Exact Disk Filter."""
+
+ def host_passes(self, host_state, filter_properties):
+ """Return True if host has the exact amount of disk available."""
+ instance_type = filter_properties.get('instance_type')
+ requested_disk = (1024 * (instance_type['root_gb'] +
+ instance_type['ephemeral_gb']) +
+ instance_type['swap'])
+
+ if requested_disk != host_state.free_disk_mb:
+ LOG.debug("%(host_state)s does not have exactly "
+ "%(requested_disk)s MB usable disk, it "
+ "has %(usable_disk_mb)s.",
+ {'host_state': host_state,
+ 'requested_disk': requested_disk,
+ 'usable_disk_mb': host_state.free_disk_mb})
+ return False
+
+ return True
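
The requested size is flavor root plus ephemeral disk (GB, converted to MB) plus swap, which is already in MB. Worked numbers for a hypothetical flavor:

    # Worked example of the requested_disk computation above.
    instance_type = {'root_gb': 10, 'ephemeral_gb': 5, 'swap': 512}

    requested_disk = (1024 * (instance_type['root_gb'] +
                              instance_type['ephemeral_gb']) +
                      instance_type['swap'])
    assert requested_disk == 15872  # 15 GB in MB plus 512 MB of swap

    # Only a host whose free_disk_mb equals 15872 exactly will pass.
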
diff --git a/nova/scheduler/filters/exact_ram_filter.py b/nova/scheduler/filters/exact_ram_filter.py
new file mode 100644
index 0000000000..efd845aa6b
--- /dev/null
+++ b/nova/scheduler/filters/exact_ram_filter.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.openstack.common import log as logging
+from nova.scheduler import filters
+
+LOG = logging.getLogger(__name__)
+
+
+class ExactRamFilter(filters.BaseHostFilter):
+ """Exact RAM Filter."""
+
+ def host_passes(self, host_state, filter_properties):
+ """Return True if host has the exact amount of RAM available."""
+ instance_type = filter_properties.get('instance_type')
+ requested_ram = instance_type['memory_mb']
+ if requested_ram != host_state.free_ram_mb:
+ LOG.debug("%(host_state)s does not have exactly "
+ "%(requested_ram)s MB usable RAM, it has "
+ "%(usable_ram)s.",
+ {'host_state': host_state,
+ 'requested_ram': requested_ram,
+ 'usable_ram': host_state.free_ram_mb})
+ return False
+
+ return True
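
Together with ExactDiskFilter and ExactCoreFilter above, this completes a family of filters that test for equality rather than "enough", because a baremetal node is consumed whole. The contrast with the ordinary RAM check, simplified (the real RamFilter also applies an allocation ratio):

    def ram_filter_passes(free_ram_mb, requested_ram):
        return free_ram_mb >= requested_ram   # ordinary filter, simplified

    def exact_ram_filter_passes(free_ram_mb, requested_ram):
        return free_ram_mb == requested_ram   # ExactRamFilter above

    assert ram_filter_passes(8192, 4096)            # enough is enough
    assert not exact_ram_filter_passes(8192, 4096)  # but not an exact fit
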
diff --git a/nova/scheduler/filters/io_ops_filter.py b/nova/scheduler/filters/io_ops_filter.py
index de9ce5ab80..1ac20356d3 100644
--- a/nova/scheduler/filters/io_ops_filter.py
+++ b/nova/scheduler/filters/io_ops_filter.py
@@ -15,8 +15,10 @@
from oslo.config import cfg
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.scheduler import filters
+from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
@@ -34,12 +36,16 @@
class IoOpsFilter(filters.BaseHostFilter):
"""Filter out hosts with too many concurrent I/O operations."""
+ def _get_max_io_ops_per_host(self, host_state, filter_properties):
+ return CONF.max_io_ops_per_host
+
def host_passes(self, host_state, filter_properties):
"""Use information about current vm and task states collected from
compute node statistics to decide whether to filter.
"""
num_io_ops = host_state.num_io_ops
- max_io_ops = CONF.max_io_ops_per_host
+ max_io_ops = self._get_max_io_ops_per_host(
+ host_state, filter_properties)
passes = num_io_ops < max_io_ops
if not passes:
LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host "
@@ -47,3 +53,27 @@ def host_passes(self, host_state, filter_properties):
{'host_state': host_state,
'max_io_ops': max_io_ops})
return passes
+
+
+class AggregateIoOpsFilter(IoOpsFilter):
+ """AggregateIoOpsFilter with per-aggregate the max io operations.
+
+ Fall back to global max_io_ops_per_host if no per-aggregate setting found.
+ """
+
+ def _get_max_io_ops_per_host(self, host_state, filter_properties):
+ # TODO(uni): DB query in filter is a performance hit, especially for
+ # system with lots of hosts. Will need a general solution here to fix
+ # all filters with aggregate DB call things.
+ aggregate_vals = utils.aggregate_values_from_db(
+ filter_properties['context'],
+ host_state.host,
+ 'max_io_ops_per_host')
+ try:
+ value = utils.validate_num_values(
+ aggregate_vals, CONF.max_io_ops_per_host, cast_to=int)
+ except ValueError as e:
+ LOG.warn(_LW("Could not decode max_io_ops_per_host: '%s'"), e)
+ value = CONF.max_io_ops_per_host
+
+ return value
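
As with DiskFilter, the limit lookup is factored into a small hook so the aggregate variant overrides one method and inherits the rest. The shape of that pattern, reduced to a sketch (the dicts and the hard-coded 8 are illustrative):

    class BaseLimitFilter(object):
        def _get_limit(self, host_state):
            return 8  # stand-in for CONF.max_io_ops_per_host

        def host_passes(self, host_state):
            return host_state['num_io_ops'] < self._get_limit(host_state)

    class AggregateLimitFilter(BaseLimitFilter):
        def _get_limit(self, host_state):
            # The real subclass consults per-aggregate metadata here.
            return host_state.get('aggregate_limit', 8)

    assert BaseLimitFilter().host_passes({'num_io_ops': 7})
    assert not AggregateLimitFilter().host_passes(
        {'num_io_ops': 7, 'aggregate_limit': 4})
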
diff --git a/nova/scheduler/filters/isolated_hosts_filter.py b/nova/scheduler/filters/isolated_hosts_filter.py
index a15193a8f6..6d383dde09 100644
--- a/nova/scheduler/filters/isolated_hosts_filter.py
+++ b/nova/scheduler/filters/isolated_hosts_filter.py
@@ -41,18 +41,20 @@ class IsolatedHostsFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set
- to True:
- | isolated_image | non_isolated_image
- -------------+----------------+-------------------
- iso_host | True | False
- non_iso_host | False | True
+ to True::
+
+ | | isolated_image | non_isolated_image
+ | -------------+----------------+-------------------
+ | iso_host | True | False
+ | non_iso_host | False | True
Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set
- to False:
- | isolated_image | non_isolated_image
- -------------+----------------+-------------------
- iso_host | True | True
- non_iso_host | False | True
+ to False::
+
+ | | isolated_image | non_isolated_image
+ | -------------+----------------+-------------------
+ | iso_host | True | True
+ | non_iso_host | False | True
"""
# If the configuration does not list any hosts, the filter will always
diff --git a/nova/scheduler/filters/pci_passthrough_filter.py b/nova/scheduler/filters/pci_passthrough_filter.py
index dc71e18baa..5855649f24 100644
--- a/nova/scheduler/filters/pci_passthrough_filter.py
+++ b/nova/scheduler/filters/pci_passthrough_filter.py
@@ -13,8 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova.openstack.common import log as logging
from nova.scheduler import filters
+LOG = logging.getLogger(__name__)
+
class PciPassthroughFilter(filters.BaseHostFilter):
"""Pci Passthrough Filter based on PCI request
@@ -23,18 +26,27 @@ class PciPassthroughFilter(filters.BaseHostFilter):
to meet the device requests in the 'extra_specs' for the flavor.
PCI resource tracker provides updated summary information about the
- PCI devices for each host, like:
- [{"count": 5, "vendor_id": "8086", "product_id": "1520",
- "extra_info":'{}'}],
- and VM requests PCI devices via PCI requests, like:
- [{"count": 1, "vendor_id": "8086", "product_id": "1520",}].
+ PCI devices for each host, like::
+
+ | [{"count": 5, "vendor_id": "8086", "product_id": "1520",
+ | "extra_info":'{}'}],
+
+ and VM requests PCI devices via PCI requests, like::
+
+ | [{"count": 1, "vendor_id": "8086", "product_id": "1520",}].
The filter checks if the host passes or not based on this information.
+
"""
def host_passes(self, host_state, filter_properties):
"""Return true if the host has the required PCI devices."""
- if not filter_properties.get('pci_requests'):
+ pci_requests = filter_properties.get('pci_requests')
+ if not pci_requests:
return True
- return host_state.pci_stats.support_requests(
- filter_properties.get('pci_requests'))
+ if not host_state.pci_stats.support_requests(pci_requests):
+ LOG.debug("%(host_state)s doesn't have the required PCI devices"
+ " (%(requests)s)",
+ {'host_state': host_state, 'requests': pci_requests})
+ return False
+ return True
diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py
index 9afcceaa97..4677d2feb3 100644
--- a/nova/scheduler/filters/ram_filter.py
+++ b/nova/scheduler/filters/ram_filter.py
@@ -16,7 +16,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _LW
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import utils
diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py
index 29cd4f4f5e..6be3f57a09 100644
--- a/nova/scheduler/filters/trusted_filter.py
+++ b/nova/scheduler/filters/trusted_filter.py
@@ -18,14 +18,14 @@
Filter to add support for Trusted Computing Pools.
Filter that only schedules tasks on a host if the integrity (trust)
-of that host matches the trust requested in the `extra_specs' for the
-flavor. The `extra_specs' will contain a key/value pair where the
-key is `trust'. The value of this pair (`trusted'/`untrusted') must
+of that host matches the trust requested in the ``extra_specs`` for the
+flavor. The ``extra_specs`` will contain a key/value pair where the
+key is ``trust``. The value of this pair (``trusted``/``untrusted``) must
match the integrity of that host (obtained from the Attestation
service) before the task can be scheduled on that host.
Note that the parameters to control access to the Attestation Service
-are in the `nova.conf' file in a separate `trust' section. For example,
+are in the ``nova.conf`` file in a separate ``trust`` section. For example,
the config file will look something like:
[DEFAULT]
@@ -34,7 +34,8 @@
[trust]
server=attester.mynetwork.com
-Details on the specific parameters can be found in the file `trust_attest.py'.
+Details on the specific parameters can be found in the file
+``trust_attest.py``.
Details on setting up and using an Attestation Service can be found at
the Open Attestation project at:
@@ -50,7 +51,6 @@
from nova import context
from nova import db
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -176,7 +176,7 @@ def do_attestation(self, hosts):
result = None
status, data = self._request("POST", "PollHosts", hosts)
- if data != None:
+ if data is not None:
result = data.get('hosts')
return result
@@ -203,11 +203,7 @@ def __init__(self):
# host in the first round that scheduler invokes us.
computes = db.compute_node_get_all(admin)
for compute in computes:
- service = compute['service']
- if not service:
- LOG.warn(_("No service for compute ID %s") % compute['id'])
- continue
- host = service['host']
+ host = compute['hypervisor_hostname']
self._init_cache_entry(host)
def _cache_valid(self, host):
@@ -241,9 +237,16 @@ def _update_cache_entry(self, state):
entry['vtime'] = timeutils.normalize_time(
timeutils.parse_isotime(state['vtime']))
except ValueError:
- # Mark the system as un-trusted if get invalid vtime.
- entry['trust_lvl'] = 'unknown'
- entry['vtime'] = timeutils.utcnow()
+ try:
+ # Mt. Wilson does not necessarily return an ISO8601 formatted
+ # `vtime`, so we should try to parse it as a string formatted
+ # datetime.
+ vtime = timeutils.parse_strtime(state['vtime'], fmt="%c")
+ entry['vtime'] = timeutils.normalize_time(vtime)
+ except ValueError:
+ # Mark the system as un-trusted if get invalid vtime.
+ entry['trust_lvl'] = 'unknown'
+ entry['vtime'] = timeutils.utcnow()
self.compute_nodes[host] = entry
@@ -284,7 +287,7 @@ def host_passes(self, host_state, filter_properties):
instance_type = filter_properties.get('instance_type', {})
extra = instance_type.get('extra_specs', {})
trust = extra.get('trust:trusted_host')
- host = host_state.host
+ host = host_state.nodename
if trust:
return self.compute_attestation.is_trusted(host, trust)
return True
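
Mt. Wilson can report vtime in the C locale's date format rather than ISO 8601, hence the nested try. The two-stage parse as a standalone sketch, using datetime directly where the filter uses timeutils (the ISO format string here is a simplification; timeutils accepts richer ISO 8601 input):

    from datetime import datetime

    def parse_vtime(vtime):
        for fmt in ("%Y-%m-%dT%H:%M:%S", "%c"):
            try:
                return datetime.strptime(vtime, fmt)
            except ValueError:
                continue
        return None  # caller marks the node 'unknown', stamps utcnow()

    assert parse_vtime("2014-05-01T12:00:00") is not None
    assert parse_vtime("garbage") is None
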
diff --git a/nova/scheduler/filters/type_filter.py b/nova/scheduler/filters/type_filter.py
index c588239534..6c4712a83e 100644
--- a/nova/scheduler/filters/type_filter.py
+++ b/nova/scheduler/filters/type_filter.py
@@ -29,7 +29,7 @@ class TypeAffinityFilter(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
"""Dynamically limits hosts to one instance type
- Return False if host has any instance types other then the requested
+ Return False if host has any instance types other than the requested
type. Return True if all instance types match or if host is empty.
"""
diff --git a/nova/scheduler/filters/utils.py b/nova/scheduler/filters/utils.py
index 580b2cb385..151811f4d9 100644
--- a/nova/scheduler/filters/utils.py
+++ b/nova/scheduler/filters/utils.py
@@ -15,8 +15,8 @@
"""Bench of utility methods used by filters."""
+from nova.i18n import _LI
from nova.objects import aggregate
-from nova.openstack.common.gettextutils import _LI
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 227efa30cd..2708b843e2 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -26,7 +26,7 @@
from nova.compute import vm_states
from nova import db
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -149,9 +149,9 @@ def update_service(self, service):
self.service = ReadOnlyDict(service)
def _update_metrics_from_compute_node(self, compute):
- #NOTE(llu): The 'or []' is to avoid json decode failure of None
- # returned from compute.get, because DB schema allows
- # NULL in the metrics column
+ # NOTE(llu): The 'or []' is to avoid json decode failure of None
+ # returned from compute.get, because DB schema allows
+ # NULL in the metrics column
metrics = compute.get('metrics', []) or []
if metrics:
metrics = jsonutils.loads(metrics)
@@ -189,7 +189,7 @@ def update_from_compute_node(self, compute):
self.disk_mb_used = compute['local_gb_used'] * 1024
- #NOTE(jogo) free_ram_mb can be negative
+ # NOTE(jogo) free_ram_mb can be negative
self.free_ram_mb = compute['free_ram_mb']
self.total_usable_ram_mb = all_ram_mb
self.total_usable_disk_gb = compute['local_gb']
diff --git a/nova/scheduler/ironic_host_manager.py b/nova/scheduler/ironic_host_manager.py
new file mode 100644
index 0000000000..409c6dd1cc
--- /dev/null
+++ b/nova/scheduler/ironic_host_manager.py
@@ -0,0 +1,93 @@
+# Copyright (c) 2012 NTT DOCOMO, INC.
+# Copyright (c) 2011-2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Ironic host manager.
+
+This host manager will consume all CPUs, disk space, and
+RAM from a host / node, as it supports baremetal hosts, which cannot be
+subdivided into multiple instances.
+"""
+from oslo.config import cfg
+
+from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
+import nova.scheduler.base_baremetal_host_manager as bbhm
+from nova.scheduler import host_manager
+
+host_manager_opts = [
+ cfg.ListOpt('baremetal_scheduler_default_filters',
+ default=[
+ 'RetryFilter',
+ 'AvailabilityZoneFilter',
+ 'ComputeFilter',
+ 'ComputeCapabilitiesFilter',
+ 'ImagePropertiesFilter',
+ 'ExactRamFilter',
+ 'ExactDiskFilter',
+ 'ExactCoreFilter',
+ ],
+ help='Which filter class names to use for filtering '
+ 'baremetal hosts when not specified in the request.'),
+ cfg.BoolOpt('scheduler_use_baremetal_filters',
+ default=False,
+ help='Flag to decide whether to use '
+ 'baremetal_scheduler_default_filters or not.'),
+
+ ]
+
+CONF = cfg.CONF
+CONF.register_opts(host_manager_opts)
+
+LOG = logging.getLogger(__name__)
+
+
+class IronicNodeState(bbhm.BaseBaremetalNodeState):
+ """Mutable and immutable information tracked for a host.
+ This is an attempt to remove the ad-hoc data structures
+ previously used and lock down access.
+ """
+
+ def update_from_compute_node(self, compute):
+ """Update information about a host from its compute_node info."""
+ super(IronicNodeState, self).update_from_compute_node(compute)
+
+ self.total_usable_disk_gb = compute['local_gb']
+ self.updated = compute['updated_at']
+
+ def consume_from_instance(self, instance):
+ """Consume nodes entire resources regardless of instance request."""
+ super(IronicNodeState, self).consume_from_instance(instance)
+
+ self.updated = timeutils.utcnow()
+
+
+class IronicHostManager(bbhm.BaseBaremetalHostManager):
+ """Ironic HostManager class."""
+
+ def __init__(self):
+ super(IronicHostManager, self).__init__()
+ if CONF.scheduler_use_baremetal_filters:
+ baremetal_default = CONF.baremetal_scheduler_default_filters
+ CONF.scheduler_default_filters = baremetal_default
+
+ def host_state_cls(self, host, node, **kwargs):
+ """Factory function/property to create a new HostState."""
+ compute = kwargs.get('compute')
+ if compute and compute.get('cpu_info') == 'baremetal cpu':
+ return IronicNodeState(host, node, **kwargs)
+ else:
+ return host_manager.HostState(host, node, **kwargs)
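
When scheduler_use_baremetal_filters is set, the manager swaps the default filter list for the baremetal one at construction time. The decision itself is a one-liner; sketched here with plain lists instead of oslo.config options:

    BAREMETAL_FILTERS = ['RetryFilter', 'AvailabilityZoneFilter',
                         'ComputeFilter', 'ComputeCapabilitiesFilter',
                         'ImagePropertiesFilter', 'ExactRamFilter',
                         'ExactDiskFilter', 'ExactCoreFilter']

    def pick_default_filters(use_baremetal_filters, regular_filters):
        # Mirrors IronicHostManager.__init__ above.
        return BAREMETAL_FILTERS if use_baremetal_filters else regular_filters

    assert 'ExactRamFilter' in pick_default_filters(True, ['RamFilter'])
    assert pick_default_filters(False, ['RamFilter']) == ['RamFilter']
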
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 2d94a4bb27..c370d0cef2 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -99,6 +99,8 @@ def run_instance(self, context, request_spec, admin_password,
'task_state': None},
context, ex, request_spec)
+ # NOTE(sbauza): Remove this method when the scheduler rpc interface is
+ # bumped to 4.x as it is no longer used.
def prep_resize(self, context, image, request_spec, filter_properties,
instance, instance_type, reservations):
"""Tries to call schedule_prep_resize on the driver.
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index 3ff86f0ebf..125f4bbc35 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -20,7 +20,6 @@
from oslo import messaging
from nova.objects import base as objects_base
-from nova.openstack.common import jsonutils
from nova import rpc
rpcapi_opts = [
@@ -42,48 +41,49 @@ class SchedulerAPI(object):
API version history:
- 1.0 - Initial version.
- 1.1 - Changes to prep_resize():
- - remove instance_uuid, add instance
- - remove instance_type_id, add instance_type
- - remove topic, it was unused
- 1.2 - Remove topic from run_instance, it was unused
- 1.3 - Remove instance_id, add instance to live_migration
- 1.4 - Remove update_db from prep_resize
- 1.5 - Add reservations argument to prep_resize()
- 1.6 - Remove reservations argument to run_instance()
- 1.7 - Add create_volume() method, remove topic from live_migration()
-
- 2.0 - Remove 1.x backwards compat
- 2.1 - Add image_id to create_volume()
- 2.2 - Remove reservations argument to create_volume()
- 2.3 - Remove create_volume()
- 2.4 - Change update_service_capabilities()
- - accepts a list of capabilities
- 2.5 - Add get_backdoor_port()
- 2.6 - Add select_hosts()
+ * 1.0 - Initial version.
+ * 1.1 - Changes to prep_resize():
+ * remove instance_uuid, add instance
+ * remove instance_type_id, add instance_type
+ * remove topic, it was unused
+ * 1.2 - Remove topic from run_instance, it was unused
+ * 1.3 - Remove instance_id, add instance to live_migration
+ * 1.4 - Remove update_db from prep_resize
+ * 1.5 - Add reservations argument to prep_resize()
+ * 1.6 - Remove reservations argument to run_instance()
+ * 1.7 - Add create_volume() method, remove topic from live_migration()
+
+ * 2.0 - Remove 1.x backwards compat
+ * 2.1 - Add image_id to create_volume()
+ * 2.2 - Remove reservations argument to create_volume()
+ * 2.3 - Remove create_volume()
+ * 2.4 - Change update_service_capabilities()
+ * accepts a list of capabilities
+ * 2.5 - Add get_backdoor_port()
+ * 2.6 - Add select_hosts()
... Grizzly supports message version 2.6. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.6.
- 2.7 - Add select_destinations()
- 2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used
- by the compute manager for retries.
- 2.9 - Added the legacy_bdm_in_spec parameter to run_instance()
+ * 2.7 - Add select_destinations()
+ * 2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used
+ by the compute manager for retries.
+ * 2.9 - Added the legacy_bdm_in_spec parameter to run_instance()
... Havana supports message version 2.9. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.9.
- ... - Deprecated live_migration() call, moved to conductor
- ... - Deprecated select_hosts()
+ * Deprecated live_migration() call, moved to conductor
+ * Deprecated select_hosts()
3.0 - Removed backwards compat
... Icehouse supports message version 3.0. So, any changes to
existing methods in 3.x after that point should be done such that they
can handle the version_cap being set to 3.0.
+
'''
VERSION_ALIASES = {
@@ -105,16 +105,3 @@ def select_destinations(self, ctxt, request_spec, filter_properties):
cctxt = self.client.prepare()
return cctxt.call(ctxt, 'select_destinations',
request_spec=request_spec, filter_properties=filter_properties)
-
- def prep_resize(self, ctxt, instance, instance_type, image,
- request_spec, filter_properties, reservations):
- instance_p = jsonutils.to_primitive(instance)
- instance_type_p = jsonutils.to_primitive(instance_type)
- reservations_p = jsonutils.to_primitive(reservations)
- image_p = jsonutils.to_primitive(image)
- cctxt = self.client.prepare()
- cctxt.cast(ctxt, 'prep_resize',
- instance=instance_p, instance_type=instance_type_p,
- image=image_p, request_spec=request_spec,
- filter_properties=filter_properties,
- reservations=reservations_p)
diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py
index 7dbbcd56e8..c0cf848d19 100644
--- a/nova/scheduler/scheduler_options.py
+++ b/nova/scheduler/scheduler_options.py
@@ -21,13 +21,13 @@
"""
import datetime
-import json
import os
from oslo.config import cfg
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -73,7 +73,7 @@ def _get_file_timestamp(self, filename):
def _load_file(self, handle):
"""Decode the JSON file. Broken out for testing."""
try:
- return json.load(handle)
+ return jsonutils.load(handle)
except ValueError as e:
LOG.exception(_("Could not decode scheduler options: '%s'"), e)
return {}
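
Behaviour is unchanged apart from the jsonutils switch: the options file is decoded and any decode failure degrades to an empty dict. A sketch with the stdlib json module standing in for jsonutils:

    import json  # stand-in for nova.openstack.common.jsonutils
    from io import StringIO

    def load_options(handle):
        try:
            return json.load(handle)
        except ValueError:
            return {}

    assert load_options(StringIO(u'{"filters": []}')) == {'filters': []}
    assert load_options(StringIO(u'not json')) == {}
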
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index 2d2f1618d0..be75690561 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -22,9 +22,9 @@
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
+from nova.i18n import _
from nova import notifications
from nova.objects import base as obj_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import rpc
@@ -196,9 +196,7 @@ def _add_retry_host(filter_properties, host, node):
node has already been tried.
"""
retry = filter_properties.get('retry', None)
- force_hosts = filter_properties.get('force_hosts', [])
- force_nodes = filter_properties.get('force_nodes', [])
- if not retry or force_hosts or force_nodes:
+ if not retry:
return
hosts = retry['hosts']
hosts.append([host, node])
@@ -235,3 +233,8 @@ def parse_options(opts, sep='=', converter=str, name=""):
{'name': name,
'options': ", ".join(bad)})
return good
+
+
+def validate_filter(filter):
+ """Validates that the filter is configured in the default filters."""
+ return filter in CONF.scheduler_default_filters
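
The helper is a plain membership test against the configured default filters, which is what lets FilterScheduler cache the affinity checks at __init__. Modeled without oslo.config:

    scheduler_default_filters = ['RetryFilter', 'ServerGroupAffinityFilter']

    def validate_filter(filter_name):
        # Mirrors nova.scheduler.utils.validate_filter above.
        return filter_name in scheduler_default_filters

    assert validate_filter('ServerGroupAffinityFilter')
    assert not validate_filter('ServerGroupAntiAffinityFilter')
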
diff --git a/nova/scheduler/weights/__init__.py b/nova/scheduler/weights/__init__.py
index e3c7a07e8e..9d10c84765 100644
--- a/nova/scheduler/weights/__init__.py
+++ b/nova/scheduler/weights/__init__.py
@@ -31,8 +31,8 @@ def to_dict(self):
return x
def __repr__(self):
- return "WeighedHost [host: %s, weight: %s]" % (
- self.obj.host, self.weight)
+ return "WeighedHost [host: %r, weight: %s]" % (
+ self.obj, self.weight)
class BaseHostWeigher(weights.BaseWeigher):
diff --git a/nova/service.py b/nova/service.py
index 8618e7c3e3..cdb0b1f117 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -29,8 +29,8 @@
from nova import context
from nova import debugger
from nova import exception
+from nova.i18n import _
from nova.objects import base as objects_base
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index 0016c5c53e..bbb0fd55fd 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -20,7 +20,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/servicegroup/drivers/db.py b/nova/servicegroup/drivers/db.py
index e7f74e7069..70b7e132ba 100644
--- a/nova/servicegroup/drivers/db.py
+++ b/nova/servicegroup/drivers/db.py
@@ -18,7 +18,7 @@
from nova import conductor
from nova import context
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _, _LE
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.servicegroup import api
@@ -107,4 +107,4 @@ def _report_state(self, service):
except Exception: # pylint: disable=W0702
if not getattr(service, 'model_disconnected', False):
service.model_disconnected = True
- LOG.exception(_('model server went away'))
+ LOG.exception(_LE('model server went away'))
diff --git a/nova/servicegroup/drivers/mc.py b/nova/servicegroup/drivers/mc.py
index e83163ff73..636dec8aa9 100644
--- a/nova/servicegroup/drivers/mc.py
+++ b/nova/servicegroup/drivers/mc.py
@@ -21,7 +21,7 @@
from nova import conductor
from nova import context
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _, _LE
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
@@ -102,4 +102,4 @@ def _report_state(self, service):
except Exception: # pylint: disable=W0702
if not getattr(service, 'model_disconnected', False):
service.model_disconnected = True
- LOG.exception(_('model server went away'))
+ LOG.exception(_LE('model server went away'))
diff --git a/nova/servicegroup/drivers/zk.py b/nova/servicegroup/drivers/zk.py
index a7a8b7b465..a2dc3c83c1 100644
--- a/nova/servicegroup/drivers/zk.py
+++ b/nova/servicegroup/drivers/zk.py
@@ -20,7 +20,7 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _, _LE
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
@@ -87,10 +87,10 @@ def join(self, member_id, group, service=None):
try:
member = membership.Membership(self._session, path, member_id)
except RuntimeError:
- LOG.exception(_("Unable to join. It is possible that either "
- "another node exists with the same name, or "
- "this node just restarted. We will try "
- "again in a short while to make sure."))
+ LOG.exception(_LE("Unable to join. It is possible that either"
+ " another node exists with the same name, or"
+ " this node just restarted. We will try "
+ "again in a short while to make sure."))
eventlet.sleep(CONF.zookeeper.sg_retry_interval)
member = membership.Membership(self._session, path, member_id)
self._memberships[(group, member_id)] = member
diff --git a/nova/storage/linuxscsi.py b/nova/storage/linuxscsi.py
index 09669fa565..08577b0f6d 100644
--- a/nova/storage/linuxscsi.py
+++ b/nova/storage/linuxscsi.py
@@ -14,7 +14,7 @@
"""Generic linux scsi subsystem utilities."""
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
@@ -97,7 +97,7 @@ def find_multipath_device(device):
(out, err) = utils.execute('multipath', '-l', device,
run_as_root=True)
except processutils.ProcessExecutionError as exc:
- LOG.warn(_("Multipath call failed exit (%(code)s)")
+ LOG.warn(_LW("Multipath call failed exit (%(code)s)")
% {'code': exc.exit_code})
return None
@@ -110,15 +110,15 @@ def find_multipath_device(device):
# device line output is different depending
# on /etc/multipath.conf settings.
if info[1][:2] == "dm":
- mdev = "/dev/%s" % info[1]
mdev_id = info[0]
+ mdev = '/dev/mapper/%s' % mdev_id
elif info[2][:2] == "dm":
- mdev = "/dev/%s" % info[2]
mdev_id = info[1].replace('(', '')
mdev_id = mdev_id.replace(')', '')
+ mdev = '/dev/mapper/%s' % mdev_id
if mdev is None:
- LOG.warn(_("Couldn't find multipath device %s"), line)
+ LOG.warn(_LW("Couldn't find multipath device %s"), line)
return None
LOG.debug("Found multipath device = %s", mdev)
@@ -126,6 +126,11 @@ def find_multipath_device(device):
for dev_line in device_lines:
if dev_line.find("policy") != -1:
continue
+ if '#' in dev_line:
+ LOG.warn(_LW('Skip faulty line "%(dev_line)s" of'
+ ' multipath device %(mdev)s')
+ % {'mdev': mdev, 'dev_line': dev_line})
+ continue
dev_line = dev_line.lstrip(' |-`')
dev_info = dev_line.split()
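
The parser now derives the device path from the multipath id under /dev/mapper rather than the transient dm-N name, and skips faulty path lines flagged with '#'. A sketch over fabricated multipath -l output:

    # Fabricated output lines; real ones come from 'multipath -l <device>'.
    line = "36005076801ab01f44040404040404040 dm-2 IBM,2145"
    info = line.split()

    if info[1][:2] == "dm":
        mdev_id = info[0]
        mdev = '/dev/mapper/%s' % mdev_id
    assert mdev == '/dev/mapper/36005076801ab01f44040404040404040'

    for dev_line in ["|- 6:0:0:0 sdb 8:16 active ready running",
                     "|- 7:0:0:0 sdc 8:32 # faulty"]:
        if '#' in dev_line:
            continue  # skipped, as in find_multipath_device above
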
diff --git a/nova/test.py b/nova/test.py
index 1698e250bb..8c0cfff266 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -325,6 +325,7 @@ def setUp(self):
CONF.set_override('fatal_exception_format_errors', True)
CONF.set_override('enabled', True, 'osapi_v3')
CONF.set_override('force_dhcp_release', False)
+ CONF.set_override('periodic_enable', False)
def _restore_obj_registry(self):
objects_base.NovaObject._obj_classes = self._base_test_obj_backup
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 566fabba1d..a40a666484 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -25,13 +25,19 @@
# TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR
import os
import sys
+import traceback
+
# NOTE(mikal): All of this is because if dnspython is present in your
# environment then eventlet monkeypatches socket.getaddrinfo() with an
# implementation which doesn't work for IPv6. What we're checking here is
# that the magic environment variable was set when the import happened.
+# NOTE(dims): Prevent this code from kicking in under docs generation
+# as it leads to spurious errors/warnings.
+stack = traceback.extract_stack()
if ('eventlet' in sys.modules and
- os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
+ os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes' and
+ (len(stack) < 2 or 'sphinx' not in stack[-2][0])):
raise ImportError('eventlet imported before nova/cmd/__init__ '
'(env var set to %s)'
% os.environ.get('EVENTLET_NO_GREENDNS'))
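
The guard now inspects the import-time stack so that Sphinx documentation builds, which import test modules, do not trip the eventlet check. The stack probe on its own:

    import traceback

    stack = traceback.extract_stack()
    # Entries are (filename, lineno, func, text); the check asks whether
    # the immediate caller's filename mentions sphinx.
    invoked_by_sphinx = len(stack) >= 2 and 'sphinx' in stack[-2][0]
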
diff --git a/nova/tests/api/ec2/test_api.py b/nova/tests/api/ec2/test_api.py
index c5c3a63457..664532fee5 100644
--- a/nova/tests/api/ec2/test_api.py
+++ b/nova/tests/api/ec2/test_api.py
@@ -96,7 +96,7 @@ def close(self):
pass
-class XmlConversionTestCase(test.TestCase):
+class XmlConversionTestCase(test.NoDBTestCase):
"""Unit test api xml conversion."""
def test_number_conversion(self):
conv = ec2utils._try_convert
@@ -127,7 +127,7 @@ def test_number_conversion(self):
self.assertEqual(conv(''), '')
-class Ec2utilsTestCase(test.TestCase):
+class Ec2utilsTestCase(test.NoDBTestCase):
def test_ec2_id_to_id(self):
self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30)
self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29)
@@ -398,26 +398,26 @@ def test_group_name_valid_chars_security_group(self):
(True, "test name", bad_amazon_ec2),
(False, bad_strict_ec2, "test desc"),
]
- for test in test_raise:
+ for t in test_raise:
self.expect_http()
self.mox.ReplayAll()
- self.flags(ec2_strict_validation=test[0])
+ self.flags(ec2_strict_validation=t[0])
self.assertRaises(boto_exc.EC2ResponseError,
self.ec2.create_security_group,
- test[1],
- test[2])
+ t[1],
+ t[2])
test_accept = [
(False, bad_amazon_ec2, "test desc"),
(False, "test name", bad_amazon_ec2),
]
- for test in test_accept:
+ for t in test_accept:
self.expect_http()
self.mox.ReplayAll()
- self.flags(ec2_strict_validation=test[0])
- self.ec2.create_security_group(test[1], test[2])
+ self.flags(ec2_strict_validation=t[0])
+ self.ec2.create_security_group(t[1], t[2])
self.expect_http()
self.mox.ReplayAll()
- self.ec2.delete_security_group(test[1])
+ self.ec2.delete_security_group(t[1])
def test_group_name_valid_length_security_group(self):
"""Test that we sanely handle invalid security group names.
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 13155700cd..0d69a9ac86 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -34,6 +34,7 @@
from nova import test
from nova.tests import cast_as_call
from nova.tests import fake_network
+from nova.tests import fake_notifier
from nova.tests import fake_utils
from nova.tests.image import fake
from nova.tests import matchers
@@ -93,7 +94,7 @@ def setUp(self):
self.flags(compute_driver='nova.virt.fake.FakeDriver',
volume_api_class='nova.tests.fake_volume.API')
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
return {'id': id,
'name': 'fake_name',
'container_format': 'ami',
@@ -126,6 +127,12 @@ def dumb(*args, **kwargs):
# Short-circuit the conductor service
self.flags(use_local=True, group='conductor')
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
# set up services
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
@@ -515,9 +522,9 @@ def _setUpBlockDeviceMapping(self):
def _tearDownBlockDeviceMapping(self, inst1, inst2, volumes):
for vol in volumes:
self.volume_api.delete(self.context, vol['id'])
- for uuid in (inst1['uuid'], inst2['uuid']):
+ for instance_uuid in (inst1['uuid'], inst2['uuid']):
for bdm in db.block_device_mapping_get_all_by_instance(
- self.context, uuid):
+ self.context, instance_uuid):
db.block_device_mapping_destroy(self.context, bdm['id'])
db.instance_destroy(self.context, inst2['uuid'])
db.instance_destroy(self.context, inst1['uuid'])
@@ -679,7 +686,7 @@ def _setUpImageSet(self, create_volumes_and_snapshots=False):
'mappings': mappings2,
'block_device_mapping': block_device_mapping2}}
- def fake_show(meh, context, image_id):
+ def fake_show(meh, context, image_id, **kwargs):
_images = [copy.deepcopy(image1), copy.deepcopy(image2)]
for i in _images:
if str(i['id']) == str(image_id):
@@ -852,7 +859,7 @@ def test_stop_start_with_volume(self):
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
- #Here we puke...
+ # Here we puke...
self.cloud.terminate_instances(self.context, [ec2_instance_id])
admin_ctxt = context.get_admin_context(read_deleted="no")
@@ -990,7 +997,7 @@ def test_run_with_snapshot(self):
self._assert_volume_attached(vol, instance_uuid, mountpoint)
- #Just make sure we found them
+ # Just make sure we found them
self.assertTrue(vol1_id)
self.assertTrue(vol2_id)
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 90f95e0ded..3406b9f7d3 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -60,6 +60,7 @@
from nova.tests import cast_as_call
from nova.tests import fake_block_device
from nova.tests import fake_network
+from nova.tests import fake_notifier
from nova.tests import fake_utils
from nova.tests.image import fake
from nova.tests import matchers
@@ -141,7 +142,7 @@ def setUp(self):
self.useFixture(fixtures.FakeLogger('boto'))
fake_utils.stub_out_utils_spawn_n(self.stubs)
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
return {'id': id,
'name': 'fake_name',
'container_format': 'ami',
@@ -174,6 +175,12 @@ def dumb(*args, **kwargs):
# Short-circuit the conductor service
self.flags(use_local=True, group='conductor')
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
# set up services
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
@@ -1484,7 +1491,8 @@ def _setUpImageSet(self, create_volumes_and_snapshots=False):
mappings2 = [{'device': '/dev/sda1', 'virtual': 'root'}]
block_device_mapping2 = [{'device_name': '/dev/sdb1',
- 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7'}]
+ 'snapshot_id': 'ccec42a2-c220-4806-b762-6b12fbb592e7',
+ 'volume_id': None}]
image2 = {
'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fake_name',
@@ -1496,7 +1504,7 @@ def _setUpImageSet(self, create_volumes_and_snapshots=False):
'mappings': mappings2,
'block_device_mapping': block_device_mapping2}}
- def fake_show(meh, context, image_id):
+ def fake_show(meh, context, image_id, **kwargs):
_images = [copy.deepcopy(image1), copy.deepcopy(image2)]
for i in _images:
if str(i['id']) == str(image_id):
@@ -1592,7 +1600,7 @@ def test_describe_image_mapping(self):
def test_describe_image_attribute(self):
describe_image_attribute = self.cloud.describe_image_attribute
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'status': 'active',
@@ -1660,7 +1668,7 @@ def test_modify_image_attribute(self):
'type': 'machine'},
'is_public': False}
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
return copy.deepcopy(fake_metadata)
def fake_detail(self, context, **kwargs):
@@ -1933,7 +1941,7 @@ def test_run_instances(self):
'max_count': 1}
run_instances = self.cloud.run_instances
- def fake_show(self, context, id):
+ def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
@@ -1964,7 +1972,7 @@ def test_run_instances_invalid_maxcount(self):
'max_count': 0}
run_instances = self.cloud.run_instances
- def fake_show(self, context, id):
+ def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
@@ -1984,7 +1992,7 @@ def test_run_instances_invalid_mincount(self):
'min_count': 0}
run_instances = self.cloud.run_instances
- def fake_show(self, context, id):
+ def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
@@ -2005,7 +2013,7 @@ def test_run_instances_invalid_count(self):
'min_count': 2}
run_instances = self.cloud.run_instances
- def fake_show(self, context, id):
+ def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
@@ -2027,7 +2035,7 @@ def test_run_instances_availability_zone(self):
}
run_instances = self.cloud.run_instances
- def fake_show(self, context, id):
+ def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
@@ -2078,7 +2086,7 @@ def test_run_instances_idempotent(self):
run_instances = self.cloud.run_instances
- def fake_show(self, context, id):
+ def fake_show(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'properties': {
@@ -2178,7 +2186,7 @@ def test_run_instances_image_status_active(self):
'max_count': 1}
run_instances = self.cloud.run_instances
- def fake_show_stat_active(self, context, id):
+ def fake_show_stat_active(self, context, id, **kwargs):
return {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fake_name',
'container_format': 'ami',
@@ -2221,14 +2229,32 @@ def test_stop_start_instance(self):
self.cloud.start_instances,
self.context, [instance_id])
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 80,
+ 'name': 'stopped'}}]}
result = self.cloud.stop_instances(self.context, [instance_id])
- self.assertTrue(result)
+ self.assertEqual(result, expected)
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 16,
+ 'name': 'running'}}]}
result = self.cloud.start_instances(self.context, [instance_id])
- self.assertTrue(result)
+ self.assertEqual(result, expected)
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 80,
+ 'name': 'stopped'}}]}
result = self.cloud.stop_instances(self.context, [instance_id])
- self.assertTrue(result)
+ self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
@@ -2248,8 +2274,14 @@ def test_start_instances(self):
result = self.cloud.stop_instances(self.context, [instance_id])
self.assertTrue(result)
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 80,
+ 'name': 'stopped'},
+ 'currentState': {'code': 16,
+ 'name': 'running'}}]}
result = self.cloud.start_instances(self.context, [instance_id])
- self.assertTrue(result)
+ self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
@@ -2283,8 +2315,14 @@ def test_stop_instances(self):
'max_count': 1, }
instance_id = self._run_instance(**kwargs)
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 80,
+ 'name': 'stopped'}}]}
result = self.cloud.stop_instances(self.context, [instance_id])
- self.assertTrue(result)
+ self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
@@ -2384,8 +2422,14 @@ def test_terminate_instances_two_instances(self):
inst1 = self._run_instance(**kwargs)
inst2 = self._run_instance(**kwargs)
+ expected = {'instancesSet': [
+ {'instanceId': 'i-00000001',
+ 'previousState': {'code': 16,
+ 'name': 'running'},
+ 'currentState': {'code': 80,
+ 'name': 'stopped'}}]}
result = self.cloud.stop_instances(self.context, [inst1])
- self.assertTrue(result)
+ self.assertEqual(result, expected)
expected = {'instancesSet': [
{'instanceId': 'i-00000001',
@@ -2454,7 +2498,7 @@ def _do_test_create_image(self, no_reboot):
'max_count': 1}
ec2_instance_id = self._run_instance(**kwargs)
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
bdm = [dict(snapshot_id=snapshots[0],
volume_size=1,
device_name='sda1',
@@ -2472,7 +2516,7 @@ def fake_show(meh, context, id):
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
- use_slave=False):
+ use_subordinate=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': volumes[0],
'snapshot_id': snapshots[0],
@@ -2496,7 +2540,8 @@ def fake_power_on(self, context, instance, network_info,
self.stubs.Set(fake_virt.FakeDriver, 'power_on', fake_power_on)
- def fake_power_off(self, instance):
+ def fake_power_off(self, instance,
+ shutdown_timeout, shutdown_attempts):
virt_driver['powered_off'] = True
self.stubs.Set(fake_virt.FakeDriver, 'power_off', fake_power_off)
@@ -2547,7 +2592,7 @@ def test_create_image_instance_store(self):
ec2_instance_id = self._run_instance(**kwargs)
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
- use_slave=False):
+ use_subordinate=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': volumes[0],
'snapshot_id': snapshots[0],
@@ -2568,7 +2613,7 @@ def fake_block_device_mapping_get_all_by_instance(context, inst_id,
no_reboot=True)
@staticmethod
- def _fake_bdm_get(ctxt, id, use_slave=False):
+ def _fake_bdm_get(ctxt, id, use_subordinate=False):
blockdms = [{'volume_id': 87654321,
'source_type': 'volume',
'destination_type': 'volume',
@@ -2770,7 +2815,7 @@ def test_dia_iisb(expected_result, **kwargs):
test_dia_iisb('stop', image_id='ami-2',
block_device_mapping=block_device_mapping)
- def fake_show(self, context, id_):
+ def fake_show(self, context, id_, **kwargs):
LOG.debug("id_ %s", id_)
prop = {}
@@ -2801,9 +2846,9 @@ def fake_show(self, context, id_):
'container_format': 'ami',
'status': 'active'}
- # NOTE(yamahata): create ami-3 ... ami-6
+ # NOTE(yamahata): create ami-3 ... ami-7
# ami-1 and ami-2 is already created by setUp()
- for i in range(3, 7):
+ for i in range(3, 8):
db.s3_image_create(self.context, 'ami-%d' % i)
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
@@ -2812,6 +2857,8 @@ def fake_show(self, context, id_):
test_dia_iisb('stop', image_id='ami-4')
test_dia_iisb('stop', image_id='ami-5')
test_dia_iisb('stop', image_id='ami-6')
+ test_dia_iisb('terminate', image_id='ami-7',
+ instance_initiated_shutdown_behavior='terminate')
def test_create_delete_tags(self):
@@ -3091,7 +3138,7 @@ def test_detach_volume_unattched_error(self, mock_ec2_vol_id_to_uuid):
mock_ec2_vol_id_to_uuid.assert_called_once_with(ec2_volume_id)
-class CloudTestCaseNeutronProxy(test.TestCase):
+class CloudTestCaseNeutronProxy(test.NoDBTestCase):
def setUp(self):
super(CloudTestCaseNeutronProxy, self).setUp()
cfg.CONF.set_override('security_group_api', 'neutron')
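The stop/start assertions above replace a bare truthiness check with the full EC2-style payload. The numeric codes are the standard EC2 instance-state codes (16 = running, 80 = stopped). A minimal sketch of a helper that builds such a payload; build_state_change and _STATE_CODES are hypothetical names, not part of the patch:

# Standard EC2 instance-state codes; only the two used by these tests
# are listed (the full table also defines pending=0, terminated=48, ...).
_STATE_CODES = {'running': 16, 'stopped': 80}

def build_state_change(instance_id, previous, current):
    # Shape of the instancesSet entries asserted above.
    return {'instancesSet': [
        {'instanceId': instance_id,
         'previousState': {'code': _STATE_CODES[previous],
                           'name': previous},
         'currentState': {'code': _STATE_CODES[current],
                          'name': current}}]}

# Mirrors the dict expected after the first stop_instances call:
assert build_state_change('i-00000001', 'running', 'stopped') == {
    'instancesSet': [{'instanceId': 'i-00000001',
                      'previousState': {'code': 16, 'name': 'running'},
                      'currentState': {'code': 80, 'name': 'stopped'}}]}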
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index 4f1a11481d..a058e46597 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -28,6 +28,7 @@
from nova import test
from nova.tests import cast_as_call
from nova.tests import fake_network
+from nova.tests import fake_notifier
from nova.tests.image import fake
CONF = cfg.CONF
@@ -48,6 +49,15 @@ def dumb(*args, **kwargs):
# set up our cloud
self.cloud = cloud.CloudController()
+ # Short-circuit the conductor service
+ self.flags(use_local=True, group='conductor')
+
+ # Stub out the notification service so we use the no-op serializer
+ # and avoid lazy-load traces with the wrap_exception decorator in
+ # the compute service.
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
# set up services
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
@@ -71,12 +81,12 @@ def dumb(*args, **kwargs):
self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
for x in self.EC2_VALID__IDS])
self.volume_id_exception_map = [(x,
- exception.InvalidInstanceIDMalformed)
+ exception.InvalidVolumeIDMalformed)
for x in self.EC2_MALFORMED_IDS]
self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
for x in self.EC2_VALID__IDS])
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
return {'id': id,
'container_format': 'ami',
'properties': {
@@ -106,7 +116,7 @@ def tearDown(self):
super(EC2ValidateTestCase, self).tearDown()
fake.FakeImageService_reset()
- #EC2_API tests (InvalidInstanceID.Malformed)
+ # EC2_API tests (InvalidInstanceID.Malformed)
def test_console_output(self):
for ec2_id, e in self.ec2_id_exception_map:
self.assertRaises(e,
@@ -171,7 +181,7 @@ def test_detach_volume(self):
volume_id=ec2_id)
-class EC2TimestampValidationTestCase(test.TestCase):
+class EC2TimestampValidationTestCase(test.NoDBTestCase):
"""Test case for EC2 request timestamp validation."""
def test_validate_ec2_timestamp_valid(self):
@@ -215,7 +225,7 @@ def test_validate_ec2_timestamp_invalid_format(self):
def test_validate_ec2_timestamp_advanced_time(self):
- #EC2 request with Timestamp in advanced time
+ # EC2 request with Timestamp in advanced time
timestamp = timeutils.utcnow() + datetime.timedelta(seconds=250)
params = {'Timestamp': timeutils.strtime(timestamp,
"%Y-%m-%dT%H:%M:%SZ")}
@@ -252,14 +262,14 @@ def test_validate_ec2_req_not_expired(self):
def test_validate_Expires_timestamp_invalid_format(self):
- #EC2 request with invalid Expires
+ # EC2 request with invalid Expires
params = {'Expires': '2011-04-22T11:29:49'}
expired = ec2utils.is_ec2_timestamp_expired(params)
self.assertTrue(expired)
def test_validate_ec2_req_timestamp_Expires(self):
- #EC2 request with both Timestamp and Expires
+ # EC2 request with both Timestamp and Expires
params = {'Timestamp': '2011-04-22T11:29:49Z',
'Expires': timeutils.isotime()}
self.assertRaises(exception.InvalidRequest,
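The Expires/Timestamp tests pin down three rules: a request may not carry both keys, a malformed Expires counts as already expired, and a well-formed Expires is compared against the current time. A simplified self-contained sketch of that logic, assuming nothing from Nova (ValueError stands in for InvalidRequest):

import datetime

def is_request_expired(params, now=None):
    # Rule 1: Timestamp and Expires are mutually exclusive.
    if 'Timestamp' in params and 'Expires' in params:
        raise ValueError('request may not carry both Timestamp and Expires')
    expires = params.get('Expires')
    if expires is None:
        return False
    # Rule 2: a malformed Expires is treated as already expired.
    try:
        when = datetime.datetime.strptime(expires, '%Y-%m-%dT%H:%M:%SZ')
    except ValueError:
        return True
    # Rule 3: otherwise compare against the current time.
    now = now or datetime.datetime.utcnow()
    return when <= now

# '2011-04-22T11:29:49' lacks the trailing Z, so it is malformed:
assert is_request_expired({'Expires': '2011-04-22T11:29:49'})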
diff --git a/nova/tests/api/ec2/test_ec2utils.py b/nova/tests/api/ec2/test_ec2utils.py
new file mode 100644
index 0000000000..9dceb7de12
--- /dev/null
+++ b/nova/tests/api/ec2/test_ec2utils.py
@@ -0,0 +1,61 @@
+# Copyright 2014 - Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.ec2 import ec2utils
+from nova import context
+from nova import objects
+from nova import test
+
+
+class EC2UtilsTestCase(test.TestCase):
+ def setUp(self):
+ self.ctxt = context.get_admin_context()
+ ec2utils.reset_cache()
+ super(EC2UtilsTestCase, self).setUp()
+
+ def test_get_int_id_from_snapshot_uuid(self):
+ smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
+ smap.create()
+ smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
+ 'fake-uuid')
+ self.assertEqual(smap.id, smap_id)
+
+ def test_get_int_id_from_snapshot_uuid_creates_mapping(self):
+ smap_id = ec2utils.get_int_id_from_snapshot_uuid(self.ctxt,
+ 'fake-uuid')
+ smap = objects.EC2SnapshotMapping.get_by_id(self.ctxt, smap_id)
+ self.assertEqual('fake-uuid', smap.uuid)
+
+ def test_get_snapshot_uuid_from_int_id(self):
+ smap = objects.EC2SnapshotMapping(self.ctxt, uuid='fake-uuid')
+ smap.create()
+ smap_uuid = ec2utils.get_snapshot_uuid_from_int_id(self.ctxt, smap.id)
+ self.assertEqual(smap.uuid, smap_uuid)
+
+ def test_id_to_glance_id(self):
+ s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
+ s3imap.create()
+ uuid = ec2utils.id_to_glance_id(self.ctxt, s3imap.id)
+ self.assertEqual(uuid, s3imap.uuid)
+
+ def test_glance_id_to_id(self):
+ s3imap = objects.S3ImageMapping(self.ctxt, uuid='fake-uuid')
+ s3imap.create()
+ s3imap_id = ec2utils.glance_id_to_id(self.ctxt, s3imap.uuid)
+ self.assertEqual(s3imap_id, s3imap.id)
+
+ def test_glance_id_to_id_creates_mapping(self):
+ s3imap_id = ec2utils.glance_id_to_id(self.ctxt, 'fake-uuid')
+ s3imap = objects.S3ImageMapping.get_by_id(self.ctxt, s3imap_id)
+ self.assertEqual('fake-uuid', s3imap.uuid)
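The new test file fixes a get-or-create contract for the EC2 integer-id mappings: asking for the integer id of an unknown uuid must create the mapping on the fly, and the reverse lookup must return the original uuid. An in-memory sketch of that contract; IdMap is a hypothetical stand-in for the objects-backed implementation:

class IdMap(object):
    """In-memory stand-in for the EC2 integer-id <-> uuid mapping."""

    def __init__(self):
        self._by_id = {}
        self._by_uuid = {}

    def id_for_uuid(self, uuid):
        # Get-or-create, matching the *_creates_mapping tests.
        if uuid not in self._by_uuid:
            new_id = len(self._by_id) + 1
            self._by_id[new_id] = uuid
            self._by_uuid[uuid] = new_id
        return self._by_uuid[uuid]

    def uuid_for_id(self, int_id):
        return self._by_id[int_id]

m = IdMap()
i = m.id_for_uuid('fake-uuid')
assert m.uuid_for_id(i) == 'fake-uuid'
assert m.id_for_uuid('fake-uuid') == i  # stable across repeated lookups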
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
index 9a1a05a427..13b811c7f2 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
@@ -138,28 +138,6 @@ def _test_invalid_state(self, action, method=None, body_map=None,
self.mox.VerifyAll()
self.mox.UnsetStubs()
- def _test_not_implemented_state(self, action, method=None,
- error_text=None):
- if method is None:
- method = action
- body_map = {}
- compute_api_args_map = {}
- instance = self._stub_instance_get()
- args, kwargs = compute_api_args_map.get(action, ((), {}))
- getattr(self.compute_api, method)(self.context, instance,
- *args, **kwargs).AndRaise(
- NotImplementedError())
- self.mox.ReplayAll()
-
- res = self._make_request('/servers/%s/action' % instance['uuid'],
- {action: body_map.get(action)})
- self.assertEqual(501, res.status_int)
- self.assertIn(error_text, res.body)
- # Do these here instead of tearDown because this method is called
- # more than once for the same test case
- self.mox.VerifyAll()
- self.mox.UnsetStubs()
-
def _test_locked_instance(self, action, method=None, body_map=None,
compute_api_args_map=None):
if method is None:
@@ -218,17 +196,6 @@ def test_actions_raise_conflict_on_invalid_state(self):
# Re-mock this.
self.mox.StubOutWithMock(self.compute_api, 'get')
- def test_actions_raise_on_not_implemented(self):
- tests = [
- ('pause', 'Virt driver does not implement pause function.'),
- ('unpause', 'Virt driver does not implement unpause function.')
- ]
- for (action, error_text) in tests:
- self.mox.StubOutWithMock(self.compute_api, action)
- self._test_not_implemented_state(action, error_text=error_text)
- # Re-mock this.
- self.mox.StubOutWithMock(self.compute_api, 'get')
-
def test_actions_with_non_existed_instance(self):
actions = ['pause', 'unpause', 'suspend', 'resume',
'resetNetwork', 'injectNetworkInfo', 'lock',
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_admin_password.py b/nova/tests/api/openstack/compute/contrib/test_admin_password.py
similarity index 67%
rename from nova/tests/api/openstack/compute/plugins/v3/test_admin_password.py
rename to nova/tests/api/openstack/compute/contrib/test_admin_password.py
index 78cd3f50a6..0005d7d2e5 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_admin_password.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_password.py
@@ -15,7 +15,8 @@
# under the License.
import webob
-from nova.api.openstack.compute.plugins.v3 import admin_password
+from nova.api.openstack.compute.plugins.v3 import admin_password \
+ as admin_password_v21
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common import jsonutils
@@ -45,18 +46,19 @@ def fake_set_admin_password_not_implemented(self, context, instance,
raise NotImplementedError()
-class AdminPasswordTest(test.NoDBTestCase):
+class AdminPasswordTestV21(test.NoDBTestCase):
+ plugin = admin_password_v21
def setUp(self):
- super(AdminPasswordTest, self).setUp()
+ super(AdminPasswordTestV21, self).setUp()
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password)
self.stubs.Set(compute_api.API, 'get', fake_get)
self.app = fakes.wsgi_app_v3(init_only=('servers',
- admin_password.ALIAS))
+ self.plugin.ALIAS))
- def _make_request(self, url, body):
- req = webob.Request.blank(url)
+ def _make_request(self, body):
+ req = webob.Request.blank('/v3/servers/1/action')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.content_type = 'application/json'
@@ -64,54 +66,46 @@ def _make_request(self, url, body):
return res
def test_change_password(self):
- url = '/v3/servers/1/action'
- body = {'change_password': {'admin_password': 'test'}}
- res = self._make_request(url, body)
- self.assertEqual(res.status_int, 204)
+ body = {'changePassword': {'adminPass': 'test'}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 202)
def test_change_password_empty_string(self):
- url = '/v3/servers/1/action'
- body = {'change_password': {'admin_password': ''}}
- res = self._make_request(url, body)
- self.assertEqual(res.status_int, 204)
+ body = {'changePassword': {'adminPass': ''}}
+ res = self._make_request(body)
+ self.assertEqual(res.status_int, 202)
def test_change_password_with_non_implement(self):
- url = '/v3/servers/1/action'
- body = {'change_password': {'admin_password': 'test'}}
+ body = {'changePassword': {'adminPass': 'test'}}
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password_not_implemented)
- res = self._make_request(url, body)
+ res = self._make_request(body)
self.assertEqual(res.status_int, 501)
def test_change_password_with_non_existed_instance(self):
- url = '/v3/servers/1/action'
- body = {'change_password': {'admin_password': 'test'}}
+ body = {'changePassword': {'adminPass': 'test'}}
self.stubs.Set(compute_api.API, 'get', fake_get_non_existent)
- res = self._make_request(url, body)
+ res = self._make_request(body)
self.assertEqual(res.status_int, 404)
def test_change_password_with_non_string_password(self):
- url = '/v3/servers/1/action'
- body = {'change_password': {'admin_password': 1234}}
- res = self._make_request(url, body)
+ body = {'changePassword': {'adminPass': 1234}}
+ res = self._make_request(body)
self.assertEqual(res.status_int, 400)
def test_change_password_failed(self):
- url = '/v3/servers/1/action'
- body = {'change_password': {'admin_password': 'test'}}
+ body = {'changePassword': {'adminPass': 'test'}}
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password_failed)
- res = self._make_request(url, body)
+ res = self._make_request(body)
self.assertEqual(res.status_int, 409)
def test_change_password_without_admin_password(self):
- url = '/v3/servers/1/action'
- body = {'change_password': {}}
- res = self._make_request(url, body)
+ body = {'changePassword': {}}
+ res = self._make_request(body)
self.assertEqual(res.status_int, 400)
def test_change_password_none(self):
- url = '/v3/servers/1/action'
- body = {'change_password': None}
- res = self._make_request(url, body)
+ body = {'changePassword': None}
+ res = self._make_request(body)
self.assertEqual(res.status_int, 400)
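The rewritten tests drive the v2-style action names (changePassword/adminPass) and expect 202 Accepted instead of the v3-era 204. For reference, a client-side sketch of the request body the suite sends, using only webob and the stdlib; change_password_request is a hypothetical helper:

import json

import webob

def change_password_request(password, server_id='1'):
    # Same action body the tests above post to /v3/servers/<id>/action.
    req = webob.Request.blank('/v3/servers/%s/action' % server_id)
    req.method = 'POST'
    req.body = json.dumps({'changePassword': {'adminPass': password}})
    req.content_type = 'application/json'
    return req

req = change_password_request('test')
assert json.loads(req.body)['changePassword']['adminPass'] == 'test'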
diff --git a/nova/tests/api/openstack/compute/contrib/test_agents.py b/nova/tests/api/openstack/compute/contrib/test_agents.py
index 5036af93d7..0a3aae3104 100644
--- a/nova/tests/api/openstack/compute/contrib/test_agents.py
+++ b/nova/tests/api/openstack/compute/contrib/test_agents.py
@@ -18,30 +18,31 @@
from nova import context
from nova import db
from nova.db.sqlalchemy import models
+from nova import exception
from nova import test
fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx1',
+ 'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx2',
+ 'url': 'http://example.com/path/to/resource2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx3',
+ 'url': 'http://example.com/path/to/resource3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'id': 4},
]
@@ -105,19 +106,19 @@ def test_agents_create(self):
'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1}}
res_dict = self.controller.create(req, body)
self.assertEqual(res_dict, response)
- def _test_agents_create_with_invalid_length(self, key):
+ def _test_agents_create_key_error(self, key):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
@@ -125,6 +126,64 @@ def _test_agents_create_with_invalid_length(self, key):
'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ body['agent'].pop(key)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_agents_create_without_hypervisor(self):
+ self._test_agents_create_key_error('hypervisor')
+
+ def test_agents_create_without_os(self):
+ self._test_agents_create_key_error('os')
+
+ def test_agents_create_without_architecture(self):
+ self._test_agents_create_key_error('architecture')
+
+ def test_agents_create_without_version(self):
+ self._test_agents_create_key_error('version')
+
+ def test_agents_create_without_url(self):
+ self._test_agents_create_key_error('url')
+
+ def test_agents_create_without_md5hash(self):
+ self._test_agents_create_key_error('md5hash')
+
+ def test_agents_create_with_wrong_type(self):
+ req = FakeRequest()
+ body = {'agent': None}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_agents_create_with_empty_type(self):
+ req = FakeRequest()
+ body = {}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, body)
+
+ def test_agents_create_with_existing_agent(self):
+ def fake_agent_build_create_with_existing_agent(context, values):
+ raise exception.AgentBuildExists(**values)
+
+ self.stubs.Set(db, 'agent_build_create',
+ fake_agent_build_create_with_existing_agent)
+ req = FakeRequest()
+ body = {'agent': {'hypervisor': 'kvm',
+ 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ self.assertRaises(webob.exc.HTTPConflict, self.controller.create, req,
+ body=body)
+
+ def _test_agents_create_with_invalid_length(self, key):
+ req = FakeRequest()
+ body = {'agent': {'hypervisor': 'kvm',
+ 'os': 'win',
+ 'architecture': 'x86',
+ 'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'][key] = 'x' * 256
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
@@ -157,25 +216,25 @@ def test_agents_list(self):
agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx1',
+ 'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx2',
+ 'url': 'http://example.com/path/to/resource2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'agent_id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx3',
+ 'url': 'http://example.com/path/to/resource3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'agent_id': 4},
]
@@ -187,13 +246,13 @@ def test_agents_list_with_hypervisor(self):
response = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx1',
+ 'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
]
@@ -202,20 +261,58 @@ def test_agents_list_with_hypervisor(self):
def test_agents_update(self):
req = FakeRequest()
body = {'para': {'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'agent_id': 1,
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
res_dict = self.controller.update(req, 1, body)
self.assertEqual(res_dict, response)
- def _test_agents_update_with_invalid_length(self, key):
+ def _test_agents_update_key_error(self, key):
req = FakeRequest()
body = {'para': {'version': '7.0',
'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ body['para'].pop(key)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, 1, body)
+
+ def test_agents_update_without_version(self):
+ self._test_agents_update_key_error('version')
+
+ def test_agents_update_without_url(self):
+ self._test_agents_update_key_error('url')
+
+ def test_agents_update_without_md5hash(self):
+ self._test_agents_update_key_error('md5hash')
+
+ def test_agents_update_with_wrong_type(self):
+ req = FakeRequest()
+ body = {'agent': None}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, 1, body)
+
+ def test_agents_update_with_empty(self):
+ req = FakeRequest()
+ body = {}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, 1, body)
+
+ def test_agents_update_value_error(self):
+ req = FakeRequest()
+ body = {'para': {'version': '7.0',
+ 'url': 1111,
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req, 1, body)
+
+ def _test_agents_update_with_invalid_length(self, key):
+ req = FakeRequest()
+ body = {'para': {'version': '7.0',
+ 'url': 'http://example.com/path/to/resource',
+ 'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['para'][key] = 'x' * 256
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, 1, body)
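The *_key_error helpers all follow one pattern: start from a known-good body, pop a single required key, and expect HTTPBadRequest. A simplified sketch of the server-side check the pattern implies; validate_agent_body and ValueError are stand-ins, not the controller's actual code:

REQUIRED_AGENT_KEYS = ('hypervisor', 'os', 'architecture',
                       'version', 'url', 'md5hash')

def validate_agent_body(body):
    agent = body.get('agent') if isinstance(body, dict) else None
    if not isinstance(agent, dict):
        raise ValueError('malformed or missing agent entity')
    missing = [key for key in REQUIRED_AGENT_KEYS if key not in agent]
    if missing:
        raise ValueError('missing keys: %s' % ', '.join(missing))

# Mirrors test_agents_create_without_url:
body = {'agent': dict.fromkeys(REQUIRED_AGENT_KEYS, 'x')}
body['agent'].pop('url')
try:
    validate_agent_body(body)
except ValueError as err:
    assert 'url' in str(err)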
diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
index 5e8d19e85b..40150e9eb9 100644
--- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py
+++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py
@@ -396,12 +396,20 @@ def test_add_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"add_host": {"asdf": "asdf"}})
+ def test_add_host_with_invalid_format_host(self):
+ self.assertRaises(exc.HTTPBadRequest, self.controller.action,
+ self.req, "1", body={"add_host": {"host": "a" * 300}})
+
+ def test_add_host_with_multiple_hosts(self):
+ self.assertRaises(exc.HTTPBadRequest, self.controller.action,
+ self.req, "1", body={"add_host": {"host": ["host1", "host2"]}})
+
def test_add_host_raises_key_error(self):
def stub_add_host_to_aggregate(context, aggregate, host):
raise KeyError
self.stubs.Set(self.controller.api, "add_host_to_aggregate",
stub_add_host_to_aggregate)
- #NOTE(mtreinish) The check for a KeyError here is to ensure that
+ # NOTE(mtreinish) The check for a KeyError here is to ensure that
# if add_host_to_aggregate() raises a KeyError it propagates. At
# one point the api code would mask the error as a HTTPBadRequest.
# This test is to ensure that this doesn't occur again.
@@ -466,6 +474,11 @@ def test_remove_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"asdf": "asdf"})
+ def test_remove_host_with_multiple_hosts(self):
+ self.assertRaises(exc.HTTPBadRequest, self.controller.action,
+ self.req, "1", body={"remove_host": {"host":
+ ["host1", "host2"]}})
+
def test_remove_host_with_extra_param(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"remove_host": {"asdf": "asdf",
@@ -488,6 +501,18 @@ def stub_update_aggregate(context, aggregate, values):
self.assertEqual(AGGREGATE, result["aggregate"])
+ def test_set_metadata_delete(self):
+ body = {"set_metadata": {"metadata": {"foo": None}}}
+
+ with mock.patch.object(self.controller.api,
+ 'update_aggregate_metadata') as mocked:
+ mocked.return_value = AGGREGATE
+ result = self.controller.action(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+ mocked.assert_called_once_with(self.context, "1",
+ body["set_metadata"]["metadata"])
+
def test_set_metadata_no_admin(self):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller._set_metadata,
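test_set_metadata_delete relies on the convention that a None value in a set_metadata request asks for the key to be removed rather than stored. A small sketch of that merge rule; apply_metadata_update is hypothetical:

def apply_metadata_update(current, updates):
    result = dict(current)
    for key, value in updates.items():
        if value is None:
            result.pop(key, None)  # None requests removal of the key
        else:
            result[key] = value
    return result

# Mirrors the body sent by test_set_metadata_delete:
assert apply_metadata_update({'foo': 'bar', 'keep': '1'},
                             {'foo': None}) == {'keep': '1'}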
diff --git a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
index 92792670c4..ee411d3896 100644
--- a/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
+++ b/nova/tests/api/openstack/compute/contrib/test_attach_interfaces.py
@@ -16,7 +16,10 @@
import mock
from oslo.config import cfg
-from nova.api.openstack.compute.contrib import attach_interfaces
+from nova.api.openstack.compute.contrib import attach_interfaces \
+ as attach_interfaces_v2
+from nova.api.openstack.compute.plugins.v3 import attach_interfaces \
+ as attach_interfaces_v3
from nova.compute import api as compute_api
from nova import context
from nova import exception
@@ -41,6 +44,7 @@
FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
+FAKE_BAD_NET_ID = '00000000-0000-0000-0000-000000000000'
port_data1 = {
"id": FAKE_PORT_ID1,
@@ -96,7 +100,7 @@ def fake_attach_interface(self, context, instance, network_id, port_id,
# if no network_id is given when add a port to an instance, use the
# first default network.
network_id = fake_networks[0]
- if network_id == 'bad_id':
+ if network_id == FAKE_BAD_NET_ID:
raise exception.NetworkNotFound(network_id=network_id)
if not port_id:
port_id = ports[fake_networks.index(network_id)]['id']
@@ -118,9 +122,12 @@ def fake_get_instance(self, *args, **kwargs):
return {}
-class InterfaceAttachTests(test.NoDBTestCase):
+class InterfaceAttachTestsV21(test.NoDBTestCase):
+ url = '/v3/os-interfaces'
+ controller_cls = attach_interfaces_v3.InterfaceAttachmentController
+
def setUp(self):
- super(InterfaceAttachTests, self).setUp()
+ super(InterfaceAttachTestsV21, self).setUp()
self.flags(auth_strategy=None, group='neutron')
self.flags(url='http://anyhost/', group='neutron')
self.flags(url_timeout=30, group='neutron')
@@ -135,40 +142,69 @@ def setUp(self):
'port_state': port_data1['status'],
'fixed_ips': port_data1['fixed_ips'],
}}
+ self.attachments = self.controller_cls()
+
+ @mock.patch.object(compute_api.API, 'get',
+ side_effect=exception.InstanceNotFound(instance_id=''))
+ def _test_instance_not_found(self, url, func, args, mock_get, kwargs=None,
+ method='GET'):
+ req = webob.Request.blank(url)
+ req.method = method
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ if not kwargs:
+ kwargs = {}
+ self.assertRaises(exc.HTTPNotFound, func, req, *args, **kwargs)
+
+ def test_show_instance_not_found(self):
+ self._test_instance_not_found(self.url + '/fake',
+ self.attachments.show, ('fake', 'fake'))
+
+ def test_index_instance_not_found(self):
+ self._test_instance_not_found(self.url,
+ self.attachments.index, ('fake', ))
+
+ def test_detach_interface_instance_not_found(self):
+ self._test_instance_not_found(self.url + '/fake',
+ self.attachments.delete,
+ ('fake', 'fake'), method='DELETE')
+
+ def test_attach_interface_instance_not_found(self):
+ self._test_instance_not_found(
+ '/v2/fake/os-interfaces', self.attachments.create, ('fake', ),
+ kwargs={'body': {'interfaceAttachment': {}}}, method='POST')
def test_show(self):
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/show')
+ req = webob.Request.blank(self.url + '/show')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
- result = attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1)
+ result = self.attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1)
self.assertEqual(self.expected_show, result)
def test_show_invalid(self):
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/show')
+ req = webob.Request.blank(self.url + '/show')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
- attachments.show, req, FAKE_UUID2, FAKE_PORT_ID1)
+ self.attachments.show, req, FAKE_UUID2,
+ FAKE_PORT_ID1)
def test_delete(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/delete')
+ req = webob.Request.blank(self.url + '/delete')
req.method = 'DELETE'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
- result = attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1)
+ result = self.attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1)
self.assertEqual('202 Accepted', result.status)
def test_detach_interface_instance_locked(self):
@@ -179,15 +215,14 @@ def fake_detach_interface_from_locked_server(self, context,
self.stubs.Set(compute_api.API,
'detach_interface',
fake_detach_interface_from_locked_server)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/delete')
+ req = webob.Request.blank(self.url + '/delete')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPConflict,
- attachments.delete,
+ self.attachments.delete,
req,
FAKE_UUID1,
FAKE_PORT_ID1)
@@ -195,15 +230,14 @@ def fake_detach_interface_from_locked_server(self, context,
def test_delete_interface_not_found(self):
self.stubs.Set(compute_api.API, 'detach_interface',
fake_detach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/delete')
+ req = webob.Request.blank(self.url + '/delete')
req.method = 'DELETE'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPNotFound,
- attachments.delete,
+ self.attachments.delete,
req,
FAKE_UUID1,
'invalid-port-id')
@@ -216,80 +250,107 @@ def fake_attach_interface_to_locked_server(self, context,
self.stubs.Set(compute_api.API,
'attach_interface',
fake_attach_interface_to_locked_server)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/attach')
+ req = webob.Request.blank(self.url + '/attach')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPConflict,
- attachments.create, req, FAKE_UUID1,
- jsonutils.loads(req.body))
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
def test_attach_interface_without_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/attach')
+ req = webob.Request.blank(self.url + '/attach')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
- result = attachments.create(req, FAKE_UUID1, jsonutils.loads(req.body))
+ result = self.attachments.create(req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID1)
def test_attach_interface_with_network_id(self):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/attach')
+ req = webob.Request.blank(self.url + '/attach')
req.method = 'POST'
req.body = jsonutils.dumps({'interfaceAttachment':
{'net_id': FAKE_NET_ID2}})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
- result = attachments.create(req, FAKE_UUID1, jsonutils.loads(req.body))
+ result = self.attachments.create(req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
self.assertEqual(result['interfaceAttachment']['net_id'],
FAKE_NET_ID2)
- def test_attach_interface_with_port_and_network_id(self):
+ def _attach_interface_bad_request_case(self, body):
self.stubs.Set(compute_api.API, 'attach_interface',
fake_attach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/attach')
+ req = webob.Request.blank(self.url + '/attach')
req.method = 'POST'
- req.body = jsonutils.dumps({'interfaceAttachment':
- {'port_id': FAKE_PORT_ID1,
- 'net_id': FAKE_NET_ID2}})
+ req.body = jsonutils.dumps(body)
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
- attachments.create, req, FAKE_UUID1,
- jsonutils.loads(req.body))
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_attach_interface_with_port_and_network_id(self):
+ body = {
+ 'interfaceAttachment': {
+ 'port_id': FAKE_PORT_ID1,
+ 'net_id': FAKE_NET_ID2
+ }
+ }
+ self._attach_interface_bad_request_case(body)
def test_attach_interface_with_invalid_data(self):
+ body = {
+ 'interfaceAttachment': {
+ 'net_id': FAKE_BAD_NET_ID
+ }
+ }
+ self._attach_interface_bad_request_case(body)
+
+ def test_attach_interface_with_invalid_state(self):
+ def fake_attach_interface_invalid_state(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ instance_uuid='', attr='', state='',
+ method='attach_interface')
+
self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/attach')
+ fake_attach_interface_invalid_state)
+ req = webob.Request.blank(self.url + '/attach')
req.method = 'POST'
req.body = jsonutils.dumps({'interfaceAttachment':
- {'net_id': 'bad_id'}})
+ {'net_id': FAKE_NET_ID1}})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPBadRequest,
- attachments.create, req, FAKE_UUID1,
- jsonutils.loads(req.body))
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+ def test_detach_interface_with_invalid_state(self):
+ def fake_detach_interface_invalid_state(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ instance_uuid='', attr='', state='',
+ method='detach_interface')
-class InterfaceAttachTestsWithMock(test.NoDBTestCase):
- def setUp(self):
- super(InterfaceAttachTestsWithMock, self).setUp()
- self.flags(auth_strategy=None, group='neutron')
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
- self.context = context.get_admin_context()
+ self.stubs.Set(compute_api.API, 'detach_interface',
+ fake_detach_interface_invalid_state)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'DELETE'
+ req.body = jsonutils.dumps({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exc.HTTPConflict,
+ self.attachments.delete,
+ req,
+ FAKE_UUID1,
+ FAKE_NET_ID1)
@mock.patch.object(compute_api.API, 'get')
@mock.patch.object(compute_api.API, 'attach_interface')
@@ -299,15 +360,53 @@ def test_attach_interface_fixed_ip_already_in_use(self,
get_mock.side_effect = fake_get_instance
attach_mock.side_effect = exception.FixedIpAlreadyInUse(
address='10.0.2.2', instance_uuid=FAKE_UUID1)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v2/fake/os-interfaces/attach')
+ req = webob.Request.blank(self.url + '/attach')
req.method = 'POST'
req.body = jsonutils.dumps({})
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
- attachments.create, req, FAKE_UUID1,
- jsonutils.loads(req.body))
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
attach_mock.assert_called_once_with(self.context, {}, None, None, None)
get_mock.assert_called_once_with(self.context, FAKE_UUID1,
- want_objects=True)
+ want_objects=True,
+ expected_attrs=None)
+
+ def _test_attach_interface_with_invalid_parameter(self, param):
+ self.stubs.Set(compute_api.API, 'attach_interface',
+ fake_attach_interface)
+ req = webob.Request.blank(self.url + '/attach')
+ req.method = 'POST'
+ req.body = jsonutils.dumps({'interface_attachment': param})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exception.ValidationError,
+ self.attachments.create, req, FAKE_UUID1,
+ body=jsonutils.loads(req.body))
+
+ def test_attach_interface_instance_with_non_uuid_net_id(self):
+ param = {'net_id': 'non_uuid'}
+ self._test_attach_interface_with_invalid_parameter(param)
+
+ def test_attach_interface_instance_with_non_uuid_port_id(self):
+ param = {'port_id': 'non_uuid'}
+ self._test_attach_interface_with_invalid_parameter(param)
+
+ def test_attach_interface_instance_with_non_array_fixed_ips(self):
+ param = {'fixed_ips': 'non_array'}
+ self._test_attach_interface_with_invalid_parameter(param)
+
+
+class InterfaceAttachTestsV2(InterfaceAttachTestsV21):
+ url = '/v2/fake/os-interfaces'
+ controller_cls = attach_interfaces_v2.InterfaceAttachmentController
+
+ def test_attach_interface_instance_with_non_uuid_net_id(self):
+ pass
+
+ def test_attach_interface_instance_with_non_uuid_port_id(self):
+ pass
+
+ def test_attach_interface_instance_with_non_array_fixed_ips(self):
+ pass
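Running the entire V21 suite against the v2 controller is done by subclassing: InterfaceAttachTestsV2 inherits every test and overrides only the three schema-validation cases with pass, since the v2 extension has no JSON-schema layer. The same trick in miniature, with hypothetical names:

import unittest

class BaseTests(unittest.TestCase):
    """Stands in for InterfaceAttachTestsV21."""

    def test_common(self):
        self.assertEqual(2 + 2, 4)      # runs for both API versions

    def test_schema_validation(self):
        self.assertEqual(int('1'), 1)   # meaningful only on v2.1

class V2Tests(BaseTests):
    """Stands in for InterfaceAttachTestsV2: inherit all, neuter some."""

    def test_schema_validation(self):
        pass  # v2 has no request schema, so the check becomes a no-op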
diff --git a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
index 3bfd0748a0..1280e066f9 100644
--- a/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
+++ b/nova/tests/api/openstack/compute/contrib/test_availability_zone.py
@@ -18,26 +18,36 @@
import webob
from nova.api.openstack.compute.contrib import availability_zone
+from nova.api.openstack.compute import servers
+from nova.api.openstack import extensions
from nova import availability_zones
+from nova.compute import api as compute_api
+from nova.compute import flavors
from nova import context
from nova import db
from nova.openstack.common import jsonutils
from nova import servicegroup
from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests import fake_instance
+from nova.tests.image import fake
from nova.tests import matchers
+from nova.tests.objects import test_service
+
+FAKE_UUID = fakes.FAKE_UUID
def fake_service_get_all(context, disabled=None):
def __fake_service(binary, availability_zone,
created_at, updated_at, host, disabled):
- return {'binary': binary,
- 'availability_zone': availability_zone,
- 'available_zones': availability_zone,
- 'created_at': created_at,
- 'updated_at': updated_at,
- 'host': host,
- 'disabled': disabled}
+ return dict(test_service.fake_service,
+ binary=binary,
+ availability_zone=availability_zone,
+ available_zones=availability_zone,
+ created_at=created_at,
+ updated_at=updated_at,
+ host=host,
+ disabled=disabled)
if disabled:
return [__fake_service("nova-compute", "zone-2",
@@ -226,6 +236,138 @@ def test_availability_zone_detail_no_services(self):
matchers.DictMatches(expected_response))
+class ServersControllerCreateTest(test.TestCase):
+
+ def setUp(self):
+ """Shared implementation for tests below that create an instance."""
+ super(ServersControllerCreateTest, self).setUp()
+
+ self.flags(verbose=True,
+ enable_instance_password=True)
+ self.instance_cache_num = 0
+
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers.Controller(self.ext_mgr)
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': FAKE_UUID,
+ 'instance_type': dict(inst_type),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+ return instance
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_create', instance_create)
+
+ def _test_create_extra(self, params):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ server = self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_availability_zone_disabled(self):
+ availability_zone = [{'availability_zone': 'foo'}]
+ params = {'availability_zone': availability_zone}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self.assertIsNone(kwargs['availability_zone'])
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params)
+
+ def test_create_instance_with_availability_zone(self):
+ self.ext_mgr.extensions = {'os-availability-zone': 'fake'}
+
+ def create(*args, **kwargs):
+ self.assertIn('availability_zone', kwargs)
+ self.assertEqual('nova', kwargs['availability_zone'])
+ return old_create(*args, **kwargs)
+
+ old_create = compute_api.API.create
+ self.stubs.Set(compute_api.API, 'create', create)
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/v2/fake/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'config_drive_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'availability_zone': 'nova',
+ },
+ }
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ admin_context = context.get_admin_context()
+ db.service_create(admin_context, {'host': 'host1_zones',
+ 'binary': "nova-compute",
+ 'topic': 'compute',
+ 'report_count': 0})
+ agg = db.aggregate_create(admin_context,
+ {'name': 'agg1'}, {'availability_zone': 'nova'})
+ db.aggregate_host_add(admin_context, agg['id'], 'host1_zones')
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_without_availability_zone(self):
+ self.ext_mgr.extensions = {'os-availability-zone': 'fake'}
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = 'http://localhost/v2/fake/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'config_drive_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ },
+ }
+
+ req = fakes.HTTPRequest.blank('/v2/fake/servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+
class AvailabilityZoneSerializerTest(test.NoDBTestCase):
def test_availability_zone_index_detail_serializer(self):
def _verify_zone(zone_dict, tree):
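test_create_instance_with_availability_zone has to create an aggregate before booting, because an availability zone in Nova is essentially aggregate metadata: the zone name lives under the aggregate's availability_zone key, and hosts join the zone by joining the aggregate. A toy model of that lookup; the structures and hosts_in_zone are hypothetical:

aggregates = [{'name': 'agg1',
               'metadata': {'availability_zone': 'nova'},
               'hosts': ['host1_zones']}]

def hosts_in_zone(zone):
    # A zone's membership is derived from aggregate metadata.
    return sorted(host
                  for agg in aggregates
                  if agg['metadata'].get('availability_zone') == zone
                  for host in agg['hosts'])

assert hosts_in_zone('nova') == ['host1_zones']
assert hosts_in_zone('other') == []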
diff --git a/nova/tests/api/openstack/compute/contrib/test_cells.py b/nova/tests/api/openstack/compute/contrib/test_cells.py
index 414ad33213..3fc6bfb525 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cells.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cells.py
@@ -133,7 +133,7 @@ def test_get_cell_by_name(self):
self.assertEqual(cell['rpc_host'], 'r1.example.org')
self.assertNotIn('password', cell)
- def test_cell_delete(self):
+ def _cell_delete(self):
call_info = {'delete_called': 0}
def fake_cell_delete(inst, context, cell_name):
@@ -143,9 +143,20 @@ def fake_cell_delete(inst, context, cell_name):
self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
self.controller.delete(req, 'cell999')
self.assertEqual(call_info['delete_called'], 1)
+ def test_cell_delete(self):
+ # Test cell delete with just cell policy
+ rules = {"default": "is_admin:true",
+ "compute_extension:cells": "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_delete()
+
+ def test_cell_delete_with_delete_policy(self):
+ self._cell_delete()
+
def test_delete_bogus_cell_raises(self):
def fake_cell_delete(inst, context, cell_name):
return 0
@@ -157,7 +168,19 @@ def fake_cell_delete(inst, context, cell_name):
self.assertRaises(exc.HTTPNotFound, self.controller.delete, req,
'cell999')
- def test_cell_create_parent(self):
+ def test_cell_delete_fails_for_invalid_policy(self):
+ def fake_cell_delete(inst, context, cell_name):
+ pass
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ req.environ["nova.context"].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.delete, req, 'cell999')
+
+ def _cell_create_parent(self):
body = {'cell': {'name': 'meow',
'username': 'fred',
'password': 'fubar',
@@ -167,6 +190,7 @@ def test_cell_create_parent(self):
'is_parent': False}}
req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
res_dict = self.controller.create(req, body)
cell = res_dict['cell']
@@ -177,7 +201,17 @@ def test_cell_create_parent(self):
self.assertNotIn('password', cell)
self.assertNotIn('is_parent', cell)
- def test_cell_create_child(self):
+ def test_cell_create_parent(self):
+ # Test create with just cells policy
+ rules = {"default": "is_admin:true",
+ "compute_extension:cells": "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_create_parent()
+
+ def test_cell_create_parent_with_create_policy(self):
+ self._cell_create_parent()
+
+ def _cell_create_child(self):
body = {'cell': {'name': 'meow',
'username': 'fred',
'password': 'fubar',
@@ -185,6 +219,7 @@ def test_cell_create_child(self):
'type': 'child'}}
req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
res_dict = self.controller.create(req, body)
cell = res_dict['cell']
@@ -195,6 +230,16 @@ def test_cell_create_child(self):
self.assertNotIn('password', cell)
self.assertNotIn('is_parent', cell)
+ def test_cell_create_child(self):
+ # Test create with just cells policy
+ rules = {"default": "is_admin:true",
+ "compute_extension:cells": "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_create_child()
+
+ def test_cell_create_child_with_create_policy(self):
+ self._cell_create_child()
+
def test_cell_create_no_name_raises(self):
body = {'cell': {'username': 'moocow',
'password': 'secret',
@@ -202,6 +247,7 @@ def test_cell_create_no_name_raises(self):
'type': 'parent'}}
req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
self.controller.create, req, body)
@@ -213,6 +259,7 @@ def test_cell_create_name_empty_string_raises(self):
'type': 'parent'}}
req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
self.controller.create, req, body)
@@ -224,6 +271,7 @@ def test_cell_create_name_with_bang_raises(self):
'type': 'parent'}}
req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
self.controller.create, req, body)
@@ -235,6 +283,7 @@ def test_cell_create_name_with_dot_raises(self):
'type': 'parent'}}
req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
self.controller.create, req, body)
@@ -246,14 +295,24 @@ def test_cell_create_name_with_invalid_type_raises(self):
'type': 'invalid'}}
req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
self.controller.create, req, body)
- def test_cell_update(self):
+ def test_cell_create_fails_for_invalid_policy(self):
+ body = {'cell': {'name': 'fake'}}
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req, body)
+
+ def _cell_update(self):
body = {'cell': {'username': 'zeb',
'password': 'sneaky'}}
req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
res_dict = self.controller.update(req, 'cell1', body)
cell = res_dict['cell']
@@ -262,12 +321,31 @@ def test_cell_update(self):
self.assertEqual(cell['username'], 'zeb')
self.assertNotIn('password', cell)
+ def test_cell_update(self):
+ # Test cell update with just cell policy
+ rules = {"default": "is_admin:true",
+ "compute_extension:cells": "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_update()
+
+ def test_cell_update_with_update_policy(self):
+ self._cell_update()
+
+ def test_cell_update_fails_for_invalid_policy(self):
+ body = {'cell': {'name': 'got_changed'}}
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req, body)
+
def test_cell_update_empty_name_raises(self):
body = {'cell': {'name': '',
'username': 'zeb',
'password': 'sneaky'}}
req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
self.controller.update, req, 'cell1', body)
@@ -277,6 +355,7 @@ def test_cell_update_invalid_type_raises(self):
'password': 'sneaky'}}
req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
self.assertRaises(exc.HTTPBadRequest,
self.controller.update, req, 'cell1', body)
@@ -284,6 +363,7 @@ def test_cell_update_without_type_specified(self):
body = {'cell': {'username': 'wingwj'}}
req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
res_dict = self.controller.update(req, 'cell1', body)
cell = res_dict['cell']
@@ -297,10 +377,12 @@ def test_cell_update_with_type_specified(self):
body2 = {'cell': {'username': 'wingwj', 'type': 'parent'}}
req1 = self._get_request("cells/cell1")
+ req1.environ['nova.context'] = self.context
res_dict1 = self.controller.update(req1, 'cell1', body1)
cell1 = res_dict1['cell']
req2 = self._get_request("cells/cell2")
+ req2.environ['nova.context'] = self.context
res_dict2 = self.controller.update(req2, 'cell2', body2)
cell2 = res_dict2['cell']
@@ -406,6 +488,7 @@ def sync_instances(self, context, **kwargs):
self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
req = self._get_request("cells/sync_instances")
+ req.environ['nova.context'] = self.context
body = {}
self.controller.sync_instances(req, body=body)
self.assertIsNone(call_info['project_id'])
@@ -455,6 +538,20 @@ def sync_instances(self, context, **kwargs):
self.assertRaises(exc.HTTPBadRequest,
self.controller.sync_instances, req, body=body)
+ def test_sync_instances_fails_for_invalid_policy(self):
+ def sync_instances(self, context, **kwargs):
+ pass
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
+
+ req = self._get_request("cells/sync_instances")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+
+ body = {}
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.sync_instances, req, body=body)
+
def test_cells_disabled(self):
self.flags(enable=False, group='cells')
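The new policy tests use one recipe in two directions: narrow the rules so only the blanket compute_extension:cells rule remains (proving the old rule still authorizes each action), or flip is_admin off on the request context and expect PolicyNotAuthorized. A self-contained sketch of that dual check against a toy enforcer; enforce and PolicyError are stand-ins for nova.policy:

class PolicyError(Exception):
    pass

def enforce(rules, action, context):
    # Toy enforcer: the only rule form modelled is 'is_admin:true'.
    rule = rules.get(action, rules.get('default'))
    if rule == 'is_admin:true' and not context.get('is_admin'):
        raise PolicyError(action)

rules = {'default': 'is_admin:true',
         'compute_extension:cells': 'is_admin:true'}

enforce(rules, 'compute_extension:cells', {'is_admin': True})  # authorized
try:
    enforce(rules, 'compute_extension:cells', {'is_admin': False})
    raise AssertionError('should have been denied')
except PolicyError:
    pass  # denied, as the *_fails_for_invalid_policy tests expect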
diff --git a/nova/tests/api/openstack/compute/contrib/test_certificates.py b/nova/tests/api/openstack/compute/contrib/test_certificates.py
index 12cc06f710..367a48aad6 100644
--- a/nova/tests/api/openstack/compute/contrib/test_certificates.py
+++ b/nova/tests/api/openstack/compute/contrib/test_certificates.py
@@ -15,10 +15,16 @@
# under the License.
from lxml import etree
+import mock
import mox
+from webob import exc
from nova.api.openstack.compute.contrib import certificates
+from nova.cert import rpcapi
from nova import context
+from nova import exception
+from nova.openstack.common import policy as common_policy
+from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
@@ -49,6 +55,18 @@ def test_certificates_show_root(self):
response = {'certificate': {'data': 'fakeroot', 'private_key': None}}
self.assertEqual(res_dict, response)
+ def test_certificates_show_policy_failed(self):
+ rules = {
+ "compute_extension:certificates":
+ common_policy.parse_rule("!")
+ }
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/root')
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.show, req, 'root')
+ self.assertIn("compute_extension:certificates",
+ exc.format_message())
+
def test_certificates_create_certificate(self):
self.mox.StubOutWithMock(self.controller.cert_rpcapi,
'generate_x509_cert')
@@ -69,6 +87,27 @@ def test_certificates_create_certificate(self):
}
self.assertEqual(res_dict, response)
+ def test_certificates_create_policy_failed(self):
+ rules = {
+ "compute_extension:certificates":
+ common_policy.parse_rule("!")
+ }
+ policy.set_rules(rules)
+ req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/')
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req)
+ self.assertIn("compute_extension:certificates",
+ exc.format_message())
+
+ @mock.patch.object(rpcapi.CertAPI, 'fetch_ca',
+ side_effect=exception.CryptoCAFileNotFound(project='fake'))
+ def test_non_exist_certificates_show(self, mock_fetch_ca):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-certificates/root')
+ self.assertRaises(
+ exc.HTTPNotFound,
+ self.controller.show,
+ req, 'root')
+
class CertificatesSerializerTest(test.NoDBTestCase):
def test_index_serializer(self):
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
index 1284ed8f39..b17722902d 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe_update.py
@@ -57,7 +57,7 @@ def test_cloudpipe_configure_project(self):
def test_cloudpipe_configure_project_bad_url(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-projectx')
- body = {"vpn_ip": "1.2.3.4", "vpn_port": 222}
+ body = {"configure_project": {"vpn_ip": "1.2.3.4", "vpn_port": 222}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req,
'configure-projectx', body)
@@ -65,7 +65,16 @@ def test_cloudpipe_configure_project_bad_url(self):
def test_cloudpipe_configure_project_bad_data(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-cloudpipe/configure-project')
- body = {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}
- self.assertRaises(webob.exc.HTTPUnprocessableEntity,
+ body = {"configure_project": {"vpn_ipxx": "1.2.3.4", "vpn_port": 222}}
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.update, req,
+ 'configure-project', body)
+
+ def test_cloudpipe_configure_project_bad_vpn_port(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-cloudpipe/configure-project')
+ body = {"configure_project": {"vpn_ipxx": "1.2.3.4",
+ "vpn_port": "foo"}}
+ self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req,
'configure-project', body)
diff --git a/nova/tests/api/openstack/compute/contrib/test_config_drive.py b/nova/tests/api/openstack/compute/contrib/test_config_drive.py
index 1b68e724b4..a0242d7544 100644
--- a/nova/tests/api/openstack/compute/contrib/test_config_drive.py
+++ b/nova/tests/api/openstack/compute/contrib/test_config_drive.py
@@ -13,37 +13,57 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
+from oslo.config import cfg
import webob
-from nova.api.openstack.compute.contrib import config_drive
+from nova.api.openstack.compute.contrib import config_drive as config_drive_v2
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import config_drive \
+ as config_drive_v21
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
+from nova.api.openstack.compute import servers as servers_v2
+from nova.api.openstack import extensions
+from nova.compute import api as compute_api
+from nova.compute import flavors
from nova import db
+from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
-import nova.tests.image.fake
+from nova.tests import fake_instance
+from nova.tests.image import fake
+
+
+CONF = cfg.CONF
-class ConfigDriveTest(test.TestCase):
+class ConfigDriveTestV21(test.TestCase):
+ base_url = '/v3/servers/'
+
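+ # _setup_wsgi and _get_config_drive_controller are hook methods that
+ # ConfigDriveTestV2 below overrides, so both API versions share the
+ # same test cases.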
+ def _setup_wsgi(self):
+ self.app = fakes.wsgi_app_v3(init_only=('servers', 'os-config-drive'))
+
+ def _get_config_drive_controller(self):
+ return config_drive_v21.ConfigDriveController()
def setUp(self):
- super(ConfigDriveTest, self).setUp()
- self.Controller = config_drive.Controller()
+ super(ConfigDriveTestV21, self).setUp()
+ self.Controller = self._get_config_drive_controller()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
- nova.tests.image.fake.stub_out_image_service(self.stubs)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Config_drive'])
+ fake.stub_out_image_service(self.stubs)
+ self._setup_wsgi()
def test_show(self):
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get())
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get())
- req = webob.Request.blank('/v2/fake/servers/1')
+ req = webob.Request.blank(self.base_url + '1')
req.headers['Content-Type'] = 'application/json'
- response = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ response = req.get_response(self.app)
self.assertEqual(response.status_int, 200)
res_dict = jsonutils.loads(response.body)
self.assertIn('config_drive', res_dict['server'])
@@ -51,9 +71,192 @@ def test_show(self):
def test_detail_servers(self):
self.stubs.Set(db, 'instance_get_all_by_filters',
fakes.fake_instance_get_all_by_filters())
- req = fakes.HTTPRequest.blank('/v2/fake/servers/detail')
- res = req.get_response(fakes.wsgi_app(init_only=('servers,')))
+ req = fakes.HTTPRequest.blank(self.base_url + 'detail')
+ res = req.get_response(self.app)
server_dicts = jsonutils.loads(res.body)['servers']
self.assertNotEqual(len(server_dicts), 0)
for server_dict in server_dicts:
self.assertIn('config_drive', server_dict)
+
+
+class ConfigDriveTestV2(ConfigDriveTestV21):
+ base_url = '/v2/fake/servers/'
+
+ def _get_config_drive_controller(self):
+ return config_drive_v2.Controller()
+
+ def _setup_wsgi(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Config_drive'])
+ self.app = fakes.wsgi_app(init_only=('servers',))
+
+
+class ServersControllerCreateTestV21(test.TestCase):
+ base_url = '/v3/'
+ bad_request = exception.ValidationError
+
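+ # _set_up_controller, _verify_config_drive and _initialize_extension
+ # are hook methods overridden by ServersControllerCreateTestV2 below,
+ # so both API versions run the same create tests.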
+ def _set_up_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ self.controller = servers_v21.ServersController(
+ extension_info=ext_info)
+ CONF.set_override('extensions_blacklist',
+ 'os-config-drive',
+ 'osapi_v3')
+ self.no_config_drive_controller = servers_v21.ServersController(
+ extension_info=ext_info)
+
+ def _verify_config_drive(self, **kwargs):
+ self.assertNotIn('config_drive', kwargs)
+
+ def _initialize_extension(self):
+ pass
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTestV21, self).setUp()
+
+ self.instance_cache_num = 0
+ self._set_up_controller()
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': fakes.FAKE_UUID,
+ 'instance_type': dict(inst_type),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ return instance
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_create', instance_create)
+
+ def _test_create_extra(self, params, override_controller=None):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ server.update(params)
+ body = dict(server=server)
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ if override_controller is not None:
+ server = override_controller.create(req, body=body).obj['server']
+ else:
+ server = self.controller.create(req, body=body).obj['server']
+
+ def test_create_instance_with_config_drive_disabled(self):
+ params = {'config_drive': "False"}
+ old_create = compute_api.API.create
+
+ def create(*args, **kwargs):
+ self._verify_config_drive(**kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(compute_api.API, 'create', create)
+ self._test_create_extra(params,
+ override_controller=self.no_config_drive_controller)
+
+ def _create_instance_body_of_config_drive(self, param):
+ self._initialize_extension()
+
+ def create(*args, **kwargs):
+ self.assertIn('config_drive', kwargs)
+ return old_create(*args, **kwargs)
+
+ old_create = compute_api.API.create
+ self.stubs.Set(compute_api.API, 'create', create)
+ image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ flavor_ref = ('http://localhost' + self.base_url + 'flavors/3')
+ body = {
+ 'server': {
+ 'name': 'config_drive_test',
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
+ 'config_drive': param,
+ },
+ }
+
+ req = fakes.HTTPRequest.blank(self.base_url + 'servers')
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ return req, body
+
+ def test_create_instance_with_config_drive(self):
+ param = True
+ req, body = self._create_instance_body_of_config_drive(param)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_config_drive_as_boolean_string(self):
+ param = 'false'
+ req, body = self._create_instance_body_of_config_drive(param)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_bad_config_drive(self):
+ param = 12345
+ req, body = self._create_instance_body_of_config_drive(param)
+ self.assertRaises(self.bad_request,
+ self.controller.create, req, body=body)
+
+ def test_create_instance_without_config_drive(self):
+ param = True
+ req, body = self._create_instance_body_of_config_drive(param)
+ del body['server']['config_drive']
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
+
+ def test_create_instance_with_empty_config_drive(self):
+ param = ''
+ req, body = self._create_instance_body_of_config_drive(param)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
+
+
+class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
+ base_url = '/v2/fake/'
+ bad_request = webob.exc.HTTPBadRequest
+
+ def _set_up_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.controller = servers_v2.Controller(self.ext_mgr)
+ self.no_config_drive_controller = None
+
+ def _verify_config_drive(self, **kwargs):
+ self.assertIsNone(kwargs['config_drive'])
+
+ def _initialize_extension(self):
+ self.ext_mgr.extensions = {'os-config-drive': 'fake'}
+
+ def test_create_instance_with_empty_config_drive(self):
+ param = ''
+ req, body = self._create_instance_body_of_config_drive(param)
+ res = self.controller.create(req, body=body).obj
+ server = res['server']
+ self.assertEqual(fakes.FAKE_UUID, server['id'])
diff --git a/nova/tests/api/openstack/compute/contrib/test_console_output.py b/nova/tests/api/openstack/compute/contrib/test_console_output.py
index d3feafb819..67684da1fe 100644
--- a/nova/tests/api/openstack/compute/contrib/test_console_output.py
+++ b/nova/tests/api/openstack/compute/contrib/test_console_output.py
@@ -115,15 +115,6 @@ def test_get_console_output_filtered_characters(self):
expect = string.digits + string.letters + string.punctuation + ' \t\n'
self.assertEqual(output, {'output': expect})
- def test_get_console_output_with_non_integer_length(self):
- body = {'os-getConsoleOutput': {'length': 'NaN'}}
- req = webob.Request.blank('/v2/fake/servers/1/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
def test_get_text_console_no_instance(self):
self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
body = {'os-getConsoleOutput': {}}
@@ -148,16 +139,26 @@ def test_get_text_console_no_instance_on_get_output(self):
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
- def test_get_text_console_bad_body(self):
- body = {}
+ def _get_console_output_bad_request_case(self, body):
req = webob.Request.blank('/v2/fake/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
-
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
+ def test_get_console_output_with_non_integer_length(self):
+ body = {'os-getConsoleOutput': {'length': 'NaN'}}
+ self._get_console_output_bad_request_case(body)
+
+ def test_get_text_console_bad_body(self):
+ body = {}
+ self._get_console_output_bad_request_case(body)
+
+ def test_get_console_output_with_length_as_float(self):
+ body = {'os-getConsoleOutput': {'length': 2.5}}
+ self._get_console_output_bad_request_case(body)
+
def test_get_console_output_not_ready(self):
self.stubs.Set(compute_api.API, 'get_console_output',
fake_get_console_output_not_ready)
@@ -170,15 +171,6 @@ def test_get_console_output_not_ready(self):
res = req.get_response(self.app)
self.assertEqual(res.status_int, 409)
- def test_get_console_output_with_length_as_float(self):
- body = {'os-getConsoleOutput': {'length': 2.5}}
- req = webob.Request.blank('/v2/fake/servers/1/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
def test_not_implemented(self):
self.stubs.Set(compute_api.API, 'get_console_output',
fakes.fake_not_implemented)
diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
index 9f60a03032..d40be0d76d 100644
--- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py
+++ b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
@@ -65,7 +65,7 @@ def fake_instance_get(context, id_):
self.stubs.Set(db, 'instance_get', fake_instance_get)
def fake_instance_get_by_uuid(context, uuid,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
for instance in FAKE_INSTANCES:
if uuid == instance['uuid']:
return instance
diff --git a/nova/tests/api/openstack/compute/contrib/test_evacuate.py b/nova/tests/api/openstack/compute/contrib/test_evacuate.py
index 5ef88da3a7..ed548e7e77 100644
--- a/nova/tests/api/openstack/compute/contrib/test_evacuate.py
+++ b/nova/tests/api/openstack/compute/contrib/test_evacuate.py
@@ -69,6 +69,11 @@ def setUp(self):
for _method in self._methods:
self.stubs.Set(compute_api.API, _method, fake_compute_api)
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Evacuate'])
+
def _get_admin_context(self, user_id='fake', project_id='fake'):
ctxt = context.get_admin_context()
ctxt.user_id = user_id
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py b/nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py
new file mode 100644
index 0000000000..187ff5a056
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_evacuate_find_host.py
@@ -0,0 +1,114 @@
+# Copyright 2013 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+import mock
+import webob
+
+from nova.compute import vm_states
+from nova import context
+from nova.objects import instance as instance_obj
+from nova.openstack.common import jsonutils
+from nova import test
+from nova.tests.api.openstack import fakes
+from nova.tests import fake_instance
+
+
+class ExtendedEvacuateFindHostTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(ExtendedEvacuateFindHostTest, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_evacuate_find_host',
+ 'Evacuate'])
+ self.UUID = uuid.uuid4()
+
+ def _get_admin_context(self, user_id='fake', project_id='fake'):
+ ctxt = context.get_admin_context()
+ ctxt.user_id = user_id
+ ctxt.project_id = project_id
+ return ctxt
+
+ def _fake_compute_api(*args, **kwargs):
+ return True
+
+ def _fake_compute_api_get(self, context, instance_id, **kwargs):
+ instance = fake_instance.fake_db_instance(id=1, uuid=instance_id,
+ task_state=None,
+ host='host1',
+ vm_state=vm_states.ACTIVE)
+ instance = instance_obj.Instance._from_db_object(context,
+ instance_obj.Instance(),
+ instance)
+ return instance
+
+ def _fake_service_get_by_compute_host(self, context, host):
+ return {'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+
+ @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
+ @mock.patch('nova.compute.api.API.get')
+ @mock.patch('nova.compute.api.API.evacuate')
+ def test_evacuate_instance_with_no_target(self, evacuate_mock,
+ api_get_mock,
+ service_get_mock):
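+ # Stacked mock.patch decorators apply bottom-up, so the innermost
+ # patch (API.evacuate) is passed as the first mock argument.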
+ service_get_mock.side_effect = self._fake_service_get_by_compute_host
+ api_get_mock.side_effect = self._fake_compute_api_get
+ evacuate_mock.side_effect = self._fake_compute_api
+
+ ctxt = self._get_admin_context()
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+ res = req.get_response(app)
+ self.assertEqual(200, res.status_int)
+ evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None,
+ mock.ANY, mock.ANY)
+
+ @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
+ @mock.patch('nova.compute.api.API.get')
+ def test_no_target_fails_if_extension_not_loaded(self, api_get_mock,
+ service_get_mock):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Evacuate'])
+ service_get_mock.side_effect = self._fake_service_get_by_compute_host
+ api_get_mock.side_effect = self._fake_compute_api_get
+
+ ctxt = self._get_admin_context()
+ app = fakes.wsgi_app(fake_auth_context=ctxt)
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % self.UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps({
+ 'evacuate': {
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'
+ }
+ })
+ req.content_type = 'application/json'
+ res = req.get_response(app)
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py b/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py
index 72be69744c..fe6586c29b 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_hypervisors.py
@@ -91,7 +91,8 @@ def test_detail(self):
host_ip='2.2.2.2')]))
def test_show_withid(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1',
+ use_admin_context=True)
result = self.controller.show(req, '1')
self.assertEqual(result, dict(hypervisor=dict(
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py b/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
index 4cd9108087..b69771c855 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_server_attributes.py
@@ -54,25 +54,24 @@ def fake_compute_get_all(*args, **kwargs):
db_list, fields)
-class ExtendedServerAttributesTest(test.TestCase):
+class ExtendedServerAttributesTestV21(test.TestCase):
content_type = 'application/json'
prefix = 'OS-EXT-SRV-ATTR:'
+ fake_url = '/v3'
def setUp(self):
- super(ExtendedServerAttributesTest, self).setUp()
+ super(ExtendedServerAttributesTestV21, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self.stubs.Set(db, 'instance_get_by_uuid', fake_compute_get)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Extended_server_attributes'])
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ res = req.get_response(
+ fakes.wsgi_app_v3(init_only=('servers',
+ 'os-extended-server-attributes')))
return res
def _get_server(self, body):
@@ -89,7 +88,7 @@ def assertServerAttributes(self, server, host, node, instance_name):
node)
def test_show(self):
- url = '/v2/fake/servers/%s' % UUID3
+ url = self.fake_url + '/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
@@ -99,7 +98,7 @@ def test_show(self):
instance_name=NAME_FMT % 1)
def test_detail(self):
- url = '/v2/fake/servers/detail'
+ url = self.fake_url + '/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
@@ -115,13 +114,30 @@ def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
-class ExtendedServerAttributesXmlTest(ExtendedServerAttributesTest):
+class ExtendedServerAttributesTestV2(ExtendedServerAttributesTestV21):
+ fake_url = '/v2/fake'
+
+ def setUp(self):
+ super(ExtendedServerAttributesTestV2, self).setUp()
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_server_attributes'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+
+class ExtendedServerAttributesXmlTest(ExtendedServerAttributesTestV2):
content_type = 'application/xml'
ext = extended_server_attributes
prefix = '{%s}' % ext.Extended_server_attributes.namespace
diff --git a/nova/tests/api/openstack/compute/contrib/test_extended_status.py b/nova/tests/api/openstack/compute/contrib/test_extended_status.py
index 7269437040..d75296232f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extended_status.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extended_status.py
@@ -52,28 +52,31 @@ def fake_compute_get_all(*args, **kwargs):
db_list, fields)
-class ExtendedStatusTest(test.TestCase):
+class ExtendedStatusTestV21(test.TestCase):
content_type = 'application/json'
prefix = 'OS-EXT-STS:'
+ fake_url = '/v3'
+
+ def _set_flags(self):
+ pass
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app_v3(
+ init_only=('servers',
+ 'os-extended-status')))
+ return res
def setUp(self):
- super(ExtendedStatusTest, self).setUp()
+ super(ExtendedStatusTestV21, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Extended_status'])
+ self._set_flags()
return_server = fakes.fake_instance_get()
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
- return res
-
def _get_server(self, body):
return jsonutils.loads(body).get('server')
@@ -87,7 +90,7 @@ def assertServerStates(self, server, vm_state, power_state, task_state):
self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
def test_show(self):
- url = '/v2/fake/servers/%s' % UUID3
+ url = self.fake_url + '/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
@@ -97,7 +100,7 @@ def test_show(self):
task_state='kayaking')
def test_detail(self):
- url = '/v2/fake/servers/detail'
+ url = self.fake_url + '/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
@@ -113,13 +116,29 @@ def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
+ url = self.fake_url + '/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
-class ExtendedStatusXmlTest(ExtendedStatusTest):
+class ExtendedStatusTestV2(ExtendedStatusTestV21):
+ fake_url = '/v2/fake'
+
+ def _set_flags(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Extended_status'])
+
+ def _make_request(self, url):
+ req = webob.Request.blank(url)
+ req.headers['Accept'] = self.content_type
+ res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ return res
+
+
+class ExtendedStatusXmlTest(ExtendedStatusTestV2):
content_type = 'application/xml'
prefix = '{%s}' % extended_status.Extended_status.namespace
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
index d416d75009..c2ade23401 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_access.py
@@ -84,7 +84,7 @@ def _has_flavor_access(flavorid, projectid):
def fake_get_all_flavors_sorted_list(context, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
- if filters == None or filters['is_public'] == None:
+ if filters is None or filters['is_public'] is None:
return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])
res = {}
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
index c9b378fe80..a583a9d31f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_manage.py
@@ -30,7 +30,7 @@
def fake_get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted='yes'):
if flavorid == 'failtest':
- raise exception.NotFound("Not found sucka!")
+ raise exception.FlavorNotFound(flavor_id=flavorid)
elif not str(flavorid) == '1234':
raise Exception("This test expects flavorid 1234, not %s" % flavorid)
if read_deleted != 'no':
@@ -96,19 +96,9 @@ def setUp(self):
self.controller = flavormanage.FlavorManageController()
self.app = fakes.wsgi_app(init_only=('flavors',))
- def test_delete(self):
- req = fakes.HTTPRequest.blank('/v2/123/flavors/1234')
- res = self.controller._delete(req, 1234)
- self.assertEqual(res.status_int, 202)
-
- # subsequent delete should fail
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller._delete, req, "failtest")
-
- def test_create(self):
- expected = {
+ self.request_body = {
"flavor": {
- "name": "azAZ09. -_",
+ "name": "test",
"ram": 512,
"vcpus": 2,
"disk": 1,
@@ -119,284 +109,180 @@ def test_create(self):
"os-flavor-access:is_public": True,
}
}
+ self.expected_flavor = self.request_body
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- body = jsonutils.loads(res.body)
- for key in expected["flavor"]:
- self.assertEqual(body["flavor"][key], expected["flavor"][key])
+ def test_delete(self):
+ req = fakes.HTTPRequest.blank('/v2/123/flavors/1234')
+ res = self.controller._delete(req, 1234)
+ self.assertEqual(res.status_int, 202)
- def test_create_invalid_name(self):
- self.stubs.UnsetAll()
- expected = {
+ # subsequent delete should fail
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._delete, req, "failtest")
+
+ def _test_create_missing_parameter(self, parameter):
+ body = {
"flavor": {
- "name": "bad !@#!$% name",
- 'id': "1",
+ "name": "azAZ09. -_",
"ram": 512,
"vcpus": 2,
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
+ "id": unicode('1234'),
"swap": 512,
"rxtx_factor": 1,
"os-flavor-access:is_public": True,
}
}
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- self.assertEqual(res.status_code, 400)
+ del body['flavor'][parameter]
- def test_create_flavor_name_is_whitespace(self):
- request_dict = {
- "flavor": {
- "name": " ",
- 'id': "12345",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
+ req = fakes.HTTPRequest.blank('/v2/123/flavors')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller._create,
+ req, body)
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(request_dict)
- res = req.get_response(self.app)
- self.assertEqual(res.status_code, 400)
+ def test_create_missing_name(self):
+ self._test_create_missing_parameter('name')
- def test_create_flavor_name_with_leading_trailing_whitespace(self):
- request_dict = {
- "flavor": {
- "name": " test ",
- 'id': "12345",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
+ def test_create_missing_ram(self):
+ self._test_create_missing_parameter('ram')
+
+ def test_create_missing_vcpus(self):
+ self._test_create_missing_parameter('vcpus')
+
+ def test_create_missing_disk(self):
+ self._test_create_missing_parameter('disk')
+
+ def _create_flavor_success_case(self, body):
url = '/v2/fake/flavors'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
- req.body = jsonutils.dumps(request_dict)
+ req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
- self.assertEqual(res.status_code, 200)
- body = jsonutils.loads(res.body)
- self.assertEqual("test", body["flavor"]["name"])
+ self.assertEqual(200, res.status_code)
+ return jsonutils.loads(res.body)
- def test_create_public_default(self):
- flavor = {
- "flavor": {
- "name": "test",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "id": 1234,
- "swap": 512,
- "rxtx_factor": 1,
- }
- }
+ def test_create(self):
+ body = self._create_flavor_success_case(self.request_body)
+ for key in self.expected_flavor["flavor"]:
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
- expected = {
- "flavor": {
- "name": "test",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "id": unicode(1234),
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
+ def test_create_public_default(self):
+ del self.request_body['flavor']['os-flavor-access:is_public']
+ body = self._create_flavor_success_case(self.request_body)
+ for key in self.expected_flavor["flavor"]:
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(flavor)
- res = req.get_response(self.app)
- body = jsonutils.loads(res.body)
- for key in expected["flavor"]:
- self.assertEqual(body["flavor"][key], expected["flavor"][key])
+ def test_create_flavor_name_with_leading_trailing_whitespace(self):
+ self.request_body['flavor']['name'] = " test "
+ body = self._create_flavor_success_case(self.request_body)
+ self.assertEqual("test", body["flavor"]["name"])
def test_create_without_flavorid(self):
- expected = {
- "flavor": {
- "name": "test",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
+ del self.request_body['flavor']['id']
+ body = self._create_flavor_success_case(self.request_body)
+ for key in self.expected_flavor["flavor"]:
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
+
+ def _create_flavor_bad_request_case(self, body):
+ self.stubs.UnsetAll()
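+ # Drop the stubs installed in setUp so the real create path
+ # validates the body; each bad-request case should fail before
+ # any (stubbed) db call is reached.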
url = '/v2/fake/flavors'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
- req.body = jsonutils.dumps(expected)
+ req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
- body = jsonutils.loads(res.body)
- for key in expected["flavor"]:
- self.assertEqual(body["flavor"][key], expected["flavor"][key])
+ self.assertEqual(res.status_code, 400)
- def test_create_without_flavorname(self):
- expected = {
- "flavor": {
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
+ def test_create_invalid_name(self):
+ self.request_body['flavor']['name'] = 'bad !@#!$% name'
+ self._create_flavor_bad_request_case(self.request_body)
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
+ def test_create_flavor_name_is_whitespace(self):
+ self.request_body['flavor']['name'] = ' '
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_name_too_long(self):
+ self.request_body['flavor']['name'] = 'a' * 256
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_flavorname(self):
+ del self.request_body['flavor']['name']
+ self._create_flavor_bad_request_case(self.request_body)
def test_create_empty_body(self):
- self.stubs.UnsetAll()
- expected = {
+ body = {
"flavor": {}
}
-
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- self.assertEqual(res.status_code, 400)
+ self._create_flavor_bad_request_case(body)
def test_create_no_body(self):
- self.stubs.UnsetAll()
- expected = {}
-
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- self.assertEqual(res.status_code, 400)
+ body = {}
+ self._create_flavor_bad_request_case(body)
def test_create_invalid_format_body(self):
- self.stubs.UnsetAll()
- expected = {
+ body = {
"flavor": []
}
-
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- self.assertEqual(res.status_code, 400)
+ self._create_flavor_bad_request_case(body)
def test_create_invalid_flavorid(self):
- self.stubs.UnsetAll()
- expected = {
- "flavor": {
- "name": "test",
- 'id': "!@#!$#!$^#&^$&",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
-
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- self.assertEqual(res.status_code, 400)
+ self.request_body['flavor']['id'] = "!@#!$#!$^#&^$&"
+ self._create_flavor_bad_request_case(self.request_body)
def test_create_check_flavor_id_length(self):
- self.stubs.UnsetAll()
MAX_LENGTH = 255
- expected = {
- "flavor": {
- "name": "test",
- 'id': "a" * (MAX_LENGTH + 1),
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
-
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- self.assertEqual(res.status_code, 400)
+ self.request_body['flavor']['id'] = "a" * (MAX_LENGTH + 1)
+ self._create_flavor_bad_request_case(self.request_body)
def test_create_with_leading_trailing_whitespaces_in_flavor_id(self):
- self.stubs.UnsetAll()
- expected = {
- "flavor": {
- "name": "test",
- 'id': " bad_id ",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True,
- }
- }
+ self.request_body['flavor']['id'] = " bad_id "
+ self._create_flavor_bad_request_case(self.request_body)
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- self.assertEqual(res.status_code, 400)
+ def test_create_without_ram(self):
+ del self.request_body['flavor']['ram']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_0_ram(self):
+ self.request_body['flavor']['ram'] = 0
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_vcpus(self):
+ del self.request_body['flavor']['vcpus']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_0_vcpus(self):
+ self.request_body['flavor']['vcpus'] = 0
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_without_disk(self):
+ del self.request_body['flavor']['disk']
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_disk(self):
+ self.request_body['flavor']['disk'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_ephemeral(self):
+ self.request_body['flavor']['OS-FLV-EXT-DATA:ephemeral'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_swap(self):
+ self.request_body['flavor']['swap'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_minus_rxtx_factor(self):
+ self.request_body['flavor']['rxtx_factor'] = -1
+ self._create_flavor_bad_request_case(self.request_body)
+
+ def test_create_with_non_boolean_is_public(self):
+ self.request_body['flavor']['os-flavor-access:is_public'] = 123
+ self._create_flavor_bad_request_case(self.request_body)
def test_flavor_exists_exception_returns_409(self):
expected = {
@@ -449,11 +335,6 @@ class FakeRequest(object):
class PrivateFlavorManageTest(test.TestCase):
def setUp(self):
super(PrivateFlavorManageTest, self).setUp()
- # self.stubs.Set(flavors,
- # "get_flavor_by_flavor_id",
- # fake_get_flavor_by_flavor_id)
- # self.stubs.Set(flavors, "destroy", fake_destroy)
- # self.stubs.Set(flavors, "create", fake_create)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
@@ -462,10 +343,11 @@ def setUp(self):
self.controller = flavormanage.FlavorManageController()
self.flavor_access_controller = flavor_access.FlavorAccessController()
- self.app = fakes.wsgi_app(init_only=('flavors',))
-
- def test_create_private_flavor_should_not_grant_flavor_access(self):
- expected = {
+ self.ctxt = context.RequestContext('fake', 'fake',
+ is_admin=True, auth_token=True)
+ self.app = fakes.wsgi_app(init_only=('flavors',),
+ fake_auth_context=self.ctxt)
+ self.expected = {
"flavor": {
"name": "test",
"ram": 512,
@@ -473,59 +355,37 @@ def test_create_private_flavor_should_not_grant_flavor_access(self):
"disk": 1,
"OS-FLV-EXT-DATA:ephemeral": 1,
"swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": False
+ "rxtx_factor": 1
}
}
- ctxt = context.RequestContext('fake', 'fake',
- is_admin=True, auth_token=True)
- self.app = fakes.wsgi_app(init_only=('flavors',),
- fake_auth_context=ctxt)
+ def _get_response(self):
url = '/v2/fake/flavors'
req = webob.Request.blank(url)
req.headers['Content-Type'] = 'application/json'
req.method = 'POST'
- req.body = jsonutils.dumps(expected)
+ req.body = jsonutils.dumps(self.expected)
res = req.get_response(self.app)
- body = jsonutils.loads(res.body)
- for key in expected["flavor"]:
- self.assertEqual(body["flavor"][key], expected["flavor"][key])
+ return jsonutils.loads(res.body)
+
+ def test_create_private_flavor_should_not_grant_flavor_access(self):
+ self.expected["flavor"]["os-flavor-access:is_public"] = False
+ body = self._get_response()
+ for key in self.expected["flavor"]:
+ self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
flavor_access_body = self.flavor_access_controller.index(
FakeRequest(), body["flavor"]["id"])
expected_flavor_access_body = {
- "tenant_id": "%s" % ctxt.project_id,
+ "tenant_id": "%s" % self.ctxt.project_id,
"flavor_id": "%s" % body["flavor"]["id"]
}
self.assertNotIn(expected_flavor_access_body,
flavor_access_body["flavor_access"])
def test_create_public_flavor_should_not_create_flavor_access(self):
- expected = {
- "flavor": {
- "name": "test",
- "ram": 512,
- "vcpus": 2,
- "disk": 1,
- "OS-FLV-EXT-DATA:ephemeral": 1,
- "swap": 512,
- "rxtx_factor": 1,
- "os-flavor-access:is_public": True
- }
- }
-
- ctxt = context.RequestContext('fake', 'fake',
- is_admin=True, auth_token=True)
- self.app = fakes.wsgi_app(init_only=('flavors',),
- fake_auth_context=ctxt)
+ self.expected["flavor"]["os-flavor-access:is_public"] = True
self.mox.StubOutWithMock(flavors, "add_flavor_access")
self.mox.ReplayAll()
- url = '/v2/fake/flavors'
- req = webob.Request.blank(url)
- req.headers['Content-Type'] = 'application/json'
- req.method = 'POST'
- req.body = jsonutils.dumps(expected)
- res = req.get_response(self.app)
- body = jsonutils.loads(res.body)
- for key in expected["flavor"]:
- self.assertEqual(body["flavor"][key], expected["flavor"][key])
+ body = self._get_response()
+ for key in self.expected["flavor"]:
+ self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
index 1136facec5..9494539a4b 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavor_swap.py
@@ -38,7 +38,7 @@
}
-#TOD(jogo) dedup these across nova.api.openstack.contrib.test_flavor*
+# TODO(jogo) dedup these across nova.api.openstack.contrib.test_flavor*
def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
return FAKE_FLAVORS['flavor %s' % flavorid]
diff --git a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py
index f9b549a330..0da6253310 100644
--- a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py
+++ b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py
@@ -151,13 +151,15 @@ def test_create(self):
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
- body = {"extra_specs": {"key1": "value1"}}
+ body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}}
req = fakes.HTTPRequest.blank('/v2/fake/flavors/1/os-extra_specs',
use_admin_context=True)
res_dict = self.controller.create(req, 1, body)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
+ self.assertEqual(0.5, res_dict['extra_specs']['key2'])
+ self.assertEqual(5, res_dict['extra_specs']['key3'])
def test_create_no_admin(self):
self.stubs.Set(nova.db,
@@ -211,6 +213,9 @@ def test_create_empty_body(self):
def test_create_non_dict_extra_specs(self):
self._test_create_bad_request({"extra_specs": "non_dict"})
+ def test_create_non_string_key(self):
+ self._test_create_bad_request({"extra_specs": {None: "value1"}})
+
def test_create_non_string_value(self):
self._test_create_bad_request({"extra_specs": {"key1": None}})
@@ -225,6 +230,10 @@ def test_create_long_value(self):
value = "a" * 256
self._test_create_bad_request({"extra_specs": {"key1": value}})
+ def test_create_really_long_integer_value(self):
+ value = 10 ** 1000
+ self._test_create_bad_request({"extra_specs": {"key1": value}})
+
@mock.patch('nova.db.flavor_extra_specs_update_or_create')
def test_create_invalid_specs_key(self, mock_flavor_extra_specs):
invalid_keys = ("key1/", "", "$$akey$", "!akey", "")
@@ -292,6 +301,9 @@ def test_update_item_too_many_keys(self):
def test_update_item_non_dict_extra_specs(self):
self._test_update_item_bad_request("non_dict")
+ def test_update_item_non_string_key(self):
+ self._test_update_item_bad_request({None: "value1"})
+
def test_update_item_non_string_value(self):
self._test_update_item_bad_request({"key1": None})
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py b/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py
index 91a7d0a53d..da0e62bcf1 100644
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ip_pools.py
@@ -23,8 +23,7 @@
def fake_get_floating_ip_pools(self, context):
- return [{'name': 'nova'},
- {'name': 'other'}]
+ return ['nova', 'other']
class FloatingIpPoolTest(test.NoDBTestCase):
@@ -41,16 +40,16 @@ def test_translate_floating_ip_pools_view(self):
view = floating_ip_pools._translate_floating_ip_pools_view(pools)
self.assertIn('floating_ip_pools', view)
self.assertEqual(view['floating_ip_pools'][0]['name'],
- pools[0]['name'])
+ pools[0])
self.assertEqual(view['floating_ip_pools'][1]['name'],
- pools[1]['name'])
+ pools[1])
def test_floating_ips_pools_list(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ip-pools')
res_dict = self.controller.index(req)
pools = fake_get_floating_ip_pools(None, self.context)
- response = {'floating_ip_pools': pools}
+ response = {'floating_ip_pools': [{'name': name} for name in pools]}
self.assertEqual(res_dict, response)
diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
index 615c3be557..27fc434a3c 100644
--- a/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/compute/contrib/test_floating_ips.py
@@ -344,6 +344,15 @@ def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock):
self.assertIn('IP allocation over quota in pool non_existent_pool.',
ex.explanation)
+ @mock.patch('nova.network.api.API.allocate_floating_ip',
+ side_effect=exception.FloatingIpPoolNotFound())
+ def test_floating_ip_create_with_unknown_pool(self, allocate_mock):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-floating-ips')
+ ex = self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller.create, req, {'pool': 'non_existent_pool'})
+
+ self.assertIn('Floating ip pool not found.', ex.explanation)
+
def test_floating_ip_allocate(self):
def fake1(*args, **kwargs):
pass
diff --git a/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py b/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py
index 905137eafb..b840968ca2 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hide_server_addresses.py
@@ -41,23 +41,25 @@ def _return_server(*_args, **_kwargs):
return _return_server
-class HideServerAddressesTest(test.TestCase):
+class HideServerAddressesTestV21(test.TestCase):
content_type = 'application/json'
+ base_url = '/v3/servers'
+
+ def _setup_wsgi(self):
+ self.wsgi_app = fakes.wsgi_app_v3(
+ init_only=('servers', 'os-hide-server-addresses'))
def setUp(self):
- super(HideServerAddressesTest, self).setUp()
+ super(HideServerAddressesTestV21, self).setUp()
fakes.stub_out_nw_api(self.stubs)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Hide_server_addresses'])
return_server = fakes.fake_instance_get()
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self._setup_wsgi()
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app(init_only=('servers',)))
+ res = req.get_response(self.wsgi_app)
return res
@staticmethod
@@ -85,7 +87,7 @@ def test_show_hides_in_building(self):
self.stubs.Set(compute.api.API, 'get',
fake_compute_get(instance_id, uuid=uuid,
vm_state=vm_states.BUILDING))
- res = self._make_request('/v2/fake/servers/%s' % uuid)
+ res = self._make_request(self.base_url + '/%s' % uuid)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
@@ -98,7 +100,7 @@ def test_show(self):
self.stubs.Set(compute.api.API, 'get',
fake_compute_get(instance_id, uuid=uuid,
vm_state=vm_states.ACTIVE))
- res = self._make_request('/v2/fake/servers/%s' % uuid)
+ res = self._make_request(self.base_url + '/%s' % uuid)
self.assertEqual(res.status_int, 200)
server = self._get_server(res.body)
@@ -118,7 +120,7 @@ def get_all(*args, **kwargs):
args[1], objects.InstanceList(), instances, fields)
self.stubs.Set(compute.api.API, 'get_all', get_all)
- res = self._make_request('/v2/fake/servers/detail')
+ res = self._make_request(self.base_url + '/detail')
self.assertEqual(res.status_int, 200)
servers = self._get_servers(res.body)
@@ -136,12 +138,23 @@ def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- res = self._make_request('/v2/fake/servers/' + fakes.get_fake_uuid())
+ res = self._make_request(self.base_url + '/' + fakes.get_fake_uuid())
self.assertEqual(res.status_int, 404)
-class HideAddressesXmlTest(HideServerAddressesTest):
+class HideServerAddressesTestV2(HideServerAddressesTestV21):
+ base_url = '/v2/fake/servers'
+
+ def _setup_wsgi(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Hide_server_addresses'])
+ self.wsgi_app = fakes.wsgi_app(init_only=('servers',))
+
+
+class HideAddressesXmlTest(HideServerAddressesTestV2):
content_type = 'application/xml'
@staticmethod
diff --git a/nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py b/nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py
new file mode 100644
index 0000000000..1d8cd95358
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_hypervisor_status.py
@@ -0,0 +1,80 @@
+# Copyright 2014 Intel Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+import mock
+
+from nova.api.openstack.compute.contrib import hypervisors
+from nova.tests.api.openstack.compute.contrib import test_hypervisors
+
+
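+# TEST_HYPER extends the shared hypervisor fixture with the service
+# fields the os-hypervisor-status extension reports on: 'status' is
+# derived from service['disabled'] and 'state' from servicegroup
+# liveness.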
+TEST_HYPER = dict(test_hypervisors.TEST_HYPERS[0],
+ service=dict(id=1,
+ host="compute1",
+ binary="nova-compute",
+ topic="compute_topic",
+ report_count=5,
+ disabled=False,
+ disabled_reason=None,
+ availability_zone="nova"),
+ )
+
+
+class HypervisorStatusTest(test_hypervisors.HypervisorsTest):
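+ # Reuses HypervisorsTest's setUp; _prepare_extension additionally
+ # enables os-hypervisor-status and stubs the service liveness check.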
+ def _prepare_extension(self):
+ self.ext_mgr.extensions['os-hypervisor-status'] = True
+ self.controller = hypervisors.HypervisorsController(self.ext_mgr)
+ self.controller.servicegroup_api.service_is_up = mock.MagicMock(
+ return_value=True)
+
+ def test_view_hypervisor_service_status(self):
+ self._prepare_extension()
+ result = self.controller._view_hypervisor(
+ TEST_HYPER, False)
+ self.assertEqual('enabled', result['status'])
+ self.assertEqual('up', result['state'])
+
+ self.controller.servicegroup_api.service_is_up.return_value = False
+ result = self.controller._view_hypervisor(
+ TEST_HYPER, False)
+ self.assertEqual('down', result['state'])
+
+ hyper = copy.deepcopy(TEST_HYPER)
+ hyper['service']['disabled'] = True
+ result = self.controller._view_hypervisor(hyper, False)
+ self.assertEqual('disabled', result['status'])
+
+ def test_view_hypervisor_detail_status(self):
+ self._prepare_extension()
+
+ result = self.controller._view_hypervisor(
+ TEST_HYPER, True)
+
+ self.assertEqual('enabled', result['status'])
+ self.assertEqual('up', result['state'])
+ self.assertIsNone(result['service']['disabled_reason'])
+
+ self.controller.servicegroup_api.service_is_up.return_value = False
+ result = self.controller._view_hypervisor(
+ TEST_HYPER, True)
+ self.assertEqual('down', result['state'])
+
+ hyper = copy.deepcopy(TEST_HYPER)
+ hyper['service']['disabled'] = True
+ hyper['service']['disabled_reason'] = "fake"
+ result = self.controller._view_hypervisor(hyper, True)
+ self.assertEqual('disabled', result['status'])
+ self.assertEqual('fake', result['service']['disabled_reason'])
diff --git a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py b/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
index 80f4991754..c6814ce24f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hypervisors.py
@@ -20,7 +20,6 @@
from nova.api.openstack import extensions
from nova import context
from nova import db
-from nova.db.sqlalchemy import api as db_api
from nova import exception
from nova import test
from nova.tests.api.openstack import fakes
@@ -83,7 +82,6 @@
dict(name="inst4", uuid="uuid4", host="compute2")]
-@db_api.require_admin_context
def fake_compute_node_get_all(context):
return TEST_HYPERS
@@ -200,6 +198,12 @@ def test_index(self):
dict(id=1, hypervisor_hostname="hyper1"),
dict(id=2, hypervisor_hostname="hyper2")]))
+ def test_index_non_admin(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors',
+ use_admin_context=False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.index, req)
+
def test_detail(self):
req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/detail',
use_admin_context=True)
@@ -241,12 +245,20 @@ def test_detail(self):
cpu_info='cpu_info',
disk_available_least=100)]))
+ def test_detail_non_admin(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/detail',
+ use_admin_context=False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.detail, req)
+
def test_show_noid(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3',
+ use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
def test_show_withid(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1',
+ use_admin_context=True)
result = self.controller.show(req, '1')
self.assertEqual(result, dict(hypervisor=dict(
@@ -268,8 +280,15 @@ def test_show_withid(self):
cpu_info='cpu_info',
disk_available_least=100)))
+ def test_show_non_admin(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1',
+ use_admin_context=False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.show, req, '1')
+
def test_uptime_noid(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/3',
+ use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '3')
def test_uptime_notimplemented(self):
@@ -279,7 +298,8 @@ def fake_get_host_uptime(context, hyp):
self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
- req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1',
+ use_admin_context=True)
self.assertRaises(exc.HTTPNotImplemented,
self.controller.uptime, req, '1')
@@ -290,7 +310,8 @@ def fake_get_host_uptime(context, hyp):
self.stubs.Set(self.controller.host_api, 'get_host_uptime',
fake_get_host_uptime)
- req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1',
+ use_admin_context=True)
result = self.controller.uptime(req, '1')
self.assertEqual(result, dict(hypervisor=dict(
@@ -298,8 +319,15 @@ def fake_get_host_uptime(context, hyp):
hypervisor_hostname="hyper1",
uptime="fake uptime")))
+ def test_uptime_non_admin(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/1',
+ use_admin_context=False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.uptime, req, '1')
+
def test_search(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/search')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/search',
+ use_admin_context=True)
result = self.controller.search(req, 'hyper')
self.assertEqual(result, dict(hypervisors=[
@@ -307,7 +335,8 @@ def test_search(self):
dict(id=2, hypervisor_hostname="hyper2")]))
def test_servers(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/servers')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/servers',
+ use_admin_context=True)
result = self.controller.servers(req, 'hyper')
self.assertEqual(result, dict(hypervisors=[
@@ -322,8 +351,15 @@ def test_servers(self):
dict(name="inst2", uuid="uuid2"),
dict(name="inst4", uuid="uuid4")])]))
+ def test_servers_non_admin(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/hyper/servers',
+ use_admin_context=False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.servers, req, '1')
+
def test_statistics(self):
- req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics')
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics',
+ use_admin_context=True)
result = self.controller.statistics(req)
self.assertEqual(result, dict(hypervisor_statistics=dict(
@@ -340,6 +376,12 @@ def test_statistics(self):
running_vms=4,
disk_available_least=200)))
+ def test_statistics_non_admin(self):
+ req = fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/statistics',
+ use_admin_context=False)
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.statistics, req)
+
class HypervisorsSerializersTest(test.NoDBTestCase):
def compare_to_exemplar(self, exemplar, hyper):
diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
index 0c4f0c1558..51bc6886a0 100644
--- a/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_instance_actions.py
@@ -79,7 +79,7 @@ def test_list_actions_restricted_by_project(self):
def fake_instance_get_by_uuid(context, instance_id,
columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
@@ -97,7 +97,7 @@ def test_get_action_restricted_by_project(self):
def fake_instance_get_by_uuid(context, instance_id,
columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
@@ -120,7 +120,7 @@ def fake_get(self, context, instance_uuid, expected_attrs=None,
want_objects=False):
return {'uuid': instance_uuid}
- def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
+ def fake_instance_get_by_uuid(context, instance_id, use_subordinate=False):
return {'name': 'fake', 'project_id': context.project_id}
self.stubs.Set(compute_api.API, 'get', fake_get)
diff --git a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
index 970cb8ff16..0ff6ec3105 100644
--- a/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
+++ b/nova/tests/api/openstack/compute/contrib/test_instance_usage_audit_log.py
@@ -40,11 +40,11 @@
end3 = datetime.datetime(2012, 7, 7, 6, 0, 0)
-#test data
+# test data
TEST_LOGS1 = [
- #all services done, no errors.
+ # all services done, no errors.
dict(host="plonk", period_beginning=begin1, period_ending=end1,
state="DONE", errors=0, task_items=23, message="test1"),
dict(host="baz", period_beginning=begin1, period_ending=end1,
@@ -57,7 +57,7 @@
TEST_LOGS2 = [
- #some still running...
+ # some still running...
dict(host="plonk", period_beginning=begin2, period_ending=end2,
state="DONE", errors=0, task_items=23, message="test5"),
dict(host="baz", period_beginning=begin2, period_ending=end2,
@@ -70,7 +70,7 @@
TEST_LOGS3 = [
- #some errors..
+ # some errors..
dict(host="plonk", period_beginning=begin3, period_ending=end3,
state="DONE", errors=0, task_items=23, message="test9"),
dict(host="baz", period_beginning=begin3, period_ending=end3,
diff --git a/nova/tests/api/openstack/compute/contrib/test_keypairs.py b/nova/tests/api/openstack/compute/contrib/test_keypairs.py
index dd1851f056..fb53eefc98 100644
--- a/nova/tests/api/openstack/compute/contrib/test_keypairs.py
+++ b/nova/tests/api/openstack/compute/contrib/test_keypairs.py
@@ -100,19 +100,17 @@ def test_keypair_create(self):
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
- def test_keypair_create_with_empty_name(self):
- body = {'keypair': {'name': ''}}
+ def _test_keypair_create_bad_request_case(self, body):
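+        # shared helper: POST the keypair body and expect a 400 response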
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(
- 'Keypair data is invalid: '
- 'Keypair name must be between 1 and 255 characters long',
- res_dict['badRequest']['message'])
+
+ def test_keypair_create_with_empty_name(self):
+ body = {'keypair': {'name': ''}}
+ self._test_keypair_create_bad_request_case(body)
def test_keypair_create_with_name_too_long(self):
body = {
@@ -120,17 +118,7 @@ def test_keypair_create_with_name_too_long(self):
'name': 'a' * 256
}
}
- req = webob.Request.blank('/v2/fake/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(
- 'Keypair data is invalid: '
- 'Keypair name must be between 1 and 255 characters long',
- res_dict['badRequest']['message'])
+ self._test_keypair_create_bad_request_case(body)
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
@@ -138,18 +126,20 @@ def test_keypair_create_with_non_alphanumeric_name(self):
'name': 'test/keypair'
}
}
- req = webob.Request.blank('/v2/fake/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(res.status_int, 400)
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(
- "Keypair data is invalid: "
- "Keypair name contains unsafe characters",
- res_dict['badRequest']['message'])
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_import_bad_key(self):
+ body = {
+ 'keypair': {
+ 'name': 'create_test',
+ 'public_key': 'ssh-what negative',
+ },
+ }
+ self._test_keypair_create_bad_request_case(body)
+
+ def test_keypair_create_with_invalid_keypair_body(self):
+ body = {'alpha': {'name': 'create_test'}}
+ self._test_keypair_create_bad_request_case(body)
def test_keypair_import(self):
body = {
@@ -205,11 +195,11 @@ def fake_quotas_count(self, context, resource, *args, **kwargs):
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
- self.assertEqual(res.status_int, 413)
+ self.assertEqual(res.status_int, 403)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
- res_dict['overLimit']['message'])
+ res_dict['forbidden']['message'])
def test_keypair_create_quota_limit(self):
@@ -229,11 +219,11 @@ def fake_quotas_count(self, context, resource, *args, **kwargs):
req.body = jsonutils.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(self.app)
- self.assertEqual(res.status_int, 413)
+ self.assertEqual(res.status_int, 403)
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Quota exceeded, too many key pairs.",
- res_dict['overLimit']['message'])
+ res_dict['forbidden']['message'])
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
@@ -249,26 +239,6 @@ def test_keypair_create_duplicate(self):
"Key pair 'create_duplicate' already exists.",
res_dict['conflictingRequest']['message'])
- def test_keypair_import_bad_key(self):
- body = {
- 'keypair': {
- 'name': 'create_test',
- 'public_key': 'ssh-what negative',
- },
- }
-
- req = webob.Request.blank('/v2/fake/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- self.assertEqual(res.status_int, 400)
-
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(
- 'Keypair data is invalid: failed to generate fingerprint',
- res_dict['badRequest']['message'])
-
def test_keypair_delete(self):
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
@@ -348,18 +318,6 @@ def test_detail_servers(self):
self.assertIn('key_name', server_dict)
self.assertEqual(server_dict['key_name'], '')
- def test_keypair_create_with_invalid_keypair_body(self):
- body = {'alpha': {'name': 'create_test'}}
- req = webob.Request.blank('/v1.1/fake/os-keypairs')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers['Content-Type'] = 'application/json'
- res = req.get_response(self.app)
- res_dict = jsonutils.loads(res.body)
- self.assertEqual(res.status_int, 400)
- self.assertEqual(res_dict['badRequest']['message'],
- "Invalid request body")
-
class KeypairPolicyTest(test.TestCase):
diff --git a/nova/tests/api/openstack/compute/contrib/test_migrations.py b/nova/tests/api/openstack/compute/contrib/test_migrations.py
index bebc51026c..ac18576389 100644
--- a/nova/tests/api/openstack/compute/contrib/test_migrations.py
+++ b/nova/tests/api/openstack/compute/contrib/test_migrations.py
@@ -87,9 +87,9 @@ def test_index(self):
'migrations': migrations.output(migrations_obj)}
for mig in migrations_in_progress['migrations']:
- self.assertTrue('id' in mig)
- self.assertTrue('deleted' not in mig)
- self.assertTrue('deleted_at' not in mig)
+ self.assertIn('id', mig)
+ self.assertNotIn('deleted', mig)
+ self.assertNotIn('deleted_at', mig)
filters = {'host': 'host1', 'status': 'migrating',
'cell_name': 'ChildCell'}
diff --git a/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py b/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py
index f6786686c3..3998240d33 100644
--- a/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py
+++ b/nova/tests/api/openstack/compute/contrib/test_multinic_xs.py
@@ -13,9 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
import webob
from nova import compute
+from nova import exception
from nova import objects
from nova.openstack.common import jsonutils
from nova import test
@@ -90,9 +92,22 @@ def test_add_fixed_ip_no_network(self):
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 422)
+ self.assertEqual(resp.status_int, 400)
self.assertEqual(last_add_fixed_ip, (None, None))
+ @mock.patch.object(compute.api.API, 'add_fixed_ip')
+ def test_add_fixed_ip_no_more_ips_available(self, mock_add_fixed_ip):
+ mock_add_fixed_ip.side_effect = exception.NoMoreFixedIps
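+        # NoMoreFixedIps from the compute layer should map to a 400 error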
+
+ body = dict(addFixedIp=dict(networkId='test_net'))
+ req = webob.Request.blank('/v2/fake/servers/%s/action' % UUID)
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(self.app)
+ self.assertEqual(resp.status_int, 400)
+
def test_remove_fixed_ip(self):
global last_remove_fixed_ip
last_remove_fixed_ip = (None, None)
@@ -118,5 +133,5 @@ def test_remove_fixed_ip_no_address(self):
req.headers['content-type'] = 'application/json'
resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 422)
+ self.assertEqual(resp.status_int, 400)
self.assertEqual(last_remove_fixed_ip, (None, None))
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index cf9e78a22f..1ed7dc8c7c 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -19,14 +19,19 @@
import math
import uuid
+import mock
import netaddr
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import networks_associate
from nova.api.openstack.compute.contrib import os_networks as networks
+from nova.api.openstack.compute.contrib import os_tenant_networks as tnet
+from nova.api.openstack import extensions
import nova.context
from nova import exception
+from nova.network import manager
+from nova import objects
from nova import test
from nova.tests.api.openstack import fakes
import nova.utils
@@ -50,6 +55,8 @@
'dns1': None, 'dns2': None, 'host': 'nsokolov-desktop',
'gateway_v6': None, 'netmask_v6': None, 'priority': None,
'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 387525),
+ 'mtu': None, 'dhcp_server': '10.0.0.1', 'enable_dhcp': True,
+ 'share_address': False,
},
{
'bridge': 'br101', 'vpn_public_port': 1001,
@@ -65,6 +72,8 @@
'multi_host': False, 'dns1': None, 'dns2': None, 'host': None,
'gateway_v6': None, 'netmask_v6': None, 'priority': None,
'created_at': datetime.datetime(2011, 8, 15, 6, 19, 19, 885495),
+ 'mtu': None, 'dhcp_server': '10.0.0.9', 'enable_dhcp': True,
+ 'share_address': False,
},
]
@@ -106,6 +115,8 @@ def disable_vlan(self):
self._vlan_is_disabled = True
def delete(self, context, network_id):
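+        # 'always_delete' is a sentinel id whose delete always succeeds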
+ if network_id == 'always_delete':
+ return True
if network_id == -1:
raise exception.NetworkInUse(network_id=network_id)
for i, network in enumerate(self.networks):
@@ -196,13 +207,83 @@ def create(self, context, **kwargs):
return new_networks
+# NOTE(vish): tests that network create exceptions actually return
+#             the proper error responses
+class NetworkCreateExceptionsTest(test.TestCase):
+
+ def setUp(self):
+ super(NetworkCreateExceptionsTest, self).setUp()
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-extended-networks': 'fake'}
+
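+        # forward create() to a real network manager so that real
+        # validation exceptions reach the controller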
+        class PassthroughAPI(object):
+ def __init__(self):
+ self.network_manager = manager.FlatDHCPManager()
+
+ def create(self, *args, **kwargs):
+ return self.network_manager.create_networks(*args, **kwargs)
+
+ self.controller = networks.NetworkController(
+ PassthroughAPI(), ext_mgr)
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+
+ def test_network_create_bad_vlan(self):
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['vlan_start'] = 'foo'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_no_cidr(self):
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['cidr'] = ''
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_invalid_fixed_cidr(self):
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['fixed_cidr'] = 'foo'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_invalid_start(self):
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['allowed_start'] = 'foo'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
+ def test_network_create_cidr_conflict(self):
+
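+        # stub an existing 10.0.0.0/23 network so the new /24 overlaps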
+ @staticmethod
+ def get_all(context):
+ ret = objects.NetworkList(context=context, objects=[])
+ net = objects.Network(cidr='10.0.0.0/23')
+ ret.objects.append(net)
+ return ret
+
+ self.stubs.Set(objects.NetworkList, 'get_all', get_all)
+
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['cidr'] = '10.0.0.0/24'
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller.create, req, net)
+
+
class NetworksTest(test.NoDBTestCase):
def setUp(self):
super(NetworksTest, self).setUp()
self.fake_network_api = FakeNetworkAPI()
+ ext_mgr = extensions.ExtensionManager()
+ ext_mgr.extensions = {'os-extended-networks': 'fake'}
self.controller = networks.NetworkController(
- self.fake_network_api)
+ self.fake_network_api,
+ ext_mgr)
self.associate_controller = networks_associate\
.NetworkAssociateActionController(self.fake_network_api)
fakes.stub_out_networking(self.stubs)
@@ -311,13 +392,6 @@ def test_network_delete_in_use(self):
self.assertRaises(webob.exc.HTTPConflict,
self.controller.delete, req, -1)
- def test_network_add_vlan_disabled(self):
- self.fake_network_api.disable_vlan()
- uuid = FAKE_NETWORKS[1]['uuid']
- req = fakes.HTTPRequest.blank('/v2/1234/os-networks/add')
- self.assertRaises(webob.exc.HTTPNotImplemented,
- self.controller.add, req, {'id': uuid})
-
def test_network_add(self):
uuid = FAKE_NETWORKS[1]['uuid']
req = fakes.HTTPRequest.blank('/v2/1234/os-networks/add')
@@ -357,6 +431,29 @@ def test_network_create_large(self):
self.assertEqual(res_dict['network']['cidr'],
large_network['network']['cidr'])
+ def test_network_create_not_extended(self):
+ self.stubs.Set(self.controller, 'extended', False)
+
+ # NOTE(vish): Verify that new params are not passed through if
+ # extension is not enabled.
+ def no_mtu(*args, **kwargs):
+ if 'mtu' in kwargs:
+ raise test.TestingException("mtu should not pass through")
+ return [{}]
+
+ self.stubs.Set(self.controller.network_api, 'create', no_mtu)
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['mtu'] = 9000
+ self.controller.create(req, net)
+
+ def test_network_create_bad_cidr(self):
+ req = fakes.HTTPRequest.blank('/v2/1234/os-networks')
+ net = copy.deepcopy(NEW_NETWORK)
+ net['network']['cidr'] = '128.0.0.0/900'
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, req, net)
+
def test_network_neutron_associate_not_implemented(self):
uuid = FAKE_NETWORKS[1]['uuid']
self.flags(network_api_class='nova.network.neutronv2.api.API')
@@ -394,3 +491,42 @@ def test_network_neutron_disassociate_not_implemented(self):
self.assertRaises(webob.exc.HTTPNotImplemented,
controller._disassociate_host_and_project,
req, uuid, {'disassociate': None})
+
+
+class TenantNetworksTest(test.NoDBTestCase):
+ def setUp(self):
+ super(TenantNetworksTest, self).setUp()
+ self.controller = tnet.NetworkController()
+ self.flags(enable_network_quota=True)
+
+ @mock.patch('nova.quota.QUOTAS.reserve')
+ @mock.patch('nova.quota.QUOTAS.rollback')
+ @mock.patch('nova.network.api.API.delete')
+ def _test_network_delete_exception(self, ex, expex, delete_mock,
+ rollback_mock, reserve_mock):
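+        # NOTE: mock.patch decorators apply bottom-up, so the mocks
+        # arrive as delete, rollback, reserve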
+ req = fakes.HTTPRequest.blank('/v2/1234/os-tenant-networks')
+ ctxt = req.environ['nova.context']
+
+ reserve_mock.return_value = 'rv'
+ delete_mock.side_effect = ex
+
+ self.assertRaises(expex, self.controller.delete, req, 1)
+
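+        # a failed delete should roll back the quota reservation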
+ delete_mock.assert_called_once_with(ctxt, 1)
+ rollback_mock.assert_called_once_with(ctxt, 'rv')
+ reserve_mock.assert_called_once_with(ctxt, networks=-1)
+
+ def test_network_delete_exception_network_not_found(self):
+ ex = exception.NetworkNotFound(network_id=1)
+ expex = webob.exc.HTTPNotFound
+ self._test_network_delete_exception(ex, expex)
+
+ def test_network_delete_exception_policy_failed(self):
+ ex = exception.PolicyNotAuthorized(action='dummy')
+ expex = webob.exc.HTTPForbidden
+ self._test_network_delete_exception(ex, expex)
+
+ def test_network_delete_exception_network_in_use(self):
+ ex = exception.NetworkInUse(network_id=1)
+ expex = webob.exc.HTTPConflict
+ self._test_network_delete_exception(ex, expex)
diff --git a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
index 6b2db76915..50a786e8bd 100644
--- a/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_neutron_security_groups.py
@@ -30,6 +30,7 @@
from nova.network import neutronv2
from nova.network.neutronv2 import api as neutron_api
from nova.network.security_group import neutron_driver
+from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack.compute.contrib import test_security_groups
@@ -174,16 +175,16 @@ def test_delete_security_group_by_admin(self):
def test_delete_security_group_in_use(self):
sg = self._create_sg_template().get('security_group')
self._create_network()
- fake_instance = {'project_id': 'fake_tenant',
- 'availability_zone': 'zone_one',
- 'info_cache': {'network_info': []},
- 'security_groups': [],
- 'uuid': str(uuid.uuid4()),
- 'display_name': 'test_instance'}
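+        # build a real Instance object from a stub DB record and patch
+        # the DB lookup performed during allocation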
+ db_inst = fakes.stub_instance(id=1, nw_cache=[], security_groups=[])
+ _context = context.get_admin_context()
+ instance = instance_obj.Instance._from_db_object(
+ _context, instance_obj.Instance(), db_inst,
+ expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
neutron = neutron_api.API()
- neutron.allocate_for_instance(context.get_admin_context(),
- fake_instance,
- security_groups=[sg['id']])
+ with mock.patch.object(nova.db, 'instance_get_by_uuid',
+ return_value=db_inst):
+ neutron.allocate_for_instance(_context, instance,
+ security_groups=[sg['id']])
req = fakes.HTTPRequest.blank('/v2/fake/os-security-groups/%s'
% sg['id'])
@@ -814,8 +815,14 @@ def list_security_groups(self, **_params):
return {'security_groups': ret}
def list_networks(self, **_params):
- return {'networks':
- [network for network in self._fake_networks.values()]}
+ # neutronv2/api.py _get_available_networks calls this assuming
+ # search_opts filter "shared" is implemented and not ignored
+ shared = _params.get("shared", None)
+ if shared:
+ return {'networks': []}
+ else:
+ return {'networks':
+ [network for network in self._fake_networks.values()]}
def list_ports(self, **_params):
ret = []
diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py
index 57f703ea06..063e319dbd 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quotas.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py
@@ -418,49 +418,46 @@ def test_deserializer(self):
self.assertEqual(result, exemplar)
-fake_quotas = {'ram': {'limit': 51200,
- 'in_use': 12800,
- 'reserved': 12800},
- 'cores': {'limit': 20,
- 'in_use': 10,
- 'reserved': 5},
- 'instances': {'limit': 100,
- 'in_use': 0,
- 'reserved': 0}}
-
-
-def fake_get_quotas(self, context, id, user_id=None, usages=False):
- if usages:
- return fake_quotas
- else:
- return dict((k, v['limit']) for k, v in fake_quotas.items())
-
-
class ExtendedQuotasTest(test.TestCase):
def setUp(self):
super(ExtendedQuotasTest, self).setUp()
self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager)
self.controller = quotas.QuotaSetsController(self.ext_mgr)
+ self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
+ self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
+ self.mox.ReplayAll()
+
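+    # NOTE: quota fixtures live on the class so the force-update test
+    # can adjust limits without mutating module-level state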
+ fake_quotas = {'ram': {'limit': 51200,
+ 'in_use': 12800,
+ 'reserved': 12800},
+ 'cores': {'limit': 20,
+ 'in_use': 10,
+ 'reserved': 5},
+ 'instances': {'limit': 100,
+ 'in_use': 0,
+ 'reserved': 0}}
+
+ def fake_get_quotas(self, context, id, user_id=None, usages=False):
+ if usages:
+ return self.fake_quotas
+ else:
+ return dict((k, v['limit']) for k, v in self.fake_quotas.items())
def test_quotas_update_exceed_in_used(self):
body = {'quota_set': {'cores': 10}}
self.stubs.Set(quotas.QuotaSetsController, '_get_quotas',
- fake_get_quotas)
+ self.fake_get_quotas)
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
- self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
- self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
- self.mox.ReplayAll()
-
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body)
def test_quotas_force_update_exceed_in_used(self):
self.stubs.Set(quotas.QuotaSetsController, '_get_quotas',
- fake_get_quotas)
+ self.fake_get_quotas)
req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me',
use_admin_context=True)
expected = {'quota_set': {'ram': 25600, 'instances': 200, 'cores': 10}}
@@ -468,13 +465,10 @@ def test_quotas_force_update_exceed_in_used(self):
'instances': 200,
'cores': 10,
'force': 'True'}}
- fake_quotas.get('ram')['limit'] = 25600
- fake_quotas.get('cores')['limit'] = 10
- fake_quotas.get('instances')['limit'] = 200
+ self.fake_quotas.get('ram')['limit'] = 25600
+ self.fake_quotas.get('cores')['limit'] = 10
+ self.fake_quotas.get('instances')['limit'] = 200
- self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True)
- self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True)
- self.mox.ReplayAll()
res_dict = self.controller.update(req, 'update_me', body)
self.assertEqual(res_dict, expected)
diff --git a/nova/tests/api/openstack/compute/contrib/test_rescue.py b/nova/tests/api/openstack/compute/contrib/test_rescue.py
index c533a1d56d..e69196d6ad 100644
--- a/nova/tests/api/openstack/compute/contrib/test_rescue.py
+++ b/nova/tests/api/openstack/compute/contrib/test_rescue.py
@@ -12,7 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from oslo.config import cfg
import webob
@@ -165,35 +164,3 @@ def fake_rescue(*args, **kwargs):
resp = req.get_response(self.app)
self.assertEqual(resp.status_int, 400)
-
- @mock.patch('nova.compute.api.API.rescue')
- def test_rescue_raises_not_implemented(self, rescue_mock):
- body = dict(rescue=None)
-
- def fake_rescue(*args, **kwargs):
- raise NotImplementedError('not implemented')
-
- rescue_mock.side_effect = fake_rescue
- req = webob.Request.blank('/v2/fake/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 501)
-
- @mock.patch('nova.compute.api.API.unrescue')
- def test_unrescue_raises_not_implemented(self, unrescue_mock):
- body = dict(unrescue=None)
-
- def fake_unrescue(*args, **kwargs):
- raise NotImplementedError('not implemented')
-
- unrescue_mock.side_effect = fake_unrescue
- req = webob.Request.blank('/v2/fake/servers/test_inst/action')
- req.method = "POST"
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- resp = req.get_response(self.app)
- self.assertEqual(resp.status_int, 501)
diff --git a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py b/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py
index 0d867b8645..7177ef7cbd 100644
--- a/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py
+++ b/nova/tests/api/openstack/compute/contrib/test_scheduler_hints.py
@@ -13,26 +13,44 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
+from oslo.config import cfg
+
from nova.api.openstack import compute
+from nova.api.openstack.compute import plugins
+from nova.api.openstack.compute.plugins.v3 import servers as servers_v21
+from nova.api.openstack.compute import servers as servers_v2
+from nova.api.openstack import extensions
import nova.compute.api
+from nova.compute import flavors
+from nova import db
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
+from nova.tests import fake_instance
+from nova.tests.image import fake
UUID = fakes.FAKE_UUID
-class SchedulerHintsTestCase(test.TestCase):
+CONF = cfg.CONF
+
+
+class SchedulerHintsTestCaseV21(test.TestCase):
def setUp(self):
- super(SchedulerHintsTestCase, self).setUp()
+ super(SchedulerHintsTestCaseV21, self).setUp()
self.fake_instance = fakes.stub_instance(1, uuid=UUID)
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Scheduler_hints'])
- self.app = compute.APIRouter(init_only=('servers',))
+ self._set_up_router()
+
+ def _set_up_router(self):
+ self.app = compute.APIRouterV3(init_only=('servers',
+ 'os-scheduler-hints'))
+
+ def _get_request(self):
+ return fakes.HTTPRequestV3.blank('/servers')
def test_create_server_without_hints(self):
@@ -42,7 +60,7 @@ def fake_create(*args, **kwargs):
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
- req = fakes.HTTPRequest.blank('/fake/servers')
+ req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
body = {'server': {
@@ -63,7 +81,7 @@ def fake_create(*args, **kwargs):
self.stubs.Set(nova.compute.api.API, 'create', fake_create)
- req = fakes.HTTPRequest.blank('/fake/servers')
+ req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
body = {
@@ -80,7 +98,7 @@ def fake_create(*args, **kwargs):
self.assertEqual(202, res.status_int)
def test_create_server_bad_hints(self):
- req = fakes.HTTPRequest.blank('/fake/servers')
+ req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
body = {
@@ -95,3 +113,108 @@ def test_create_server_bad_hints(self):
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
+
+
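+# NOTE: the V2 case inherits every test from the V21 case and only
+# swaps the router and request factory.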
+class SchedulerHintsTestCaseV2(SchedulerHintsTestCaseV21):
+
+ def _set_up_router(self):
+ self.flags(
+ osapi_compute_extension=[
+ 'nova.api.openstack.compute.contrib.select_extensions'],
+ osapi_compute_ext_list=['Scheduler_hints'])
+ self.app = compute.APIRouter(init_only=('servers',))
+
+ def _get_request(self):
+ return fakes.HTTPRequest.blank('/fake/servers')
+
+
+class ServersControllerCreateTestV21(test.TestCase):
+
+ def setUp(self):
+ """Shared implementation for tests below that create instance."""
+ super(ServersControllerCreateTestV21, self).setUp()
+
+ self.instance_cache_num = 0
+ self._set_up_controller()
+
+ def instance_create(context, inst):
+ inst_type = flavors.get_flavor_by_flavor_id(3)
+ image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
+ def_image_ref = 'http://localhost/images/%s' % image_uuid
+ self.instance_cache_num += 1
+ instance = fake_instance.fake_db_instance(**{
+ 'id': self.instance_cache_num,
+ 'display_name': inst['display_name'] or 'test',
+ 'uuid': fakes.FAKE_UUID,
+ 'instance_type': dict(inst_type),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': 'fead::1234',
+ 'image_ref': inst.get('image_ref', def_image_ref),
+ 'user_id': 'fake',
+ 'project_id': 'fake',
+ 'reservation_id': inst['reservation_id'],
+ "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
+ "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
+ "progress": 0,
+ "fixed_ips": [],
+ "task_state": "",
+ "vm_state": "",
+ "root_device_name": inst.get('root_device_name', 'vda'),
+ })
+
+ return instance
+
+ fake.stub_out_image_service(self.stubs)
+ self.stubs.Set(db, 'instance_create', instance_create)
+
+ def _set_up_controller(self):
+ ext_info = plugins.LoadedExtensionInfo()
+ CONF.set_override('extensions_blacklist', 'os-scheduler-hints',
+ 'osapi_v3')
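+        # blacklisting the extension simulates scheduler hints being
+        # disabled in the v3 API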
+ self.no_scheduler_hints_controller = servers_v21.ServersController(
+ extension_info=ext_info)
+
+ def _verify_availability_zone(self, **kwargs):
+ self.assertNotIn('scheduler_hints', kwargs)
+
+ def _get_request(self):
+ return fakes.HTTPRequestV3.blank('/servers')
+
+ def _test_create_extra(self, params):
+ image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
+ body = dict(server=server)
+ body.update(params)
+ req = self._get_request()
+ req.method = 'POST'
+ req.body = jsonutils.dumps(body)
+ req.headers["content-type"] = "application/json"
+ server = self.no_scheduler_hints_controller.create(
+ req, body=body).obj['server']
+
+ def test_create_instance_with_scheduler_hints_disabled(self):
+ hints = {'same_host': '48e6a9f6-30af-47e0-bc04-acaed113bb4e'}
+ params = {'OS-SCH-HNT:scheduler_hints': hints}
+ old_create = nova.compute.api.API.create
+
+ def create(*args, **kwargs):
+ self._verify_availability_zone(**kwargs)
+ return old_create(*args, **kwargs)
+
+ self.stubs.Set(nova.compute.api.API, 'create', create)
+ self._test_create_extra(params)
+
+
+class ServersControllerCreateTestV2(ServersControllerCreateTestV21):
+
+ def _set_up_controller(self):
+ self.ext_mgr = extensions.ExtensionManager()
+ self.ext_mgr.extensions = {}
+ self.no_scheduler_hints_controller = servers_v2.Controller(
+ self.ext_mgr)
+
+ def _verify_availability_zone(self, **kwargs):
+ self.assertEqual(kwargs['scheduler_hints'], {})
+
+ def _get_request(self):
+ return fakes.HTTPRequest.blank('/fake/servers')
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py b/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py
index ad0bd6a8d7..a09ea3babf 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_group_default_rules.py
@@ -48,6 +48,39 @@ def security_group_default_rule_db(security_group_default_rule, id=None):
return AttrDict(attrs)
+class TestSecurityGroupDefaultRulesNeutron(test.TestCase):
+ def setUp(self):
+ self.flags(security_group_api='neutron')
+ super(TestSecurityGroupDefaultRulesNeutron, self).setUp()
+ self.controller = \
+ security_group_default_rules.SecurityGroupDefaultRulesController()
+
+ def test_create_security_group_default_rule_not_implemented_neutron(self):
+ sgr = security_group_default_rule_template()
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.create,
+ req, {'security_group_default_rule': sgr})
+
+    def test_security_group_default_rules_list_not_implemented_neutron(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.index,
+ req)
+
+    def test_security_group_default_rules_show_not_implemented_neutron(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.show,
+ req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
+
+    def test_security_group_default_rules_delete_not_implemented_neutron(self):
+ req = fakes.HTTPRequest.blank(
+ '/v2/fake/os-security-group-default-rules', use_admin_context=True)
+ self.assertRaises(webob.exc.HTTPNotImplemented, self.controller.delete,
+ req, '602ed77c-a076-4f9b-a617-f93b847b62c5')
+
+
class TestSecurityGroupDefaultRules(test.TestCase):
def setUp(self):
super(TestSecurityGroupDefaultRules, self).setUp()
diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
index e6a3345571..2b55f854aa 100644
--- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py
@@ -82,7 +82,7 @@ def security_group_rule_db(rule, id=None):
def return_server(context, server_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'id': int(server_id),
'power_state': 0x01,
@@ -93,7 +93,7 @@ def return_server(context, server_id,
def return_server_by_uuid(context, server_uuid,
columns_to_join=None,
- use_slave=False):
+ use_subordinate=False):
return fake_instance.fake_db_instance(
**{'id': 1,
'power_state': 0x01,
@@ -287,8 +287,7 @@ def test_create_security_group_quota_limit(self):
self.assertEqual(res_dict['security_group']['name'], name)
sg = security_group_template()
- self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
- self.controller.create,
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
req, {'security_group': sg})
def test_get_security_group_list(self):
@@ -403,7 +402,7 @@ def test_get_security_group_by_instance(self):
expected = {'security_groups': groups}
def return_instance(context, server_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertEqual(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
@@ -430,7 +429,7 @@ def test_get_security_group_empty_for_instance(self, mock_sec_group,
expected = {'security_groups': []}
def return_instance(context, server_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
self.assertEqual(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
mock_db_get_ins.side_effect = return_instance
@@ -983,13 +982,21 @@ def test_create_with_invalid_parent_group_id(self):
req, {'security_group_rule': rule})
def test_create_with_non_existing_parent_group_id(self):
- rule = security_group_rule_template(group_id='invalid',
+ rule = security_group_rule_template(group_id=None,
parent_group_id=self.invalid_id)
req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, {'security_group_rule': rule})
+ def test_create_with_non_existing_group_id(self):
+ rule = security_group_rule_template(group_id='invalid',
+ parent_group_id=self.sg2['id'])
+
+ req = fakes.HTTPRequest.blank('/v2/fake/os-security-group-rules')
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
+ req, {'security_group_rule': rule})
+
def test_create_with_invalid_protocol(self):
rule = security_group_rule_template(ip_protocol='invalid-protocol',
cidr='10.2.2.0/24',
@@ -1235,8 +1242,7 @@ def test_create_rule_quota_limit(self):
'ip_protocol': 'tcp', 'from_port': '121', 'to_port': '121',
'parent_group_id': self.sg2['id'], 'group_id': self.sg1['id']
}
- self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
- self.controller.create,
+ self.assertRaises(webob.exc.HTTPForbidden, self.controller.create,
req, {'security_group_rule': rule})
def test_create_rule_cidr_allow_all(self):
@@ -1700,7 +1706,7 @@ def construct(self):
root.set('id')
root.set('imageRef')
root.set('flavorRef')
- return xmlutil.MasterTemplate(root, 1,
+ return xmlutil.MainTemplate(root, 1,
nsmap={None: xmlutil.XMLNS_V11})
def _encode_body(self, body):
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
index ad0a133987..6ef2400467 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_diagnostics.py
@@ -80,6 +80,16 @@ def test_get_diagnostics_raise_conflict_on_invalid_state(self,
res = req.get_response(self.router)
self.assertEqual(409, res.status_int)
+ @mock.patch.object(compute_api.API, 'get_diagnostics',
+ side_effect=NotImplementedError)
+ @mock.patch.object(compute_api.API, 'get', fake_instance_get)
+    def test_get_diagnostics_raise_notimplementederror(self,
+                                                       mock_get_diagnostics):
+ req = fakes.HTTPRequest.blank(
+ '/fake/servers/%s/diagnostics' % UUID)
+ res = req.get_response(self.router)
+ self.assertEqual(501, res.status_int)
+
class TestServerDiagnosticsXMLSerializer(test.NoDBTestCase):
namespace = wsgi.XMLNS_V11
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py b/nova/tests/api/openstack/compute/contrib/test_server_external_events.py
index 4091116a47..5439ce79db 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_external_events.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_external_events.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
import mock
import webob
@@ -21,6 +19,7 @@
from nova import context
from nova import exception
from nova import objects
+from nova.openstack.common import jsonutils
from nova import test
fake_instances = {
@@ -30,9 +29,11 @@
uuid='00000000-0000-0000-0000-000000000002', host='host1'),
'00000000-0000-0000-0000-000000000003': objects.Instance(
uuid='00000000-0000-0000-0000-000000000003', host='host2'),
+ '00000000-0000-0000-0000-000000000004': objects.Instance(
+ uuid='00000000-0000-0000-0000-000000000004', host=None),
}
fake_instance_uuids = sorted(fake_instances.keys())
-MISSING_UUID = '00000000-0000-0000-0000-000000000004'
+MISSING_UUID = '00000000-0000-0000-0000-000000000005'
@classmethod
@@ -49,23 +50,27 @@ def setUp(self):
super(ServerExternalEventsTest, self).setUp()
self.api = server_external_events.ServerExternalEventsController()
self.context = context.get_admin_context()
- self.default_body = {
- 'events': [
- {'name': 'network-vif-plugged',
- 'tag': 'foo',
- 'status': 'completed',
- 'server_uuid': fake_instance_uuids[0]},
- {'name': 'network-changed',
- 'server_uuid': fake_instance_uuids[1]},
- ]
- }
+ self.event_1 = {'name': 'network-vif-plugged',
+ 'tag': 'foo',
+ 'server_uuid': fake_instance_uuids[0]}
+ self.event_2 = {'name': 'network-changed',
+ 'server_uuid': fake_instance_uuids[1]}
+ self.default_body = {'events': [self.event_1, self.event_2]}
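+        # expected responses mirror each event with code and status added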
+ self.resp_event_1 = dict(self.event_1)
+ self.resp_event_1['code'] = 200
+ self.resp_event_1['status'] = 'completed'
+ self.resp_event_2 = dict(self.event_2)
+ self.resp_event_2['code'] = 200
+ self.resp_event_2['status'] = 'completed'
+ self.default_resp_body = {'events': [self.resp_event_1,
+ self.resp_event_2]}
def _create_req(self, body):
req = webob.Request.blank('/v2/fake/os-server-external-events')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.environ['nova.context'] = self.context
- req.body = json.dumps(body)
+ req.body = jsonutils.dumps(body)
return req
def _assert_call(self, req, body, expected_uuids, expected_events):
@@ -91,7 +96,7 @@ def test_create(self):
fake_instance_uuids[:2],
['network-vif-plugged',
'network-changed'])
- self.assertEqual(self.default_body, result)
+ self.assertEqual(self.default_resp_body, result)
self.assertEqual(200, code)
def test_create_one_bad_instance(self):
@@ -105,6 +110,19 @@ def test_create_one_bad_instance(self):
self.assertEqual(404, result['events'][1]['code'])
self.assertEqual(207, code)
+ def test_create_event_instance_has_no_host(self):
+ body = self.default_body
+ body['events'][0]['server_uuid'] = fake_instance_uuids[-1]
+ req = self._create_req(body)
+ # the instance without host should not be passed to the compute layer
+ result, code = self._assert_call(req, body,
+ [fake_instance_uuids[1]],
+ ['network-changed'])
+ self.assertEqual(422, result['events'][0]['code'])
+ self.assertEqual('failed', result['events'][0]['status'])
+ self.assertEqual(200, result['events'][1]['code'])
+ self.assertEqual(207, code)
+
def test_create_no_good_instances(self):
body = self.default_body
body['events'][0]['server_uuid'] = MISSING_UUID
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_groups.py b/nova/tests/api/openstack/compute/contrib/test_server_groups.py
index 9a2a76932a..54b1241d71 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_groups.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_groups.py
@@ -48,7 +48,6 @@ def server_group_resp_template(**kwargs):
sgroup.setdefault('name', 'test')
sgroup.setdefault('policies', [])
sgroup.setdefault('members', [])
- sgroup.setdefault('metadata', {})
return sgroup
@@ -66,10 +65,6 @@ def server_group_db(sg):
attrs['members'] = members
else:
attrs['members'] = []
- if 'metadata' in attrs:
- attrs['metadetails'] = attrs.pop('metadata')
- else:
- attrs['metadetails'] = {}
attrs['deleted'] = 0
attrs['deleted_at'] = None
attrs['created_at'] = None
@@ -257,7 +252,7 @@ def test_list_server_group_by_tenant(self):
groups = []
policies = ['anti-affinity']
members = []
- metadata = {'key1': 'value1'}
+ metadata = {} # always empty
names = ['default-x', 'test']
sg1 = server_group_resp_template(id=str(1345),
name=names[0],
@@ -287,7 +282,7 @@ def test_list_server_group_all(self):
tenant_groups = []
policies = ['anti-affinity']
members = []
- metadata = {'key1': 'value1'}
+ metadata = {} # always empty
names = ['default-x', 'test']
sg1 = server_group_resp_template(id=str(1345),
name=names[0],
@@ -428,7 +423,6 @@ def _tag(self, elem):
def _verify_server_group(self, raw_group, tree):
policies = raw_group['policies']
members = raw_group['members']
- metadata = raw_group['metadata']
self.assertEqual('server_group', self._tag(tree))
self.assertEqual(raw_group['id'], tree.get('id'))
self.assertEqual(raw_group['name'], tree.get('name'))
@@ -448,16 +442,7 @@ def _verify_server_group(self, raw_group, tree):
self.assertEqual(members[idx],
gr_child.text)
elif child_tag == 'metadata':
- self.assertEqual(len(metadata), len(child))
- metas = {}
- for idx, gr_child in enumerate(child):
- self.assertEqual(self._tag(gr_child), 'meta')
- key = gr_child.get('key')
- self.assertTrue(key in ['key1', 'key2'])
- metas[key] = gr_child.text
- self.assertEqual(len(metas), len(metadata))
- for k in metadata:
- self.assertEqual(metadata[k], metas[k])
+ self.assertEqual(0, len(child))
def _verify_server_group_brief(self, raw_group, tree):
self.assertEqual('server_group', self._tag(tree))
@@ -467,13 +452,11 @@ def _verify_server_group_brief(self, raw_group, tree):
def test_group_serializer(self):
policies = ["policy-1", "policy-2"]
members = ["1", "2"]
- metadata = dict(key1="value1", key2="value2")
raw_group = dict(
id='890',
name='name',
policies=policies,
- members=members,
- metadata=metadata)
+ members=members)
sg_group = dict(server_group=raw_group)
text = self.default_serializer.serialize(sg_group)
@@ -485,19 +468,16 @@ def test_groups_serializer(self):
policies = ["policy-1", "policy-2",
"policy-3"]
members = ["1", "2", "3"]
- metadata = dict(key1="value1", key2="value2")
groups = [dict(
id='890',
name='test',
policies=policies[0:2],
- members=members[0:2],
- metadata=metadata),
+ members=members[0:2]),
dict(
id='123',
name='default',
policies=policies[2:],
- members=members[2:],
- metadata=metadata)]
+ members=members[2:])]
sg_groups = dict(server_groups=groups)
text = self.index_serializer.serialize(sg_groups)
diff --git a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
index bc5b132258..761f29c511 100644
--- a/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
+++ b/nova/tests/api/openstack/compute/contrib/test_server_start_stop.py
@@ -26,7 +26,7 @@
def fake_instance_get(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
result = fakes.stub_instance(id=1, uuid=instance_id)
result['created_at'] = None
result['deleted_at'] = None
diff --git a/nova/tests/api/openstack/compute/contrib/test_shelve.py b/nova/tests/api/openstack/compute/contrib/test_shelve.py
index 43f852019c..8ab472c002 100644
--- a/nova/tests/api/openstack/compute/contrib/test_shelve.py
+++ b/nova/tests/api/openstack/compute/contrib/test_shelve.py
@@ -28,7 +28,7 @@
def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' % context.project_id})
diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
index 9dc17edf5b..4a94be2241 100644
--- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
+++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py
@@ -271,6 +271,25 @@ def test_get_tenants_usage_with_invalid_start_date(self):
init_only=('os-simple-tenant-usage',)))
self.assertEqual(res.status_int, 400)
+ def _test_get_tenants_usage_with_one_date(self, date_url_param):
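+        # a request with only one of start/end dates should still succeed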
+ req = webob.Request.blank(
+ '/v2/faketenant_0/os-simple-tenant-usage/'
+ 'faketenant_0?%s' % date_url_param)
+ req.method = "GET"
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app(
+ fake_auth_context=self.user_context,
+ init_only=('os-simple-tenant-usage',)))
+ self.assertEqual(200, res.status_int)
+
+ def test_get_tenants_usage_with_no_start_date(self):
+ self._test_get_tenants_usage_with_one_date(
+ 'end=%s' % (NOW + datetime.timedelta(5)).isoformat())
+
+ def test_get_tenants_usage_with_no_end_date(self):
+ self._test_get_tenants_usage_with_one_date(
+ 'start=%s' % (NOW - datetime.timedelta(5)).isoformat())
+
class SimpleTenantUsageSerializerTest(test.TestCase):
def _verify_server_usage(self, raw_usage, tree):
diff --git a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py b/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py
index 59947e5922..a14e312cc9 100644
--- a/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py
+++ b/nova/tests/api/openstack/compute/contrib/test_virtual_interfaces.py
@@ -82,7 +82,7 @@ def test_vif_instance_not_found(self):
compute_api.API.get(fake_context, 'fake_uuid',
expected_attrs=None,
- want_objects=False).AndRaise(
+ want_objects=True).AndRaise(
exception.InstanceNotFound(instance_id='instance-0000'))
self.mox.ReplayAll()
diff --git a/nova/tests/api/openstack/compute/contrib/test_volumes.py b/nova/tests/api/openstack/compute/contrib/test_volumes.py
index 3c37a7f419..efa15c9d46 100644
--- a/nova/tests/api/openstack/compute/contrib/test_volumes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_volumes.py
@@ -97,7 +97,7 @@ def fake_compute_volume_snapshot_create(self, context, volume_id,
pass
-def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False):
+def fake_bdms_get_all_by_instance(context, instance_uuid, use_subordinate=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'id': 1,
'instance_uuid': instance_uuid,
diff --git a/nova/tests/api/openstack/compute/extensions/foxinsocks.py b/nova/tests/api/openstack/compute/extensions/foxinsocks.py
index 5785f1037a..7d1e273ea7 100644
--- a/nova/tests/api/openstack/compute/extensions/foxinsocks.py
+++ b/nova/tests/api/openstack/compute/extensions/foxinsocks.py
@@ -45,7 +45,7 @@ def _fail(self, req, id, body):
class FoxInSocksFlavorGooseControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
- #NOTE: This only handles JSON responses.
+ # NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
resp_obj.obj['flavor']['googoose'] = req.GET.get('chewing')
@@ -53,7 +53,7 @@ def show(self, req, resp_obj, id):
class FoxInSocksFlavorBandsControllerExtension(wsgi.Controller):
@wsgi.extends
def show(self, req, resp_obj, id):
- #NOTE: This only handles JSON responses.
+ # NOTE: This only handles JSON responses.
# You can use content type header to test for XML.
resp_obj.obj['big_bands'] = 'Pig Bands!'
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_admin_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_admin_actions.py
index 97265aa12c..728e85a943 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_admin_actions.py
@@ -196,16 +196,16 @@ def setUp(self):
self.context = self.request.environ['nova.context']
def test_no_state(self):
- self.assertRaises(webob.exc.HTTPBadRequest,
+ self.assertRaises(exception.ValidationError,
self.admin_api._reset_state,
self.request, self.uuid,
- {"reset_state": None})
+ body={"reset_state": None})
def test_bad_state(self):
- self.assertRaises(webob.exc.HTTPBadRequest,
+ self.assertRaises(exception.ValidationError,
self.admin_api._reset_state,
self.request, self.uuid,
- {"reset_state": {"state": "spam"}})
+ body={"reset_state": {"state": "spam"}})
def test_no_instance(self):
self.mox.StubOutWithMock(self.compute_api, 'get')
@@ -218,7 +218,7 @@ def test_no_instance(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.admin_api._reset_state,
self.request, self.uuid,
- {"reset_state": {"state": "active"}})
+ body={"reset_state": {"state": "active"}})
def _setup_mock(self, expected):
instance = objects.Instance()
@@ -248,8 +248,8 @@ def test_reset_active(self):
self.mox.ReplayAll()
body = {"reset_state": {"state": "active"}}
- result = self.admin_api._reset_state(self.request, self.uuid, body)
-
+ result = self.admin_api._reset_state(self.request, self.uuid,
+ body=body)
self.assertEqual(202, result.status_int)
def test_reset_error(self):
@@ -257,6 +257,6 @@ def test_reset_error(self):
task_state=None))
self.mox.ReplayAll()
body = {"reset_state": {"state": "error"}}
- result = self.admin_api._reset_state(self.request, self.uuid, body)
-
+ result = self.admin_api._reset_state(self.request, self.uuid,
+ body=body)
self.assertEqual(202, result.status_int)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_agents.py b/nova/tests/api/openstack/compute/plugins/v3/test_agents.py
index e7a07fed2d..a4b140214d 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_agents.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_agents.py
@@ -23,25 +23,25 @@
fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx1',
+ 'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx2',
+ 'url': 'http://example.com/path/to/resource2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx3',
+ 'url': 'http://example.com/path/to/resource3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'id': 4},
]
@@ -109,13 +109,13 @@ def test_agents_create(self):
'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1}}
res_dict = self.controller.create(req, body=body)
@@ -130,7 +130,7 @@ def test_agents_create_with_existed_agent(self):
'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exc.HTTPConflict, self.controller.create, req,
body=body)
@@ -141,7 +141,7 @@ def test_agents_create_without_md5hash(self):
'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx'}}
+ 'url': 'http://example.com/path/to/resource'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
@@ -160,7 +160,7 @@ def test_agents_create_without_version(self):
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
@@ -170,7 +170,7 @@ def test_agents_create_without_architecture(self):
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
@@ -180,7 +180,7 @@ def test_agents_create_without_os(self):
body = {'agent': {'hypervisor': 'kvm',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
@@ -190,7 +190,7 @@ def test_agents_create_without_hypervisor(self):
body = {'agent': {'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
@@ -213,7 +213,7 @@ def _test_agents_create_with_invalid_length(self, key):
'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'][key] = 'x' * 256
self.assertRaises(exception.ValidationError, self.controller.create,
@@ -247,25 +247,25 @@ def test_agents_list(self):
agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx1',
+ 'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx2',
+ 'url': 'http://example.com/path/to/resource2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'agent_id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx3',
+ 'url': 'http://example.com/path/to/resource3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'agent_id': 4},
]
@@ -277,13 +277,13 @@ def test_agents_list_with_hypervisor(self):
response = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
- 'url': 'xxx://xxxx/xxx/xxx1',
+ 'url': 'http://example.com/path/to/resource1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
]
@@ -292,11 +292,11 @@ def test_agents_list_with_hypervisor(self):
def test_agents_update(self):
req = FakeRequest()
body = {'agent': {'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'agent_id': 1,
'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
res_dict = self.controller.update(req, 1, body=body)
self.assertEqual(res_dict, response)
@@ -304,7 +304,7 @@ def test_agents_update(self):
def test_agents_update_without_md5hash(self):
req = FakeRequest()
body = {'agent': {'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx'}}
+ 'url': 'http://example.com/path/to/resource'}}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
@@ -335,7 +335,7 @@ def test_agents_update_with_empty(self):
def _test_agents_update_with_invalid_length(self, key):
req = FakeRequest()
body = {'agent': {'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'][key] = 'x' * 256
self.assertRaises(exception.ValidationError, self.controller.update,
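
These agents tests lean on JSON-schema request validation: a missing required
key or a value longer than 255 characters surfaces as exception.ValidationError
rather than a raw HTTPBadRequest. A minimal sketch of the same checks with the
plain jsonschema package (the schema below is illustrative, not Nova's actual
agent schema):

    import jsonschema

    schema = {
        'type': 'object',
        'properties': {
            'url': {'type': 'string', 'maxLength': 255},
            'md5hash': {'type': 'string', 'maxLength': 255},
        },
        'required': ['url', 'md5hash'],
    }

    # Well-formed input passes silently.
    jsonschema.validate({'url': 'http://example.com/agent',
                         'md5hash': 'abc'}, schema)

    # An over-long value is rejected, like the 'x' * 256 cases above.
    try:
        jsonschema.validate({'url': 'x' * 256, 'md5hash': 'abc'}, schema)
    except jsonschema.ValidationError as e:
        print('rejected: %s' % e.message)
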
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py b/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py
index 61db9eb2a7..5be8c0919f 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_aggregates.py
@@ -467,6 +467,18 @@ def stub_update_aggregate(context, aggregate, values):
self.assertEqual(AGGREGATE, result["aggregate"])
+ def test_set_metadata_delete(self):
+ body = {"set_metadata": {"metadata": {"foo": None}}}
+
+ with mock.patch.object(self.controller.api,
+ 'update_aggregate_metadata') as mocked:
+ mocked.return_value = AGGREGATE
+ result = self.controller._set_metadata(self.req, "1", body=body)
+
+ self.assertEqual(AGGREGATE, result["aggregate"])
+ mocked.assert_called_once_with(self.context, "1",
+ body["set_metadata"]["metadata"])
+
def test_set_metadata_no_admin(self):
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._set_metadata,
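
The added test_set_metadata_delete uses mock.patch.object as a context manager,
which replaces the method for the duration of the with block and records every
call for later assertions. A self-contained sketch of the pattern (the Api
class here is a stand-in, not Nova code):

    import mock

    class Api(object):
        def update_aggregate_metadata(self, context, aggregate_id, metadata):
            raise RuntimeError('should never run inside the test')

    api = Api()
    with mock.patch.object(Api, 'update_aggregate_metadata') as mocked:
        mocked.return_value = {'id': '1'}
        result = api.update_aggregate_metadata('ctxt', '1', {'foo': None})

    # The mock recorded the call; the real method never executed.
    mocked.assert_called_once_with('ctxt', '1', {'foo': None})
    assert result == {'id': '1'}
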
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py b/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py
deleted file mode 100644
index 51a4212fc2..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_attach_interfaces.py
+++ /dev/null
@@ -1,409 +0,0 @@
-# Copyright 2012 SINA Inc.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo.config import cfg
-
-from nova.api.openstack.compute.plugins.v3 import attach_interfaces
-from nova.compute import api as compute_api
-from nova import context
-from nova import exception
-from nova.network import api as network_api
-from nova.openstack.common import jsonutils
-from nova import test
-from nova.tests import fake_network_cache_model
-
-import webob
-from webob import exc
-
-
-CONF = cfg.CONF
-
-FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
-FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
-
-FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
-FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
-FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
-
-FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
-FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
-FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'
-
-port_data1 = {
- "id": FAKE_PORT_ID1,
- "network_id": FAKE_NET_ID1,
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "aa:aa:aa:aa:aa:aa",
- "fixed_ips": ["10.0.1.2"],
- "device_id": FAKE_UUID1,
-}
-
-port_data2 = {
- "id": FAKE_PORT_ID2,
- "network_id": FAKE_NET_ID2,
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "bb:bb:bb:bb:bb:bb",
- "fixed_ips": ["10.0.2.2"],
- "device_id": FAKE_UUID1,
-}
-
-port_data3 = {
- "id": FAKE_PORT_ID3,
- "network_id": FAKE_NET_ID3,
- "admin_state_up": True,
- "status": "ACTIVE",
- "mac_address": "bb:bb:bb:bb:bb:bb",
- "fixed_ips": ["10.0.2.2"],
- "device_id": '',
-}
-
-fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
-ports = [port_data1, port_data2, port_data3]
-
-
-def fake_list_ports(self, *args, **kwargs):
- result = []
- for port in ports:
- if port['device_id'] == kwargs['device_id']:
- result.append(port)
- return {'ports': result}
-
-
-def fake_show_port(self, context, port_id, **kwargs):
- for port in ports:
- if port['id'] == port_id:
- return {'port': port}
-
-
-def fake_attach_interface(self, context, instance, network_id, port_id,
- requested_ip='192.168.1.3'):
- if not network_id:
- # If no network_id is given when adding a port to an instance, use the
- # first default network.
- network_id = fake_networks[0]
- if network_id == 'bad_id':
- raise exception.NetworkNotFound(network_id=network_id)
- if not port_id:
- port_id = ports[fake_networks.index(network_id)]['id']
- vif = fake_network_cache_model.new_vif()
- vif['id'] = port_id
- vif['network']['id'] = network_id
- vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
- return vif
-
-
-def fake_detach_interface(self, context, instance, port_id):
- for port in ports:
- if port['id'] == port_id:
- return
- raise exception.PortNotFound(port_id=port_id)
-
-
-def fake_get_instance(self, *args, **kwargs):
- return {}
-
-
-class InterfaceAttachTests(test.NoDBTestCase):
- def setUp(self):
- super(InterfaceAttachTests, self).setUp()
- self.flags(auth_strategy=None, group='neutron')
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
- self.stubs.Set(network_api.API, 'show_port', fake_show_port)
- self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
- self.stubs.Set(compute_api.API, 'get', fake_get_instance)
- self.context = context.get_admin_context()
- self.expected_show = {'interface_attachment':
- {'net_id': FAKE_NET_ID1,
- 'port_id': FAKE_PORT_ID1,
- 'mac_addr': port_data1['mac_address'],
- 'port_state': port_data1['status'],
- 'fixed_ips': port_data1['fixed_ips'],
- }}
-
- def test_item_instance_not_found(self):
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v3/servers/fake/os-attach-interfaces/')
- req.method = 'GET'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- def fake_get_instance_exception(self, context, instance_uuid,
- **kwargs):
- raise exception.InstanceNotFound(instance_id=instance_uuid)
-
- self.stubs.Set(compute_api.API, 'get', fake_get_instance_exception)
- self.assertRaises(exc.HTTPNotFound, attachments.index,
- req, 'fake')
-
- def test_show(self):
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v3/servers/fake/os-attach-interfaces/show')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- result = attachments.show(req, FAKE_UUID1, FAKE_PORT_ID1)
- self.assertEqual(self.expected_show, result)
-
- def test_show_instance_not_found(self):
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v3/servers/fake/os-attach-interfaces/show')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- def fake_get_instance_exception(self, context, instance_uuid,
- **kwargs):
- raise exception.InstanceNotFound(instance_id=instance_uuid)
-
- self.stubs.Set(compute_api.API, 'get', fake_get_instance_exception)
- self.assertRaises(exc.HTTPNotFound, attachments.show,
- req, 'fake', FAKE_PORT_ID1)
-
- def test_show_invalid(self):
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank('/v3/servers/fake/os-attach-interfaces/show')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPNotFound,
- attachments.show, req, FAKE_UUID2, FAKE_PORT_ID1)
-
- def test_delete(self):
- self.stubs.Set(compute_api.API, 'detach_interface',
- fake_detach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/delete')
- req.method = 'DELETE'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- result = attachments.delete(req, FAKE_UUID1, FAKE_PORT_ID1)
- self.assertEqual('202 Accepted', result.status)
-
- def test_detach_interface_instance_locked(self):
- def fake_detach_interface_from_locked_server(self, context,
- instance, port_id):
- raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
-
- self.stubs.Set(compute_api.API,
- 'detach_interface',
- fake_detach_interface_from_locked_server)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/delete')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPConflict,
- attachments.delete,
- req,
- FAKE_UUID1,
- FAKE_PORT_ID1)
-
- def test_delete_interface_not_found(self):
- self.stubs.Set(compute_api.API, 'detach_interface',
- fake_detach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/delete')
- req.method = 'DELETE'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- self.assertRaises(exc.HTTPNotFound,
- attachments.delete,
- req,
- FAKE_UUID1,
- 'invalid-port-id')
-
- def test_delete_instance_not_found(self):
- self.stubs.Set(compute_api.API, 'detach_interface',
- fake_detach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/delete')
- req.method = 'DELETE'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- def fake_get_instance_exception(self, context, instance_uuid,
- **kwargs):
- raise exception.InstanceNotFound(instance_id=instance_uuid)
-
- self.stubs.Set(compute_api.API, 'get', fake_get_instance_exception)
- self.assertRaises(exc.HTTPNotFound,
- attachments.delete,
- req,
- 'fake',
- 'invalid-port-id')
-
- def test_attach_interface_instance_locked(self):
- def fake_attach_interface_to_locked_server(self, context,
- instance, network_id, port_id, requested_ip):
- raise exception.InstanceIsLocked(instance_uuid=FAKE_UUID1)
-
- self.stubs.Set(compute_api.API,
- 'attach_interface',
- fake_attach_interface_to_locked_server)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPConflict,
- attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
-
- def test_attach_interface_without_network_id(self):
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- result = attachments.create(req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
- self.assertEqual(result['interface_attachment']['net_id'],
- FAKE_NET_ID1)
-
- def test_attach_interface_with_network_id(self):
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({'interface_attachment':
- {'net_id': FAKE_NET_ID2}})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- result = attachments.create(req,
- FAKE_UUID1, body=jsonutils.loads(req.body))
- self.assertEqual(result['interface_attachment']['net_id'],
- FAKE_NET_ID2)
-
- def test_attach_interface_with_port_and_network_id(self):
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({'interface_attachment':
- {'port_id': FAKE_PORT_ID1,
- 'net_id': FAKE_NET_ID2}})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPBadRequest,
- attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
-
- def test_attach_interface_instance_not_found(self):
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({'interface_attachment':
- {'net_id': FAKE_NET_ID2}})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
-
- def fake_get_instance_exception(self, context, instance_uuid,
- **kwargs):
- raise exception.InstanceNotFound(instance_id=instance_uuid)
-
- self.stubs.Set(compute_api.API, 'get', fake_get_instance_exception)
- self.assertRaises(exc.HTTPNotFound,
- attachments.create, req, 'fake',
- body=jsonutils.loads(req.body))
-
- def _test_attach_interface_with_invalid_parameter(self, param):
- self.stubs.Set(compute_api.API, 'attach_interface',
- fake_attach_interface)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({'interface_attachment': param})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exception.ValidationError,
- attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
-
- def test_attach_interface_instance_with_non_uuid_net_id(self):
- param = {'net_id': 'non_uuid'}
- self._test_attach_interface_with_invalid_parameter(param)
-
- def test_attach_interface_instance_with_non_uuid_port_id(self):
- param = {'port_id': 'non_uuid'}
- self._test_attach_interface_with_invalid_parameter(param)
-
- def test_attach_interface_instance_with_non_array_fixed_ips(self):
- param = {'fixed_ips': 'non_array'}
- self._test_attach_interface_with_invalid_parameter(param)
-
-
-class InterfaceAttachTestsWithMock(test.NoDBTestCase):
- def setUp(self):
- super(InterfaceAttachTestsWithMock, self).setUp()
- self.flags(auth_strategy=None, group='neutron')
- self.flags(url='http://anyhost/', group='neutron')
- self.flags(url_timeout=30, group='neutron')
- self.context = context.get_admin_context()
-
- @mock.patch.object(compute_api.API, 'get')
- @mock.patch.object(compute_api.API, 'attach_interface')
- def test_attach_interface_fixed_ip_already_in_use(self,
- attach_mock,
- get_mock):
- get_mock.side_effect = fake_get_instance
- attach_mock.side_effect = exception.FixedIpAlreadyInUse(
- address='10.0.3.2', instance_uuid=FAKE_UUID1)
- attachments = attach_interfaces.InterfaceAttachmentController()
- req = webob.Request.blank(
- '/v3/servers/fake/os-attach-interfaces/attach')
- req.method = 'POST'
- req.body = jsonutils.dumps({})
- req.headers['content-type'] = 'application/json'
- req.environ['nova.context'] = self.context
- self.assertRaises(exc.HTTPBadRequest,
- attachments.create, req, FAKE_UUID1,
- body=jsonutils.loads(req.body))
- attach_mock.assert_called_once_with(self.context, {}, None, None, None)
- get_mock.assert_called_once_with(self.context, FAKE_UUID1,
- want_objects=True,
- expected_attrs=None)
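
For reference, the removed module built bare WSGI requests by hand instead of
going through the fakes helpers. A condensed sketch of that pattern, assuming
only webob is installed (json stands in for Nova's jsonutils wrapper, and
'some-net' is a placeholder value):

    import json

    import webob

    req = webob.Request.blank('/v3/servers/fake/os-attach-interfaces/attach')
    req.method = 'POST'
    # .encode() keeps this working on Python 3, where req.body must be bytes.
    req.body = json.dumps(
        {'interface_attachment': {'net_id': 'some-net'}}).encode('utf-8')
    req.headers['content-type'] = 'application/json'

    # Controllers under test then receive the parsed body as a kwarg.
    body = json.loads(req.body)
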
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_availability_zone.py b/nova/tests/api/openstack/compute/plugins/v3/test_availability_zone.py
index ef41a7ef7b..4516e0660b 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_availability_zone.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_availability_zone.py
@@ -344,9 +344,9 @@ def queue_get_for(context, *args):
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
+ server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
if no_image:
- server.pop('image_ref', None)
+ server.pop('imageRef', None)
server.update(params)
body = dict(server=server)
req = fakes.HTTPRequestV3.blank('/v3/servers')
@@ -384,8 +384,8 @@ def create(*args, **kwargs):
body = {
'server': {
'name': 'config_drive_test',
- 'image_ref': image_href,
- 'flavor_ref': flavor_ref,
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
@@ -416,8 +416,8 @@ def test_create_instance_without_availability_zone(self):
body = {
'server': {
'name': 'config_drive_test',
- 'image_ref': image_href,
- 'flavor_ref': flavor_ref,
+ 'imageRef': image_href,
+ 'flavorRef': flavor_ref,
'metadata': {
'hello': 'world',
'open': 'stack',
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py b/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py
index 29530c4174..639b808f0d 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_block_device_mapping.py
@@ -62,8 +62,8 @@ def _test_create(self, params, no_image=False, override_controller=None):
'server': {
'min_count': 2,
'name': 'server_test',
- 'image_ref': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
- 'flavor_ref': 'http://localhost/123/flavors/3',
+ 'imageRef': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
+ 'flavorRef': 'http://localhost/123/flavors/3',
'metadata': {
'hello': 'world',
'open': 'stack',
@@ -72,7 +72,7 @@ def _test_create(self, params, no_image=False, override_controller=None):
}
if no_image:
- del body['server']['image_ref']
+ del body['server']['imageRef']
body['server'].update(params)
@@ -109,7 +109,7 @@ def test_create_instance_with_volumes_enabled_no_image(self):
old_create = compute_api.API.create
def create(*args, **kwargs):
- self.assertNotIn('image_ref', kwargs)
+ self.assertNotIn('imageRef', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
@@ -153,7 +153,8 @@ def create(*args, **kwargs):
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
def test_create_instance_with_device_name_empty(self):
self.bdm[0]['device_name'] = ''
@@ -167,7 +168,8 @@ def create(*args, **kwargs):
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
def test_create_instance_with_device_name_too_long(self):
self.bdm[0]['device_name'] = 'a' * 256
@@ -181,7 +183,8 @@ def create(*args, **kwargs):
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
def test_create_instance_with_space_in_device_name(self):
self.bdm[0]['device_name'] = 'v da'
@@ -196,7 +199,8 @@ def create(*args, **kwargs):
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
def test_create_instance_with_invalid_size(self):
self.bdm[0]['volume_size'] = 'hello world'
@@ -210,7 +214,8 @@ def create(*args, **kwargs):
self.stubs.Set(compute_api.API, 'create', create)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
def test_create_instance_bdm(self):
bdm = [{
@@ -241,7 +246,7 @@ def _validate_bdm(*args, **kwargs):
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
params = {block_device_mapping.ATTRIBUTE_NAME: bdm}
- self._test_create(params)
+ self._test_create(params, no_image=True)
def test_create_instance_bdm_missing_device_name(self):
del self.bdm[0]['device_name']
@@ -261,7 +266,7 @@ def _validate_bdm(*args, **kwargs):
self.stubs.Set(compute_api.API, '_validate_bdm', _validate_bdm)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self._test_create(params)
+ self._test_create(params, no_image=True)
def test_create_instance_bdm_validation_error(self):
def _validate(*args, **kwargs):
@@ -271,7 +276,8 @@ def _validate(*args, **kwargs):
'_validate', _validate)
params = {block_device_mapping.ATTRIBUTE_NAME: self.bdm}
- self.assertRaises(exc.HTTPBadRequest, self._test_create, params)
+ self.assertRaises(exc.HTTPBadRequest,
+ self._test_create, params, no_image=True)
@mock.patch('nova.compute.api.API._get_bdm_image_metadata')
def test_create_instance_non_bootable_volume_fails(self, fake_bdm_meta):
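
The no_image=True fix-ups above work because unittest's assertRaises forwards
any extra positional and keyword arguments straight to the callable, so the
flag reaches _test_create unchanged. A tiny sketch of that forwarding:

    import unittest

    class Demo(unittest.TestCase):
        def _boom(self, params, no_image=False):
            if no_image:
                raise ValueError('no image supplied')

        def test_forwarding(self):
            # Keyword args after the callable are passed through by
            # assertRaises, exactly as in the hunks above.
            self.assertRaises(ValueError, self._boom, {}, no_image=True)

    if __name__ == '__main__':
        unittest.main()
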
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_cells.py b/nova/tests/api/openstack/compute/plugins/v3/test_cells.py
index 874b1ffc08..404525ebb0 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_cells.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_cells.py
@@ -128,7 +128,7 @@ def test_get_cell_by_name(self):
self.assertEqual(cell['rpc_host'], 'r1.example.org')
self.assertNotIn('password', cell)
- def test_cell_delete(self):
+ def _cell_delete(self):
call_info = {'delete_called': 0}
def fake_cell_delete(inst, context, cell_name):
@@ -138,9 +138,20 @@ def fake_cell_delete(inst, context, cell_name):
self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
self.controller.delete(req, 'cell999')
self.assertEqual(call_info['delete_called'], 1)
+ def test_cell_delete(self):
+ # Test delete with just cells policy
+ rules = {"default": "is_admin:true",
+ "compute_extension:v3:os-cells": "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_delete()
+
+ def test_cell_delete_with_delete_policy(self):
+ self._cell_delete()
+
def test_delete_bogus_cell_raises(self):
def fake_cell_delete(inst, context, cell_name):
return 0
@@ -152,17 +163,28 @@ def fake_cell_delete(inst, context, cell_name):
self.assertRaises(exc.HTTPNotFound, self.controller.delete, req,
'cell999')
- def test_cell_create_parent(self):
+ def test_cell_delete_fails_for_invalid_policy(self):
+ def fake_cell_delete(inst, context, cell_name):
+ pass
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'cell_delete', fake_cell_delete)
+
+ req = self._get_request("cells/cell999")
+ req.environ['nova.context'] = self.context
+ req.environ["nova.context"].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.delete, req, 'cell999')
+
+ def _cell_create_parent(self):
body = {'cell': {'name': 'meow',
'username': 'fred',
'password': 'fubar',
'rpc_host': 'r3.example.org',
- 'type': 'parent',
- # Also test this is ignored/stripped
- 'is_parent': False}}
+ 'type': 'parent'}}
req = self._get_request("cells")
- res_dict = self.controller.create(req, body)
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.create(req, body=body)
cell = res_dict['cell']
self.assertEqual(self.controller.create.wsgi_code, 201)
self.assertEqual(cell['name'], 'meow')
@@ -170,9 +192,18 @@ def test_cell_create_parent(self):
self.assertEqual(cell['rpc_host'], 'r3.example.org')
self.assertEqual(cell['type'], 'parent')
self.assertNotIn('password', cell)
- self.assertNotIn('is_parent', cell)
- def test_cell_create_child(self):
+ def test_cell_create_parent(self):
+ # Test create with just cells policy
+ rules = {"default": "is_admin:true",
+ "compute_extension:v3:os-cells": "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_create_parent()
+
+ def test_cell_create_parent_with_create_policy(self):
+ self._cell_create_parent()
+
+ def _cell_create_child(self):
body = {'cell': {'name': 'meow',
'username': 'fred',
'password': 'fubar',
@@ -180,7 +211,8 @@ def test_cell_create_child(self):
'type': 'child'}}
req = self._get_request("cells")
- res_dict = self.controller.create(req, body)
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.create(req, body=body)
cell = res_dict['cell']
self.assertEqual(self.controller.create.wsgi_code, 201)
self.assertEqual(cell['name'], 'meow')
@@ -190,6 +222,16 @@ def test_cell_create_child(self):
self.assertNotIn('password', cell)
self.assertNotIn('is_parent', cell)
+ def test_cell_create_child(self):
+ # Test create child with just cells policy
+ rules = {"default": "is_admin:true",
+ "compute_extension:v3:os-cells": "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_create_child()
+
+ def test_cell_create_child_with_create_policy(self):
+ self._cell_create_child()
+
def test_cell_create_no_name_raises(self):
body = {'cell': {'username': 'moocow',
'password': 'secret',
@@ -197,8 +239,9 @@ def test_cell_create_no_name_raises(self):
'type': 'parent'}}
req = self._get_request("cells")
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create, req, body)
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
def test_cell_create_name_empty_string_raises(self):
body = {'cell': {'name': '',
@@ -208,8 +251,9 @@ def test_cell_create_name_empty_string_raises(self):
'type': 'parent'}}
req = self._get_request("cells")
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create, req, body)
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
def test_cell_create_name_with_bang_raises(self):
body = {'cell': {'name': 'moo!cow',
@@ -219,19 +263,9 @@ def test_cell_create_name_with_bang_raises(self):
'type': 'parent'}}
req = self._get_request("cells")
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create, req, body)
-
- def test_cell_create_name_with_dot_raises(self):
- body = {'cell': {'name': 'moo.cow',
- 'username': 'fred',
- 'password': 'secret',
- 'rpc_host': 'r3.example.org',
- 'type': 'parent'}}
-
- req = self._get_request("cells")
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create, req, body)
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
def test_cell_create_name_with_invalid_type_raises(self):
body = {'cell': {'name': 'moocow',
@@ -241,15 +275,25 @@ def test_cell_create_name_with_invalid_type_raises(self):
'type': 'invalid'}}
req = self._get_request("cells")
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.create, req, body)
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, body=body)
- def test_cell_update(self):
+ def test_cell_create_fails_for_invalid_policy(self):
+ body = {'cell': {'name': 'fake'}}
+ req = self._get_request("cells")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req, body=body)
+
+ def _cell_update(self):
body = {'cell': {'username': 'zeb',
'password': 'sneaky'}}
req = self._get_request("cells/cell1")
- res_dict = self.controller.update(req, 'cell1', body)
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.update(req, 'cell1', body=body)
cell = res_dict['cell']
self.assertEqual(cell['name'], 'cell1')
@@ -257,14 +301,33 @@ def test_cell_update(self):
self.assertEqual(cell['username'], 'zeb')
self.assertNotIn('password', cell)
+ def test_cell_update(self):
+ # Test update with just cells policy
+ rules = {"default": "is_admin:true",
+ "compute_extension:v3:os-cells": "is_admin:true"}
+ self.policy.set_rules(rules)
+ self._cell_update()
+
+ def test_cell_update_with_update_policy(self):
+ self._cell_update()
+
+ def test_cell_update_fails_for_invalid_policy(self):
+ body = {'cell': {'name': 'got_changed'}}
+ req = self._get_request("cells/cell1")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.create, req, body=body)
+
def test_cell_update_empty_name_raises(self):
body = {'cell': {'name': '',
'username': 'zeb',
'password': 'sneaky'}}
req = self._get_request("cells/cell1")
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.update, req, 'cell1', body)
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exception.ValidationError,
+ self.controller.update, req, 'cell1', body=body)
def test_cell_update_invalid_type_raises(self):
body = {'cell': {'username': 'zeb',
@@ -272,14 +335,16 @@ def test_cell_update_invalid_type_raises(self):
'password': 'sneaky'}}
req = self._get_request("cells/cell1")
- self.assertRaises(exc.HTTPBadRequest,
- self.controller.update, req, 'cell1', body)
+ req.environ['nova.context'] = self.context
+ self.assertRaises(exception.ValidationError,
+ self.controller.update, req, 'cell1', body=body)
def test_cell_update_without_type_specified(self):
body = {'cell': {'username': 'wingwj'}}
req = self._get_request("cells/cell1")
- res_dict = self.controller.update(req, 'cell1', body)
+ req.environ['nova.context'] = self.context
+ res_dict = self.controller.update(req, 'cell1', body=body)
cell = res_dict['cell']
self.assertEqual(cell['name'], 'cell1')
@@ -292,11 +357,13 @@ def test_cell_update_with_type_specified(self):
body2 = {'cell': {'username': 'wingwj', 'type': 'parent'}}
req1 = self._get_request("cells/cell1")
- res_dict1 = self.controller.update(req1, 'cell1', body1)
+ req1.environ['nova.context'] = self.context
+ res_dict1 = self.controller.update(req1, 'cell1', body=body1)
cell1 = res_dict1['cell']
req2 = self._get_request("cells/cell2")
- res_dict2 = self.controller.update(req2, 'cell2', body2)
+ req2.environ['nova.context'] = self.context
+ res_dict2 = self.controller.update(req2, 'cell2', body=body2)
cell2 = res_dict2['cell']
self.assertEqual(cell1['name'], 'cell1')
@@ -343,7 +410,7 @@ def test_show_capacities(self):
self.assertEqual(response, res_dict['cell']['capacities'])
def test_show_capacity_fails_with_non_admin_context(self):
- rules = {"compute_extension:cells": "is_admin:true"}
+ rules = {"compute_extension:v3:os-cells": "is_admin:true"}
self.policy.set_rules(rules)
self.mox.ReplayAll()
@@ -397,6 +464,7 @@ def sync_instances(self, context, **kwargs):
self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
req = self._get_request("cells/sync_instances")
+ req.environ['nova.context'] = self.context
body = {}
self.controller.sync_instances(req, body=body)
self.assertIsNone(call_info['project_id'])
@@ -417,7 +485,7 @@ def sync_instances(self, context, **kwargs):
self.assertEqual(call_info['updated_since'], expected)
body = {'updated_since': 'skjdfkjsdkf'}
- self.assertRaises(exc.HTTPBadRequest,
+ self.assertRaises(exception.ValidationError,
self.controller.sync_instances, req, body=body)
body = {'deleted': False}
@@ -439,13 +507,27 @@ def sync_instances(self, context, **kwargs):
self.assertEqual(call_info['deleted'], True)
body = {'deleted': 'foo'}
- self.assertRaises(exc.HTTPBadRequest,
+ self.assertRaises(exception.ValidationError,
self.controller.sync_instances, req, body=body)
body = {'foo': 'meow'}
- self.assertRaises(exc.HTTPBadRequest,
+ self.assertRaises(exception.ValidationError,
self.controller.sync_instances, req, body=body)
+ def test_sync_instances_fails_for_invalid_policy(self):
+ def sync_instances(self, context, **kwargs):
+ pass
+
+ self.stubs.Set(cells_rpcapi.CellsAPI, 'sync_instances', sync_instances)
+
+ req = self._get_request("cells/sync_instances")
+ req.environ['nova.context'] = self.context
+ req.environ['nova.context'].is_admin = False
+
+ body = {}
+ self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller.sync_instances, req, body=body)
+
def test_cells_disabled(self):
self.flags(enable=False, group='cells')
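
The new cells tests exercise authorization itself instead of stubbing it out:
they install a rule set via self.policy.set_rules() and flip is_admin on the
request context to trigger PolicyNotAuthorized. A rough sketch of the idea
with a plain dict standing in for Nova's policy engine (all names here are
illustrative):

    class PolicyNotAuthorized(Exception):
        pass

    RULES = {'compute_extension:v3:os-cells': 'is_admin:true'}

    def enforce(context, action):
        # Toy check: the real engine parses rule strings; this only honours
        # the single is_admin:true form used by the tests above.
        if RULES.get(action) == 'is_admin:true' and not context.get('is_admin'):
            raise PolicyNotAuthorized(action)

    enforce({'is_admin': True}, 'compute_extension:v3:os-cells')  # passes
    try:
        enforce({'is_admin': False}, 'compute_extension:v3:os-cells')
    except PolicyNotAuthorized:
        print('rejected, as the *_fails_for_invalid_policy tests expect')
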
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py b/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py
deleted file mode 100644
index ac4295dc6f..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_config_drive.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import uuid
-
-from oslo.config import cfg
-import webob
-
-from nova.api.openstack.compute import plugins
-from nova.api.openstack.compute.plugins.v3 import config_drive
-from nova.api.openstack.compute.plugins.v3 import servers
-from nova.compute import api as compute_api
-from nova.compute import flavors
-from nova import db
-from nova.network import manager
-from nova.openstack.common import jsonutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-from nova.tests.image import fake
-
-
-CONF = cfg.CONF
-FAKE_UUID = fakes.FAKE_UUID
-
-
-def fake_gen_uuid():
- return FAKE_UUID
-
-
-def return_security_group(context, instance_id, security_group_id):
- pass
-
-
-class ConfigDriveTest(test.TestCase):
-
- def setUp(self):
- super(ConfigDriveTest, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
- fake.stub_out_image_service(self.stubs)
-
- def test_show(self):
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get())
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get())
- req = webob.Request.blank('/v3/servers/1')
- req.headers['Content-Type'] = 'application/json'
- response = req.get_response(fakes.wsgi_app_v3(
- init_only=('servers', 'os-config-drive')))
- self.assertEqual(response.status_int, 200)
- res_dict = jsonutils.loads(response.body)
- self.assertIn(config_drive.ATTRIBUTE_NAME, res_dict['server'])
-
- def test_detail_servers(self):
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fakes.fake_instance_get_all_by_filters())
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get())
- req = fakes.HTTPRequestV3.blank('/v3/servers/detail')
- res = req.get_response(fakes.wsgi_app_v3(
- init_only=('servers', 'os-config-drive')))
- server_dicts = jsonutils.loads(res.body)['servers']
- self.assertNotEqual(len(server_dicts), 0)
- for server_dict in server_dicts:
- self.assertIn(config_drive.ATTRIBUTE_NAME, server_dict)
-
-
-class ServersControllerCreateTest(test.TestCase):
-
- def setUp(self):
- """Shared implementation for tests below that create instance."""
- super(ServersControllerCreateTest, self).setUp()
-
- self.flags(verbose=True,
- enable_instance_password=True)
- self.instance_cache_num = 0
- self.instance_cache_by_id = {}
- self.instance_cache_by_uuid = {}
-
- ext_info = plugins.LoadedExtensionInfo()
- self.controller = servers.ServersController(extension_info=ext_info)
- CONF.set_override('extensions_blacklist', 'os-config-drive',
- 'osapi_v3')
- self.no_config_drive_controller = servers.ServersController(
- extension_info=ext_info)
-
- def instance_create(context, inst):
- inst_type = flavors.get_flavor_by_flavor_id(3)
- image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- def_image_ref = 'http://localhost/images/%s' % image_uuid
- self.instance_cache_num += 1
- instance = fake_instance.fake_db_instance(**{
- 'id': self.instance_cache_num,
- 'display_name': inst['display_name'] or 'test',
- 'uuid': FAKE_UUID,
- 'instance_type': dict(inst_type),
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fead::1234',
- 'image_ref': inst.get('image_ref', def_image_ref),
- 'user_id': 'fake',
- 'project_id': 'fake',
- 'reservation_id': inst['reservation_id'],
- "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
- "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
- "config_drive": None,
- "progress": 0,
- "fixed_ips": [],
- "task_state": "",
- "vm_state": "",
- "root_device_name": inst.get('root_device_name', 'vda'),
- })
-
- self.instance_cache_by_id[instance['id']] = instance
- self.instance_cache_by_uuid[instance['uuid']] = instance
- return instance
-
- def instance_get(context, instance_id):
- """Stub for compute/api create() pulling in instance after
- scheduling
- """
- return self.instance_cache_by_id[instance_id]
-
- def instance_update(context, uuid, values):
- instance = self.instance_cache_by_uuid[uuid]
- instance.update(values)
- return instance
-
- def server_update(context, instance_uuid, params):
- inst = self.instance_cache_by_uuid[instance_uuid]
- inst.update(params)
- return (inst, inst)
-
- def fake_method(*args, **kwargs):
- pass
-
- def project_get_networks(context, user_id):
- return dict(id='1', host='localhost')
-
- def queue_get_for(context, *args):
- return 'network_topic'
-
- fakes.stub_out_rate_limiting(self.stubs)
- fakes.stub_out_key_pair_funcs(self.stubs)
- fake.stub_out_image_service(self.stubs)
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'instance_add_security_group',
- return_security_group)
- self.stubs.Set(db, 'project_get_networks',
- project_get_networks)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_system_metadata_update',
- fake_method)
- self.stubs.Set(db, 'instance_get', instance_get)
- self.stubs.Set(db, 'instance_update', instance_update)
- self.stubs.Set(db, 'instance_update_and_get_original',
- server_update)
- self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
- fake_method)
-
- def _test_create_extra(self, params, no_image=False,
- override_controller=None):
- image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
- server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2)
- if no_image:
- server.pop('image_ref', None)
- server.update(params)
- body = dict(server=server)
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- if override_controller:
- server = override_controller.create(req, body=body).obj['server']
- else:
- server = self.controller.create(req, body=body).obj['server']
-
- def test_create_instance_with_config_drive_disabled(self):
- params = {config_drive.ATTRIBUTE_NAME: "False"}
- old_create = compute_api.API.create
-
- def create(*args, **kwargs):
- self.assertNotIn('config_drive', kwargs)
- return old_create(*args, **kwargs)
-
- self.stubs.Set(compute_api.API, 'create', create)
- self._test_create_extra(params,
- override_controller=self.no_config_drive_controller)
-
- def test_create_instance_with_config_drive(self):
- def create(*args, **kwargs):
- self.assertIn('config_drive', kwargs)
- return old_create(*args, **kwargs)
-
- old_create = compute_api.API.create
- self.stubs.Set(compute_api.API, 'create', create)
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/v3/flavors/3'
- body = {
- 'server': {
- 'name': 'config_drive_test',
- 'image_ref': image_href,
- 'flavor_ref': flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- config_drive.ATTRIBUTE_NAME: "true",
- },
- }
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, body=body).obj
-
- server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
-
- def test_create_instance_with_bad_config_drive(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/v3/flavors/3'
- body = {
- 'server': {
- 'name': 'config_drive_test',
- 'image_ref': image_href,
- 'flavor_ref': flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- config_drive.ATTRIBUTE_NAME: image_href,
- },
- }
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
-
- self.assertRaises(webob.exc.HTTPBadRequest,
- self.controller.create, req, body=body)
-
- def test_create_instance_without_config_drive(self):
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/v3/flavors/3'
- body = {
- 'server': {
- 'name': 'config_drive_test',
- 'image_ref': image_href,
- 'flavor_ref': flavor_ref,
- 'metadata': {
- 'hello': 'world',
- 'open': 'stack',
- },
- },
- }
-
- req = fakes.HTTPRequestV3.blank('/servers')
- req.method = 'POST'
- req.body = jsonutils.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, body=body).obj
-
- server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py b/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py
index 2f1f1ffc4d..cc168739ef 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_evacuate.py
@@ -14,6 +14,7 @@
import uuid
+import mock
from oslo.config import cfg
import webob
@@ -82,11 +83,14 @@ def _gen_request_with_app(self, json_load, is_admin=True):
return req, app
- def test_evacuate_instance_with_no_target(self):
+ @mock.patch('nova.compute.api.API.evacuate')
+ def test_evacuate_instance_with_no_target(self, evacuate_mock):
req, app = self._gen_request_with_app({'on_shared_storage': 'False',
'admin_password': 'MyNewPass'})
res = req.get_response(app)
- self.assertEqual(400, res.status_int)
+ self.assertEqual(202, res.status_int)
+ evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None,
+ mock.ANY, mock.ANY)
def test_evacuate_instance_with_empty_host(self):
req, app = self._gen_request_with_app({'host': '',
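
The rewritten evacuate test shows the decorator form of mock.patch: the
replaced target is injected into the test as an extra argument, and mock.ANY
pins down only the argument that matters, the now-optional target host. A
minimal sketch along the same lines (the evacuate function is a stand-in):

    import mock

    def evacuate(context, instance, host, on_shared_storage, password):
        raise RuntimeError('never reached; the decorator patches this out')

    @mock.patch(__name__ + '.evacuate')
    def run(evacuate_mock):
        evacuate('ctxt', 'inst', None, False, 'MyNewPass')
        # Assert only that host was None; mock.ANY matches everything else.
        evacuate_mock.assert_called_once_with(mock.ANY, mock.ANY, None,
                                              mock.ANY, mock.ANY)

    run()
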
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_extended_server_attributes.py b/nova/tests/api/openstack/compute/plugins/v3/test_extended_server_attributes.py
deleted file mode 100644
index ab9bad4b04..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_extended_server_attributes.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-
-from nova.api.openstack.compute.plugins.v3 import extended_server_attributes
-from nova import compute
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova.openstack.common import jsonutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-from oslo.config import cfg
-
-
-NAME_FMT = cfg.CONF.instance_name_template
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, host="host-fake",
- node="node-fake")
- return fake_instance.fake_instance_obj(args[1],
- expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, **inst)
-
-
-def fake_compute_get_all(*args, **kwargs):
- db_list = [
- fakes.stub_instance(1, uuid=UUID1, host="host-1", node="node-1"),
- fakes.stub_instance(2, uuid=UUID2, host="host-2", node="node-2")
- ]
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-class ExtendedServerAttributesTest(test.TestCase):
- content_type = 'application/json'
- prefix = '%s:' % extended_server_attributes.ExtendedServerAttributes.alias
-
- def setUp(self):
- super(ExtendedServerAttributesTest, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.stubs.Set(db, 'instance_get_by_uuid', fake_compute_get)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(
- fakes.wsgi_app_v3(init_only=('servers',
- 'os-extended-server-attributes')))
- return res
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def assertServerAttributes(self, server, host, node, instance_name):
- self.assertEqual(server.get('%shost' % self.prefix), host)
- self.assertEqual(server.get('%sinstance_name' % self.prefix),
- instance_name)
- self.assertEqual(server.get('%shypervisor_hostname' % self.prefix),
- node)
-
- def test_show(self):
- url = '/v3/servers/%s' % UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertServerAttributes(self._get_server(res.body),
- host='host-fake',
- node='node-fake',
- instance_name=NAME_FMT % (1))
-
- def test_detail(self):
- url = '/v3/servers/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- for i, server in enumerate(self._get_servers(res.body)):
- self.assertServerAttributes(server,
- host='host-%s' % (i + 1),
- node='node-%s' % (i + 1),
- instance_name=NAME_FMT % (i + 1))
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = '/v3/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 404)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py b/nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py
deleted file mode 100644
index 32053abd9d..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2011 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import webob
-
-from nova import compute
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova.openstack.common import jsonutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-UUID1 = '00000000-0000-0000-0000-000000000001'
-UUID2 = '00000000-0000-0000-0000-000000000002'
-UUID3 = '00000000-0000-0000-0000-000000000003'
-
-
-def fake_compute_get(*args, **kwargs):
- inst = fakes.stub_instance(1, uuid=UUID3, task_state="kayaking",
- vm_state="slightly crunchy", power_state=1, locked_by='owner')
- return fake_instance.fake_instance_obj(args[1], **inst)
-
-
-def fake_compute_get_all(*args, **kwargs):
- db_list = [
- fakes.stub_instance(1, uuid=UUID1, task_state="task-1",
- vm_state="vm-1", power_state=1, locked_by=None),
- fakes.stub_instance(2, uuid=UUID2, task_state="task-2",
- vm_state="vm-2", power_state=2, locked_by='admin'),
- ]
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(args[1],
- objects.InstanceList(),
- db_list, fields)
-
-
-class ExtendedStatusTest(test.TestCase):
- content_type = 'application/json'
- prefix = 'os-extended-status:'
-
- def setUp(self):
- super(ExtendedStatusTest, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v3(
- init_only=('servers',
- 'os-extended-status')))
- return res
-
- def _get_server(self, body):
- return jsonutils.loads(body).get('server')
-
- def _get_servers(self, body):
- return jsonutils.loads(body).get('servers')
-
- def assertServerStates(self, server, vm_state, power_state, task_state,
- locked_by):
- self.assertEqual(server.get('%svm_state' % self.prefix), vm_state)
- self.assertEqual(int(server.get('%spower_state' % self.prefix)),
- power_state)
- self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
- self.assertEqual(str(server.get('%slocked_by' % self.prefix)),
- locked_by)
-
- def test_show(self):
- url = '/v3/servers/%s' % UUID3
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- self.assertServerStates(self._get_server(res.body),
- vm_state='slightly crunchy',
- power_state=1,
- task_state='kayaking',
- locked_by='owner')
-
- def test_detail(self):
- url = '/v3/servers/detail'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 200)
- for i, server in enumerate(self._get_servers(res.body)):
- self.assertServerStates(server,
- vm_state='vm-%s' % (i + 1),
- power_state=(i + 1),
- task_state='task-%s' % (i + 1),
- locked_by=['None', 'admin'][i])
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- url = '/v3/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
- res = self._make_request(url)
-
- self.assertEqual(res.status_int, 404)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py
index c2b310cb1c..595bc7321d 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_access.py
@@ -83,7 +83,7 @@ def _has_flavor_access(flavorid, projectid):
def fake_get_all_flavors_sorted_list(context, inactive=False,
filters=None, sort_key='flavorid',
sort_dir='asc', limit=None, marker=None):
- if filters == None or filters['is_public'] == None:
+ if filters is None or filters['is_public'] is None:
return sorted(INSTANCE_TYPES.values(), key=lambda item: item[sort_key])
res = {}
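
This hunk is a pure style fix: PEP 8 mandates identity comparison with None
because == can be overridden by the operand's class, while is cannot. A quick
illustration of why the distinction matters:

    class Weird(object):
        def __eq__(self, other):
            return True  # claims equality with everything, including None

    w = Weird()
    print(w == None)  # True  -- misleading result from the overloaded __eq__
    print(w is None)  # False -- identity cannot be faked
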
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py
index 61513ceece..466caca4f2 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavor_manage.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import datetime
import webob
@@ -167,16 +168,24 @@ def test_create_without_flavor(self):
body = {'foo': None}
self._test_create_bad_request(body)
- def test_create_without_flavorid(self):
- expected = self.expected_flavor
- expected['flavor']['id'] = None
-
- res = self._create_flavor_helper(expected)
+ def _test_create_with_autogenerated_flavorid(self, request_body):
+ res = self._create_flavor_helper(request_body)
body = jsonutils.loads(res.body)
- for key in expected["flavor"]:
+ for key in self.expected_flavor["flavor"]:
if key != 'id':
- self.assertEqual(body["flavor"][key], expected["flavor"][key])
+ self.assertEqual(body["flavor"][key],
+ self.expected_flavor["flavor"][key])
+
+ def test_create_with_none_flavorid(self):
+ body = copy.deepcopy(self.expected_flavor)
+ body['flavor']['id'] = None
+ self._test_create_with_autogenerated_flavorid(body)
+
+ def test_create_without_flavorid(self):
+ body = copy.deepcopy(self.expected_flavor)
+ del body['flavor']['id']
+ self._test_create_with_autogenerated_flavorid(body)
def test_flavor_exists_exception_returns_409(self):
expected = self.expected_flavor
@@ -334,8 +343,8 @@ def test_create_private_flavor_should_create_flavor_access(self):
"tenant_id": "%s" % ctxt.project_id,
"flavor_id": "%s" % body["flavor"]["id"]
}
- self.assertTrue(expected_flavor_access_body in
- flavor_access_body["flavor_access"])
+ self.assertIn(expected_flavor_access_body,
+ flavor_access_body["flavor_access"])
def test_create_public_flavor_should_not_create_flavor_access(self):
self.base_request_dict['flavor']['flavor-access:is_public'] = True
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py
index 5f9f1a883d..52bdaddaf9 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavors.py
@@ -484,9 +484,6 @@ def setUp(self):
super(FlavorDisabledTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
- #def fake_flavor_get_all(*args, **kwargs):
- # return FAKE_FLAVORS
- #
self.stubs.Set(nova.compute.flavors, "get_all_flavors_sorted_list",
fake_get_all_flavors_sorted_list)
self.stubs.Set(nova.compute.flavors,
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_flavors_extra_specs.py b/nova/tests/api/openstack/compute/plugins/v3/test_flavors_extra_specs.py
index 525c405ed9..5364072954 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_flavors_extra_specs.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_flavors_extra_specs.py
@@ -110,7 +110,7 @@ def test_not_found_because_flavor(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, 1, 'key5')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- req, 1, 'key5', {'key5': 'value5'})
+ req, 1, 'key5', body={'key5': 'value5'})
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, 1, 'key5')
@@ -119,7 +119,7 @@ def test_not_found_because_flavor(self):
with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
- req, 1, {'extra_specs': {'key5': 'value5'}})
+ req, 1, body={'extra_specs': {'key5': 'value5'}})
def test_delete(self):
flavor = dict(test_flavor.fake_flavor,
@@ -153,7 +153,7 @@ def test_create(self):
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs',
use_admin_context=True)
- res_dict = self.controller.create(req, 1, body)
+ res_dict = self.controller.create(req, 1, body=body)
self.assertEqual('value1', res_dict['extra_specs']['key1'])
self.assertEqual(self.controller.create.wsgi_code, 201)
@@ -166,17 +166,40 @@ def test_create_no_admin(self):
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs')
self.assertRaises(exception.Forbidden, self.controller.create,
- req, 1, body)
+ req, 1, body=body)
- def test_create_empty_body(self):
+ def _test_create_bad_request(self, body):
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs',
use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, 1, '')
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ req, 1, body=body)
+
+ def test_create_empty_body(self):
+ self._test_create_bad_request('')
+
+ def test_create_non_dict_extra_specs(self):
+ self._test_create_bad_request({"extra_specs": "non_dict"})
+
+ def test_create_non_string_key(self):
+ self._test_create_bad_request({"extra_specs": {None: "value1"}})
+
+ def test_create_non_string_value(self):
+ self._test_create_bad_request({"extra_specs": {"key1": None}})
+
+ def test_create_zero_length_key(self):
+ self._test_create_bad_request({"extra_specs": {"": "value1"}})
+
+ def test_create_long_key(self):
+ key = "a" * 256
+ self._test_create_bad_request({"extra_specs": {key: "value1"}})
+
+ def test_create_long_value(self):
+ value = "a" * 256
+ self._test_create_bad_request({"extra_specs": {"key1": value}})
def test_create_flavor_not_found(self):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
@@ -189,7 +212,7 @@ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
- req, 1, body)
+ req, 1, body=body)
def test_create_flavor_db_duplicate(self):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
@@ -202,7 +225,7 @@ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
- req, 1, body)
+ req, 1, body=body)
@mock.patch('nova.db.flavor_extra_specs_update_or_create')
def test_create_invalid_specs_key(self, mock_flavor_extra_specs):
@@ -214,8 +237,8 @@ def test_create_invalid_specs_key(self, mock_flavor_extra_specs):
req = fakes.HTTPRequest.blank('/flavors/1/extra-specs',
use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
- req, 1, body)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, req, 1, body=body)
@mock.patch('nova.db.flavor_extra_specs_update_or_create')
def test_create_valid_specs_key(self, mock_flavor_extra_specs):
@@ -227,7 +250,7 @@ def test_create_valid_specs_key(self, mock_flavor_extra_specs):
req = fakes.HTTPRequest.blank('/flavors/1/extra-specs',
use_admin_context=True)
- res_dict = self.controller.create(req, 1, body)
+ res_dict = self.controller.create(req, 1, body=body)
self.assertEqual('value1', res_dict['extra_specs'][key])
self.assertEqual(self.controller.create.wsgi_code, 201)
@@ -239,7 +262,7 @@ def test_update_item(self):
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1',
use_admin_context=True)
- res_dict = self.controller.update(req, 1, 'key1', body)
+ res_dict = self.controller.update(req, 1, 'key1', body=body)
self.assertEqual('value1', res_dict['key1'])
@@ -251,28 +274,44 @@ def test_update_item_no_admin(self):
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1')
self.assertRaises(exception.Forbidden, self.controller.update,
- req, 1, 'key1', body)
+ req, 1, 'key1', body=body)
- def test_update_item_empty_body(self):
+ def _test_update_item_bad_request(self, body):
self.stubs.Set(nova.db,
'flavor_extra_specs_update_or_create',
return_create_flavor_extra_specs)
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1',
use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'key1', '')
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, 1, 'key1', body=body)
+
+ def test_update_item_empty_body(self):
+ self._test_update_item_bad_request('')
def test_update_item_too_many_keys(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
body = {"key1": "value1", "key2": "value2"}
+ self._test_update_item_bad_request(body)
- req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1',
- use_admin_context=True)
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'key1', body)
+ def test_update_item_non_dict_extra_specs(self):
+ self._test_update_item_bad_request("non_dict")
+
+ def test_update_item_non_string_key(self):
+ self._test_update_item_bad_request({None: "value1"})
+
+ def test_update_item_non_string_value(self):
+ self._test_update_item_bad_request({"key1": None})
+
+ def test_update_item_zero_length_key(self):
+ self._test_update_item_bad_request({"": "value1"})
+
+ def test_update_item_long_key(self):
+ key = "a" * 256
+ self._test_update_item_bad_request({key: "value1"})
+
+ def test_update_item_long_value(self):
+ value = "a" * 256
+ self._test_update_item_bad_request({"key1": value})
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db,
@@ -283,7 +322,7 @@ def test_update_item_body_uri_mismatch(self):
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/bad',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- req, 1, 'bad', body)
+ req, 1, 'bad', body=body)
def test_update_flavor_not_found(self):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
@@ -297,7 +336,7 @@ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- req, 1, 'key1', body)
+ req, 1, 'key1', body=body)
def test_update_flavor_db_duplicate(self):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
@@ -311,4 +350,4 @@ def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
req = fakes.HTTPRequestV3.blank('/flavors/1/extra-specs/key1',
use_admin_context=True)
self.assertRaises(webob.exc.HTTPConflict, self.controller.update,
- req, 1, 'key1', body)
+ req, 1, 'key1', body=body)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_hide_server_addresses.py b/nova/tests/api/openstack/compute/plugins/v3/test_hide_server_addresses.py
deleted file mode 100644
index ad57aa095c..0000000000
--- a/nova/tests/api/openstack/compute/plugins/v3/test_hide_server_addresses.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import itertools
-
-import webob
-
-from nova import compute
-from nova.compute import vm_states
-from nova import db
-from nova import exception
-from nova import objects
-from nova.objects import instance as instance_obj
-from nova.openstack.common import jsonutils
-from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests import fake_instance
-
-
-SENTINEL = object()
-
-
-def fake_compute_get(*args, **kwargs):
- def _return_server(*_args, **_kwargs):
- inst = fakes.stub_instance(*args, **kwargs)
- return fake_instance.fake_instance_obj(_args[1], **inst)
- return _return_server
-
-
-class HideServerAddressesTest(test.TestCase):
- content_type = 'application/json'
-
- def setUp(self):
- super(HideServerAddressesTest, self).setUp()
- fakes.stub_out_nw_api(self.stubs)
- return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
-
- def _make_request(self, url):
- req = webob.Request.blank(url)
- req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v3(
- init_only=('servers', 'os-hide-server-addresses')))
- return res
-
- @staticmethod
- def _get_server(body):
- return jsonutils.loads(body).get('server')
-
- @staticmethod
- def _get_servers(body):
- return jsonutils.loads(body).get('servers')
-
- @staticmethod
- def _get_addresses(server):
- return server.get('addresses', SENTINEL)
-
- def _check_addresses(self, addresses, exists):
- self.assertTrue(addresses is not SENTINEL)
- if exists:
- self.assertTrue(addresses)
- else:
- self.assertFalse(addresses)
-
- def test_show_hides_in_building(self):
- instance_id = 1
- uuid = fakes.get_fake_uuid(instance_id)
- self.stubs.Set(compute.api.API, 'get',
- fake_compute_get(instance_id, uuid=uuid,
- vm_state=vm_states.BUILDING))
- res = self._make_request('/v3/servers/%s' % uuid)
- self.assertEqual(res.status_int, 200)
-
- server = self._get_server(res.body)
- addresses = self._get_addresses(server)
- self._check_addresses(addresses, exists=False)
-
- def test_show(self):
- instance_id = 1
- uuid = fakes.get_fake_uuid(instance_id)
- self.stubs.Set(compute.api.API, 'get',
- fake_compute_get(instance_id, uuid=uuid,
- vm_state=vm_states.ACTIVE))
- res = self._make_request('/v3/servers/%s' % uuid)
- self.assertEqual(res.status_int, 200)
-
- server = self._get_server(res.body)
- addresses = self._get_addresses(server)
- self._check_addresses(addresses, exists=True)
-
- def test_detail_hides_building_server_addresses(self):
- instance_0 = fakes.stub_instance(0, uuid=fakes.get_fake_uuid(0),
- vm_state=vm_states.ACTIVE)
- instance_1 = fakes.stub_instance(1, uuid=fakes.get_fake_uuid(1),
- vm_state=vm_states.BUILDING)
- instances = [instance_0, instance_1]
-
- def get_all(*args, **kwargs):
- fields = instance_obj.INSTANCE_DEFAULT_FIELDS
- return instance_obj._make_instance_list(
- args[1], objects.InstanceList(), instances, fields)
-
- self.stubs.Set(compute.api.API, 'get_all', get_all)
- res = self._make_request('/v3/servers/detail')
-
- self.assertEqual(res.status_int, 200)
- servers = self._get_servers(res.body)
-
- self.assertEqual(len(servers), len(instances))
-
- for instance, server in itertools.izip(instances, servers):
- addresses = self._get_addresses(server)
- exists = (instance['vm_state'] == vm_states.ACTIVE)
- self._check_addresses(addresses, exists=exists)
-
- def test_no_instance_passthrough_404(self):
-
- def fake_compute_get(*args, **kwargs):
- raise exception.InstanceNotFound(instance_id='fake')
-
- self.stubs.Set(compute.api.API, 'get', fake_compute_get)
- res = self._make_request('/v3/servers/' + fakes.get_fake_uuid())
-
- self.assertEqual(res.status_int, 404)
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_hosts.py b/nova/tests/api/openstack/compute/plugins/v3/test_hosts.py
index 24d4e1ae7a..375be3d489 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_hosts.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_hosts.py
@@ -167,7 +167,7 @@ def setUp(self):
def _test_host_update(self, host, key, val, expected_value):
body = {'host': {key: val}}
- result = self.controller.update(self.req, host, body)
+ result = self.controller.update(self.req, host, body=body)
self.assertEqual(result['host'][key], expected_value)
def test_list_hosts(self):
@@ -211,7 +211,7 @@ def _test_host_update_service_unavailable(self, key, val):
body = {'host': {key: val}}
host = "serviceunavailable"
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, host, body)
+ self.req, host, body=body)
def test_enable_host_service_unavailable(self):
self._test_host_update_service_unavailable('status', 'enable')
@@ -307,45 +307,55 @@ def test_host_power_action_bad_host(self):
def test_bad_status_value(self):
bad_body = {"host": {"status": "bad"}}
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", bad_body)
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ self.req, "host", body=bad_body)
bad_body2 = {"host": {"status": "disablabc"}}
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", bad_body2)
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ self.req, "host", body=bad_body2)
def test_bad_update_key(self):
bad_body = {"host": {"crazy": "bad"}}
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", bad_body)
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ self.req, "host", body=bad_body)
def test_bad_update_key_type(self):
bad_body = {"host": "abc"}
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", bad_body)
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ self.req, "host", body=bad_body)
bad_body = {"host": None}
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", bad_body)
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ self.req, "host", body=bad_body)
def test_bad_update_empty(self):
bad_body = {}
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", bad_body)
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ self.req, "host", body=bad_body)
def test_bad_update_key_and_correct_update_key(self):
bad_body = {"host": {"status": "disable",
"crazy": "bad"}}
- self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
- self.req, "host_c1", bad_body)
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ self.req, "host", body=bad_body)
def test_good_update_keys(self):
body = {"host": {"status": "disable",
"maintenance_mode": "enable"}}
- result = self.controller.update(self.req, 'host_c1', body)
+ result = self.controller.update(self.req, 'host_c1', body=body)
self.assertEqual(result["host"]["host"], "host_c1")
self.assertEqual(result["host"]["status"], "disabled")
self.assertEqual(result["host"]["maintenance_mode"],
"on_maintenance")
+ def test_update_with_status_key_only(self):
+ body = {"host": {"status": "enable"}}
+ result = self.controller.update(self.req, 'host_c1', body=body)
+ self.assertEqual("enabled", result["host"]["status"])
+
+ def test_update_with_maintenance_mode_key_only(self):
+ body = {"host": {"maintenance_mode": "enable"}}
+ result = self.controller.update(self.req, 'host_c1', body=body)
+ self.assertEqual("on_maintenance", result["host"]["maintenance_mode"])
+
def test_show_forbidden(self):
self.req.environ["nova.context"].is_admin = False
dest = 'dummydest'
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_hypervisors.py b/nova/tests/api/openstack/compute/plugins/v3/test_hypervisors.py
index 7d746c8148..039dae759c 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_hypervisors.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_hypervisors.py
@@ -13,6 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
+import mock
from webob import exc
from nova.api.openstack.compute.plugins.v3 import hypervisors
@@ -32,6 +35,7 @@
topic="compute_topic",
report_count=5,
disabled=False,
+ disabled_reason=None,
availability_zone="nova"),
vcpus=4,
memory_mb=10 * 1024,
@@ -57,6 +61,7 @@
topic="compute_topic",
report_count=5,
disabled=False,
+ disabled_reason=None,
availability_zone="nova"),
vcpus=4,
memory_mb=10 * 1024,
@@ -134,6 +139,8 @@ class HypervisorsTest(test.NoDBTestCase):
def setUp(self):
super(HypervisorsTest, self).setUp()
self.controller = hypervisors.HypervisorsController()
+ self.controller.servicegroup_api.service_is_up = mock.MagicMock(
+ return_value=True)
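+        # Report every compute service as alive so hypervisors show
+        # state 'up' unless a test overrides the return value.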
self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
self.stubs.Set(db, 'compute_node_search_by_hypervisor',
@@ -148,7 +155,9 @@ def setUp(self):
def test_view_hypervisor_nodetail_noservers(self):
result = self.controller._view_hypervisor(TEST_HYPERS[0], False)
- self.assertEqual(result, dict(id=1, hypervisor_hostname="hyper1"))
+ self.assertEqual(dict(id=1, hypervisor_hostname="hyper1",
+ state='up', status='enabled'),
+ result)
def test_view_hypervisor_detail_noservers(self):
result = self.controller._view_hypervisor(TEST_HYPERS[0], True)
@@ -156,6 +165,8 @@ def test_view_hypervisor_detail_noservers(self):
self.assertEqual(result, dict(
id=1,
hypervisor_hostname="hyper1",
+ state='up',
+ status='enabled',
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
@@ -171,7 +182,7 @@ def test_view_hypervisor_detail_noservers(self):
cpu_info='cpu_info',
disk_available_least=100,
host_ip='1.1.1.1',
- service=dict(id=1, host='compute1')))
+ service=dict(id=1, host='compute1', disabled_reason=None)))
def test_view_hypervisor_servers(self):
result = self.controller._view_hypervisor(TEST_HYPERS[0], False,
@@ -180,20 +191,44 @@ def test_view_hypervisor_servers(self):
self.assertEqual(result, dict(
id=1,
hypervisor_hostname="hyper1",
+ state='up',
+ status='enabled',
servers=[
dict(name="inst1", id="uuid1"),
dict(name="inst2", id="uuid2"),
dict(name="inst3", id="uuid3"),
dict(name="inst4", id="uuid4")]))
+ def test_view_hypervisor_service_status(self):
+ result = self.controller._view_hypervisor(TEST_HYPERS[0], False)
+ self.assertEqual('up', result['state'])
+ self.assertEqual('enabled', result['status'])
+
+ self.controller.servicegroup_api.service_is_up.return_value = False
+ result = self.controller._view_hypervisor(TEST_HYPERS[0], False)
+ self.assertEqual('down', result['state'])
+ self.assertEqual('enabled', result['status'])
+
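+        # A disabled service must surface as status 'disabled'.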
+ hyper = copy.deepcopy(TEST_HYPERS[0])
+ hyper['service']['disabled'] = True
+ result = self.controller._view_hypervisor(hyper, False)
+ self.assertEqual('down', result['state'])
+ self.assertEqual('disabled', result['status'])
+
def test_index(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors',
use_admin_context=True)
result = self.controller.index(req)
self.assertEqual(result, dict(hypervisors=[
- dict(id=1, hypervisor_hostname="hyper1"),
- dict(id=2, hypervisor_hostname="hyper2")]))
+ dict(id=1,
+ hypervisor_hostname="hyper1",
+ state='up',
+ status='enabled'),
+ dict(id=2,
+ hypervisor_hostname="hyper2",
+ state='up',
+ status='enabled')]))
def test_index_non_admin(self):
req = fakes.HTTPRequestV3.blank('/os-hypervisors')
@@ -207,8 +242,11 @@ def test_detail(self):
self.assertEqual(result, dict(hypervisors=[
dict(id=1,
- service=dict(id=1, host="compute1"),
+ service=dict(
+ id=1, host="compute1", disabled_reason=None),
vcpus=4,
+ state='up',
+ status='enabled',
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
@@ -225,8 +263,11 @@ def test_detail(self):
disk_available_least=100,
host_ip='1.1.1.1'),
dict(id=2,
- service=dict(id=2, host="compute2"),
+ service=dict(id=2, host="compute2",
+ disabled_reason=None),
vcpus=4,
+ state='up',
+ status='enabled',
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
@@ -265,8 +306,10 @@ def test_show_withid(self):
self.assertEqual(result, dict(hypervisor=dict(
id=1,
- service=dict(id=1, host="compute1"),
+ service=dict(id=1, host="compute1", disabled_reason=None),
vcpus=4,
+ state='up',
+ status='enabled',
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
@@ -319,6 +362,8 @@ def fake_get_host_uptime(context, hyp):
self.assertEqual(result, dict(hypervisor=dict(
id=1,
hypervisor_hostname="hyper1",
+ state='up',
+ status='enabled',
uptime="fake uptime")))
def test_uptime_non_integer_id(self):
@@ -336,8 +381,10 @@ def test_search(self):
use_admin_context=True)
result = self.controller.search(req)
self.assertEqual(result, dict(hypervisors=[
- dict(id=1, hypervisor_hostname="hyper1"),
- dict(id=2, hypervisor_hostname="hyper2")]))
+ dict(id=1, hypervisor_hostname="hyper1",
+ state='up', status='enabled'),
+ dict(id=2, hypervisor_hostname="hyper2",
+ state='up', status='enabled')]))
def test_search_non_exist(self):
def fake_compute_node_search_by_hypervisor_return_empty(context,
@@ -362,6 +409,8 @@ def test_servers(self):
self.assertEqual(result, dict(hypervisor=
dict(id=1,
hypervisor_hostname="hyper1",
+ state='up',
+ status='enabled',
servers=[
dict(name="inst1", id="uuid1"),
dict(name="inst3", id="uuid3")])))
@@ -387,6 +436,8 @@ def fake_instance_get_all_by_host_return_empty(context, hypervisor_re):
self.assertEqual(result, dict(hypervisor=
dict(id=1,
hypervisor_hostname="hyper1",
+ state='up',
+ status='enabled',
servers=[])))
def test_servers_with_non_integer_hypervisor_id(self):
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py b/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py
index 29ed96a32b..d2ea6f29e8 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_instance_actions.py
@@ -83,7 +83,7 @@ def test_list_actions_restricted_by_project(self):
policy.set_rules(rules)
def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
@@ -100,7 +100,7 @@ def test_get_action_restricted_by_project(self):
policy.set_rules(rules)
def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
@@ -124,7 +124,7 @@ def fake_get(self, context, instance_uuid, expected_attrs=None,
return {'uuid': instance_uuid}
def fake_instance_get_by_uuid(context, instance_id,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None, use_subordinate=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
diff --git a/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py b/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py
index b926d175e8..76fb432e73 100644
--- a/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py
+++ b/nova/tests/api/openstack/compute/plugins/v3/test_keypairs.py
@@ -169,7 +169,7 @@ def test_keypair_create_with_non_alphanumeric_name(self):
res_dict = jsonutils.loads(res.body)
self.assertEqual(
"Invalid input for field/attribute name. Value: test/keypair. "
-            "u'test/keypair' does not match '^(?! )[a-zA-Z0-9. _-]+(?<! )$'",
                        '<extra_specs><foo:bar>999</foo:bar>'
                        '</extra_specs>'))
- # Set up our master template
+ # Set up our main template
root = xmlutil.TemplateElement('extra_specs', selector='extra_specs',
colon_ns=True)
value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar',
colon_ns=True)
value.text = xmlutil.Selector()
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
+ main = xmlutil.MainTemplate(root, 1)
+ result = main.serialize(obj)
self.assertEqual(expected_xml, result)
def test__serialize_with_empty_datum_selector(self):
@@ -734,76 +734,76 @@ def test__serialize_with_empty_datum_selector(self):
root = xmlutil.TemplateElement('test', selector='test',
name='name')
- master = xmlutil.MasterTemplate(root, 1)
- root_slave = xmlutil.TemplateElement('test', selector='test')
- image = xmlutil.SubTemplateElement(root_slave, 'image',
+ main = xmlutil.MainTemplate(root, 1)
+ root_subordinate = xmlutil.TemplateElement('test', selector='test')
+ image = xmlutil.SubTemplateElement(root_subordinate, 'image',
selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
- slave = xmlutil.SlaveTemplate(root_slave, 1)
- master.attach(slave)
+ subordinate = xmlutil.SubordinateTemplate(root_subordinate, 1)
+ main.attach(subordinate)
- siblings = master._siblings()
- result = master._serialize(None, obj, siblings)
+ siblings = main._siblings()
+ result = main._serialize(None, obj, siblings)
self.assertEqual(result.tag, 'test')
self.assertEqual(result[0].tag, 'image')
self.assertEqual(result[0].get('id'), str(obj['test']['image']))
-class MasterTemplateBuilder(xmlutil.TemplateBuilder):
+class MainTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
- return xmlutil.MasterTemplate(elem, 1)
+ return xmlutil.MainTemplate(elem, 1)
-class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
+class SubordinateTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
- return xmlutil.SlaveTemplate(elem, 1)
+ return xmlutil.SubordinateTemplate(elem, 1)
class TemplateBuilderTest(test.NoDBTestCase):
- def test_master_template_builder(self):
+ def test_main_template_builder(self):
# Make sure the template hasn't been built yet
- self.assertIsNone(MasterTemplateBuilder._tmpl)
+ self.assertIsNone(MainTemplateBuilder._tmpl)
# Now, construct the template
- tmpl1 = MasterTemplateBuilder()
+ tmpl1 = MainTemplateBuilder()
# Make sure that there is a template cached...
- self.assertIsNotNone(MasterTemplateBuilder._tmpl)
+ self.assertIsNotNone(MainTemplateBuilder._tmpl)
# Make sure it wasn't what was returned...
- self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
+ self.assertNotEqual(MainTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
- cached = MasterTemplateBuilder._tmpl
- tmpl2 = MasterTemplateBuilder()
- self.assertEqual(MasterTemplateBuilder._tmpl, cached)
+ cached = MainTemplateBuilder._tmpl
+ tmpl2 = MainTemplateBuilder()
+ self.assertEqual(MainTemplateBuilder._tmpl, cached)
# Make sure we're always getting fresh copies
self.assertNotEqual(tmpl1, tmpl2)
# Make sure we can override the copying behavior
- tmpl3 = MasterTemplateBuilder(False)
- self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
+ tmpl3 = MainTemplateBuilder(False)
+ self.assertEqual(MainTemplateBuilder._tmpl, tmpl3)
- def test_slave_template_builder(self):
+ def test_subordinate_template_builder(self):
# Make sure the template hasn't been built yet
- self.assertIsNone(SlaveTemplateBuilder._tmpl)
+ self.assertIsNone(SubordinateTemplateBuilder._tmpl)
# Now, construct the template
- tmpl1 = SlaveTemplateBuilder()
+ tmpl1 = SubordinateTemplateBuilder()
# Make sure there is a template cached...
- self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
+ self.assertIsNotNone(SubordinateTemplateBuilder._tmpl)
# Make sure it was what was returned...
- self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+ self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
- tmpl2 = SlaveTemplateBuilder()
- self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
+ tmpl2 = SubordinateTemplateBuilder()
+ self.assertEqual(SubordinateTemplateBuilder._tmpl, tmpl1)
# Make sure we're always getting the cached copy
self.assertEqual(tmpl1, tmpl2)
@@ -829,7 +829,7 @@ def test_make_flat_dict(self):
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                        '<wrapper><a>foo</a><b>bar</b></wrapper>')
root = xmlutil.make_flat_dict('wrapper')
- tmpl = xmlutil.MasterTemplate(root, 1)
+ tmpl = xmlutil.MainTemplate(root, 1)
result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
self.assertEqual(result, expected_xml)
@@ -837,7 +837,7 @@ def test_make_flat_dict(self):
                        '<wrapper xmlns="ns"><a>foo</a><b>bar</b>'
                        "</wrapper>")
root = xmlutil.make_flat_dict('wrapper', ns='ns')
- tmpl = xmlutil.MasterTemplate(root, 1)
+ tmpl = xmlutil.MainTemplate(root, 1)
result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
self.assertEqual(result, expected_xml)
@@ -847,10 +847,10 @@ def test_make_flat_dict_with_colon_tagname_support(self):
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<extra_specs><foo:bar>999</foo:bar>'
                         '</extra_specs>'))
- # Set up our master template
+ # Set up our main template
root = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
+ main = xmlutil.MainTemplate(root, 1)
+ result = main.serialize(obj)
self.assertEqual(expected_xml, result)
def test_make_flat_dict_with_parent(self):
@@ -867,8 +867,8 @@ def test_make_flat_dict_with_parent(self):
root.set('id')
extra = xmlutil.make_flat_dict('extra_info', root=root)
root.append(extra)
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
+ main = xmlutil.MainTemplate(root, 1)
+ result = main.serialize(obj)
self.assertEqual(expected_xml, result)
def test_make_flat_dict_with_dicts(self):
@@ -885,8 +885,8 @@ def test_make_flat_dict_with_dicts(self):
ignore_sub_dicts=True)
extra = xmlutil.make_flat_dict('extra_info', selector='extra_info')
root.append(extra)
- master = xmlutil.MasterTemplate(root, 1)
- result = master.serialize(obj)
+ main = xmlutil.MainTemplate(root, 1)
+ result = main.serialize(obj)
self.assertEqual(expected_xml, result)
def test_safe_parse_xml(self):
diff --git a/nova/tests/api/test_auth.py b/nova/tests/api/test_auth.py
index 8197909192..8505e381e6 100644
--- a/nova/tests/api/test_auth.py
+++ b/nova/tests/api/test_auth.py
@@ -12,14 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
-
from oslo.config import cfg
import webob
import webob.exc
import nova.api.auth
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.openstack.common import jsonutils
from nova.openstack.common.middleware import request_id
from nova import test
@@ -41,7 +40,7 @@ def fake_app(req):
self.request = webob.Request.blank('/')
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
- self.request.headers['X_SERVICE_CATALOG'] = json.dumps({})
+ self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({})
def test_no_user_or_user_id(self):
response = self.request.get_response(self.middleware)
@@ -102,7 +101,7 @@ def role_check_app(req):
self.request.headers['X_USER'] = 'testuser'
self.request.headers['X_TENANT_ID'] = 'testtenantid'
self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
- self.request.headers['X_SERVICE_CATALOG'] = json.dumps({})
+ self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({})
self.roles = "pawn, knight, rook"
diff --git a/nova/tests/cells/test_cells_filters.py b/nova/tests/cells/test_cells_filters.py
index c4f0240611..01ad3580ff 100644
--- a/nova/tests/cells/test_cells_filters.py
+++ b/nova/tests/cells/test_cells_filters.py
@@ -82,6 +82,7 @@ def test_missing_hypervisor_version_requires(self):
def test_missing_hypervisor_version_in_cells(self):
image = {'properties': {'hypervisor_version_requires': '>6.2.1'}}
self.filter_props['request_spec'] = {'image': image}
+ self.cell1.capabilities = {"prominent_hypervisor_version": set([])}
self.assertEqual(self.cells,
self._filter_cells(self.cells, self.filter_props))
@@ -162,8 +163,7 @@ def _fake_build_instances(ctxt, cell, sched_kwargs):
'routing_path': current_cell,
'scheduler': self.scheduler,
'context': self.context,
- 'host_sched_kwargs': 'meow',
- 'cell_scheduler_method': 'build_instances'}
+ 'host_sched_kwargs': 'meow'}
# None is returned to bypass further scheduling.
self.assertIsNone(self._filter_cells(cells, filter_props))
# The filter should have re-scheduled to the child cell itself.
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index 3acd49bdc4..f5730b3086 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -17,6 +17,8 @@
Tests For Cells Messaging module
"""
+import contextlib
+
import mock
import mox
from oslo.config import cfg
@@ -33,7 +35,6 @@
from nova import objects
from nova.objects import base as objects_base
from nova.objects import fields as objects_fields
-from nova.objects import instance_fault as instance_fault_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
@@ -1122,6 +1123,31 @@ def test_call_compute_api_with_obj(self):
extra_properties='props')
self.assertEqual('foo', result)
+ def test_call_compute_api_with_obj_no_cache(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ error = exception.InstanceInfoCacheNotFound(
+ instance_uuid=instance.uuid)
+ with mock.patch.object(instance, 'refresh', side_effect=error):
+ self.assertRaises(exception.InstanceInfoCacheNotFound,
+ self.tgt_methods_cls._call_compute_api_with_obj,
+ self.ctxt, instance, 'snapshot')
+
+ def test_call_delete_compute_api_with_obj_no_cache(self):
+ instance = objects.Instance()
+ instance.uuid = uuidutils.generate_uuid()
+ error = exception.InstanceInfoCacheNotFound(
+ instance_uuid=instance.uuid)
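+        # The info cache miss must not block deletion; delete still runs.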
+ with contextlib.nested(
+ mock.patch.object(instance, 'refresh',
+ side_effect=error),
+ mock.patch.object(self.tgt_compute_api, 'delete')) as (inst,
+ delete):
+ self.tgt_methods_cls._call_compute_api_with_obj(self.ctxt,
+ instance,
+ 'delete')
+ delete.assert_called_once_with(self.ctxt, instance)
+
def test_call_compute_with_obj_unknown_instance(self):
instance = objects.Instance()
instance.uuid = uuidutils.generate_uuid()
@@ -1621,7 +1647,7 @@ def test_instance_fault_create_at_top(self):
'message': 'fake-message',
'details': 'fake-details'}
- if_mock = mock.Mock(spec_set=instance_fault_obj.InstanceFault)
+ if_mock = mock.Mock(spec_set=objects.InstanceFault)
def _check_create():
self.assertEqual('fake-message', if_mock.message)
@@ -1631,8 +1657,7 @@ def _check_create():
if_mock.create.side_effect = _check_create
- with mock.patch.object(instance_fault_obj,
- 'InstanceFault') as if_obj_mock:
+ with mock.patch.object(objects, 'InstanceFault') as if_obj_mock:
if_obj_mock.return_value = if_mock
self.src_msg_runner.instance_fault_create_at_top(
self.ctxt, fake_instance_fault)
diff --git a/nova/tests/cells/test_cells_state_manager.py b/nova/tests/cells/test_cells_state_manager.py
index 1c299277b6..1a9347d1c3 100644
--- a/nova/tests/cells/test_cells_state_manager.py
+++ b/nova/tests/cells/test_cells_state_manager.py
@@ -16,12 +16,16 @@
Tests For CellStateManager
"""
+import time
+
+import mock
from oslo.config import cfg
from nova.cells import state
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
+from nova.openstack.common.db import exception as db_exc
from nova import test
@@ -145,6 +149,19 @@ def _capacity(self, reserve_percent):
return my_state.capacities
+class TestCellStateManagerException(test.TestCase):
+ @mock.patch.object(time, 'sleep')
+ def test_init_db_error(self, mock_sleep):
+ class TestCellStateManagerDB(state.CellStateManagerDB):
+ def __init__(self):
+ self._cell_data_sync = mock.Mock()
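+                # Fail the first DB sync with a DBError, then succeed.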
+ self._cell_data_sync.side_effect = [db_exc.DBError(), []]
+ super(TestCellStateManagerDB, self).__init__()
+ test = TestCellStateManagerDB()
+ mock_sleep.assert_called_once_with(30)
+ self.assertEqual(test._cell_data_sync.call_count, 2)
+
+
class TestCellsGetCapacity(TestCellsStateManager):
def setUp(self):
super(TestCellsGetCapacity, self).setUp()
diff --git a/nova/tests/compute/fake_resource_tracker.py b/nova/tests/compute/fake_resource_tracker.py
index c8f1e14647..b0fec2042b 100644
--- a/nova/tests/compute/fake_resource_tracker.py
+++ b/nova/tests/compute/fake_resource_tracker.py
@@ -20,10 +20,12 @@ class FakeResourceTracker(resource_tracker.ResourceTracker):
"""Version without a DB requirement."""
def _create(self, context, values):
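+        # Mirror the real tracker: let extension resource handlers
+        # write their usage into the values dict before saving.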
+ self._write_ext_resources(values)
self.compute_node = values
self.compute_node['id'] = 1
def _update(self, context, values, prune_stats=False):
+ self._write_ext_resources(values)
self.compute_node.update(values)
def _get_service(self, context):
diff --git a/nova/tests/compute/test_arch.py b/nova/tests/compute/test_arch.py
new file mode 100644
index 0000000000..80fa274ed8
--- /dev/null
+++ b/nova/tests/compute/test_arch.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2014 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import mock
+
+from nova.compute import arch
+from nova import exception
+from nova import test
+
+
+class ArchTest(test.NoDBTestCase):
+
+ @mock.patch.object(os, "uname")
+ def test_host(self, mock_uname):
+ os.uname.return_value = (
+ 'Linux',
+ 'localhost.localdomain',
+ '3.14.8-200.fc20.x86_64',
+ '#1 SMP Mon Jun 16 21:57:53 UTC 2014',
+ 'i686'
+ )
+
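+        # uname()'s machine field ('i686') drives the detected arch.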
+ self.assertEqual(arch.I686, arch.from_host())
+
+ def test_valid_string(self):
+ self.assertTrue(arch.is_valid("x86_64"))
+
+ def test_valid_constant(self):
+ self.assertTrue(arch.is_valid(arch.X86_64))
+
+ def test_valid_bogus(self):
+ self.assertFalse(arch.is_valid("x86_64wibble"))
+
+ def test_canonicalize_i386(self):
+ self.assertEqual(arch.I686, arch.canonicalize("i386"))
+
+ def test_canonicalize_case(self):
+ self.assertEqual(arch.X86_64, arch.canonicalize("X86_64"))
+
+ def test_canonicalize_bogus(self):
+ self.assertRaises(exception.InvalidArchitectureName,
+ arch.canonicalize,
+ "x86_64wibble")
diff --git a/nova/tests/compute/test_claims.py b/nova/tests/compute/test_claims.py
index be60f54016..0df1875c17 100644
--- a/nova/tests/compute/test_claims.py
+++ b/nova/tests/compute/test_claims.py
@@ -25,10 +25,21 @@
from nova import test
+class FakeResourceHandler(object):
+ test_called = False
+    usage_is_itype = False
+
+ def test_resources(self, usage, limits):
+ self.test_called = True
+        self.usage_is_itype = usage.get('name') == 'fakeitype'
+ return []
+
+
class DummyTracker(object):
icalled = False
rcalled = False
pci_tracker = pci_manager.PciDevTracker()
+ ext_resources_handler = FakeResourceHandler()
def abort_instance_claim(self, *args, **kwargs):
self.icalled = True
@@ -101,9 +112,6 @@ def assertRaisesRegexp(self, re_obj, e, fn, *a, **kw):
except e as ee:
self.assertTrue(re.search(re_obj, str(ee)))
- def test_cpu_unlimited(self):
- self._claim(vcpus=100000)
-
def test_memory_unlimited(self):
self._claim(memory_mb=99999999)
@@ -113,10 +121,6 @@ def test_disk_unlimited_root(self):
def test_disk_unlimited_ephemeral(self):
self._claim(ephemeral_gb=999999)
- def test_cpu_oversubscription(self):
- limits = {'vcpu': 16}
- self._claim(limits, vcpus=8)
-
def test_memory_with_overhead(self):
overhead = {'memory_mb': 8}
limits = {'memory_mb': 2048}
@@ -131,11 +135,6 @@ def test_memory_with_overhead_insufficient(self):
self._claim, limits=limits, overhead=overhead,
memory_mb=2040)
- def test_cpu_insufficient(self):
- limits = {'vcpu': 16}
- self.assertRaises(exception.ComputeResourcesUnavailable,
- self._claim, limits=limits, vcpus=17)
-
def test_memory_oversubscription(self):
self._claim(memory_mb=4096)
@@ -162,21 +161,6 @@ def test_disk_and_memory_insufficient(self):
self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
memory_mb=16384)
- def test_disk_and_cpu_insufficient(self):
- limits = {'disk_gb': 45, 'vcpu': 16}
- self.assertRaisesRegexp(re.compile("disk.*vcpus", re.IGNORECASE),
- exception.ComputeResourcesUnavailable,
- self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
- vcpus=17)
-
- def test_disk_and_cpu_and_memory_insufficient(self):
- limits = {'disk_gb': 45, 'vcpu': 16, 'memory_mb': 8192}
- pat = "memory.*disk.*vcpus"
- self.assertRaisesRegexp(re.compile(pat, re.IGNORECASE),
- exception.ComputeResourcesUnavailable,
- self._claim, limits=limits, root_gb=10, ephemeral_gb=40,
- vcpus=17, memory_mb=16384)
-
def test_pci_pass(self):
dev_dict = {
'compute_node_id': 1,
@@ -224,6 +208,11 @@ def test_pci_pass_no_requests(self):
self._set_pci_request(claim)
claim._test_pci()
+ def test_ext_resources(self):
+ self._claim()
+ self.assertTrue(self.tracker.ext_resources_handler.test_called)
+ self.assertFalse(self.tracker.ext_resources_handler.usage_is_itype)
+
def test_abort(self):
claim = self._abort()
self.assertTrue(claim.tracker.icalled)
@@ -260,6 +249,11 @@ def _set_pci_request(self, claim):
claim.instance.update(
system_metadata={'new_pci_requests': jsonutils.dumps(request)})
+ def test_ext_resources(self):
+ self._claim()
+ self.assertTrue(self.tracker.ext_resources_handler.test_called)
+ self.assertTrue(self.tracker.ext_resources_handler.usage_is_itype)
+
def test_abort(self):
claim = self._abort()
self.assertTrue(claim.tracker.rcalled)
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index ce2c2d5c0f..d7f3f9844c 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -51,6 +51,7 @@
from nova import context
from nova import db
from nova import exception
+from nova.i18n import _
from nova.image import glance
from nova.network import api as network_api
from nova.network import model as network_model
@@ -59,7 +60,6 @@
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -150,6 +150,13 @@ def prep_resize(self, ctxt, instance, instance_type, image, request_spec,
pass
+class FakeComputeTaskAPI(object):
+
+ def resize_instance(self, context, instance, extra_instance_updates,
+ scheduler_hint, flavor, reservations):
+ pass
+
+
class BaseTestCase(test.TestCase):
def setUp(self):
@@ -192,6 +199,7 @@ def fake_get_compute_nodes_in_db(context):
'free_ram_mb': 130560,
'metrics': '',
'stats': '',
+ 'numa_topology': '',
'id': 2,
'host_ip': '127.0.0.1'}]
return [objects.ComputeNode._from_db_object(
@@ -216,7 +224,7 @@ def fake_compute_node_delete(context, compute_node_id):
self.none_quotas = objects.Quotas.from_reservations(
self.context, None)
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
if id:
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
@@ -231,7 +239,10 @@ def fake_show(meh, context, id):
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
fake_rpcapi = FakeSchedulerAPI()
+ fake_taskapi = FakeComputeTaskAPI()
self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
+ self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi)
+
fake_network.set_stub_network_methods(self.stubs)
fake_server_actions.stub_out_action_events(self.stubs)
@@ -241,8 +252,13 @@ def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
+
+ def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs):
+ self.assertFalse(ctxt.is_admin)
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
+
self.stubs.Set(network_api.API, 'allocate_for_instance',
- fake_get_nw_info)
+ fake_allocate_for_instance)
self.compute_api = compute.API()
# Just to make long lines short
@@ -370,7 +386,8 @@ def setUp(self):
self.context, objects.Instance(),
fake_instance.fake_db_instance())
self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
- {'id': self.volume_id})
+ {'id': self.volume_id,
+ 'attach_status': 'detached'})
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda *a, **kw: None)
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
@@ -624,7 +641,7 @@ def test_poll_bandwidth_usage_not_implemented(self):
time.time().AndReturn(20)
time.time().AndReturn(21)
objects.InstanceList.get_by_host(ctxt, 'fake-mini',
- use_slave=True).AndReturn([])
+ use_subordinate=True).AndReturn([])
self.compute.driver.get_all_bw_counters([]).AndRaise(
NotImplementedError)
self.mox.ReplayAll()
@@ -654,7 +671,8 @@ def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host):
mock_get_by_host.assert_called_once_with('fake-context',
self.compute.host)
mock_get_by_inst.assert_called_once_with('fake-context',
- 'fake-instance-uuid')
+ 'fake-instance-uuid',
+ use_subordinate=False)
self.assertEqual(expected_host_bdms, got_host_bdms)
def test_poll_volume_usage_disabled(self):
@@ -675,7 +693,7 @@ def test_poll_volume_usage_returns_no_vols(self):
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
# Following methods are called.
utils.last_completed_audit_period().AndReturn((0, 0))
- self.compute._get_host_volume_bdms(ctxt).AndReturn([])
+        self.compute._get_host_volume_bdms(ctxt,
+                                           use_subordinate=True).AndReturn([])
self.mox.ReplayAll()
self.flags(volume_usage_poll_interval=10)
@@ -691,7 +709,8 @@ def test_poll_volume_usage_with_data(self):
lambda x, y: [3, 4])
# All the mocks are called
utils.last_completed_audit_period().AndReturn((10, 20))
- self.compute._get_host_volume_bdms(ctxt).AndReturn([1, 2])
+ self.compute._get_host_volume_bdms(ctxt,
+ use_subordinate=True).AndReturn([1, 2])
self.compute._update_volume_usage_cache(ctxt, [3, 4])
self.mox.ReplayAll()
self.flags(volume_usage_poll_interval=10)
@@ -720,8 +739,9 @@ def test_detach_volume_usage(self):
AndReturn(bdm)
self.compute.driver.block_stats(instance['name'], 'vdb').\
AndReturn([1L, 30L, 1L, 20L, None])
- self.compute._get_host_volume_bdms(self.context).AndReturn(
- host_volume_bdms)
+ self.compute._get_host_volume_bdms(self.context,
+ use_subordinate=True).AndReturn(
+ host_volume_bdms)
self.compute.driver.get_all_volume_usage(
self.context, host_volume_bdms).AndReturn(
[{'volume': 1,
@@ -1078,6 +1098,80 @@ def test_prep_block_device_over_quota_failure(self, mock_create):
self.context, instance, bdms)
self.assertTrue(mock_create.called)
+ @mock.patch.object(nova.virt.block_device, 'get_swap')
+ @mock.patch.object(nova.virt.block_device, 'convert_blanks')
+ @mock.patch.object(nova.virt.block_device, 'convert_images')
+ @mock.patch.object(nova.virt.block_device, 'convert_snapshots')
+ @mock.patch.object(nova.virt.block_device, 'convert_volumes')
+ @mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
+ @mock.patch.object(nova.virt.block_device, 'convert_swap')
+ @mock.patch.object(nova.virt.block_device, 'attach_block_devices')
+ def test_prep_block_device_with_blanks(self, attach_block_devices,
+ convert_swap, convert_ephemerals,
+ convert_volumes, convert_snapshots,
+ convert_images, convert_blanks,
+ get_swap):
+ instance = self._create_fake_instance()
+ instance['root_device_name'] = '/dev/vda'
+ root_volume = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'instance_uuid': 'fake-instance',
+ 'source_type': 'image',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'volume_size': 1,
+ 'boot_index': 0}))
+ blank_volume1 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'boot_index': 1}))
+ blank_volume2 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'volume_size': 1,
+ 'boot_index': 2}))
+ bdms = [blank_volume1, blank_volume2, root_volume]
+
+ def fake_attach_block_devices(bdm, *args, **kwargs):
+ return bdm
+
+ convert_swap.return_value = []
+ convert_ephemerals.return_value = []
+ convert_volumes.return_value = [blank_volume1, blank_volume2]
+ convert_snapshots.return_value = []
+ convert_images.return_value = [root_volume]
+ convert_blanks.return_value = []
+ attach_block_devices.side_effect = fake_attach_block_devices
+ get_swap.return_value = []
+
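+        # All three bdms should land in block_device_mapping unchanged.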
+ expected_block_device_info = {
+ 'root_device_name': '/dev/vda',
+ 'swap': [],
+ 'ephemerals': [],
+ 'block_device_mapping': bdms
+ }
+
+ manager = compute_manager.ComputeManager()
+ manager.use_legacy_block_device_info = False
+ block_device_info = manager._prep_block_device(self.context, instance,
+ bdms)
+
+ convert_swap.assert_called_once_with(bdms)
+ convert_ephemerals.assert_called_once_with(bdms)
+ convert_volumes.assert_called_once_with(bdms)
+ convert_snapshots.assert_called_once_with(bdms)
+ convert_images.assert_called_once_with(bdms)
+ convert_blanks.assert_called_once_with(bdms)
+
+ self.assertEqual(expected_block_device_info, block_device_info)
+ self.assertEqual(4, attach_block_devices.call_count)
+ get_swap.assert_called_once_with([])
+
class ComputeTestCase(BaseTestCase):
def test_wrap_instance_fault(self):
@@ -1518,7 +1612,7 @@ def test_fail_to_schedule_persists(self):
params = {'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING}
self._create_fake_instance(params=params)
- #check state is failed even after the periodic poll
+ # check state is failed even after the periodic poll
self.compute.periodic_tasks(context.get_admin_context())
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})
@@ -1540,7 +1634,7 @@ def fake(*args, **kwargs):
injected_files=None, admin_password=None,
is_first_time=True, node=None,
legacy_bdm_in_spec=False)
- #check state is failed even after the periodic poll
+ # check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
@@ -1563,7 +1657,7 @@ def test_setup_block_device_over_quota_fail(self, mock_prep_block_dev):
injected_files=None, admin_password=None,
is_first_time=True, node=None,
legacy_bdm_in_spec=False)
- #check state is failed even after the periodic poll
+ # check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
@@ -1587,7 +1681,7 @@ def fake(*args, **kwargs):
injected_files=None, admin_password=None,
is_first_time=True, node=None,
legacy_bdm_in_spec=False)
- #check state is failed even after the periodic poll
+ # check state is failed even after the periodic poll
self._assert_state({'vm_state': vm_states.ERROR,
'task_state': None})
self.compute.periodic_tasks(context.get_admin_context())
@@ -1659,7 +1753,7 @@ def fake_default_block_device_names(self, *a, **args):
def test_can_terminate_on_error_state(self):
# Make sure that the instance can be terminated in ERROR state.
- #check failed to schedule --> terminate
+ # check failed to schedule --> terminate
params = {'vm_state': vm_states.ERROR}
instance = self._create_fake_instance_obj(params=params)
self.compute.terminate_instance(self.context, instance, [], [])
@@ -2118,7 +2212,8 @@ def test_power_off(self):
called = {'power_off': False}
- def fake_driver_power_off(self, instance):
+ def fake_driver_power_off(self, instance,
+ shutdown_timeout, shutdown_attempts):
called['power_off'] = True
self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
@@ -2825,7 +2920,7 @@ def test_snapshot_fails_cleanup_ignores_exception(self):
def _test_snapshot_deletes_image_on_failure(self, status, exc):
self.fake_image_delete_called = False
- def fake_show(self_, context, image_id):
+ def fake_show(self_, context, image_id, **kwargs):
self.assertEqual('fakesnap', image_id)
image = {'id': image_id,
'status': status}
@@ -3663,7 +3758,7 @@ def _create_server_group(self):
def _run_instance_reschedules_on_anti_affinity_violation(self, group,
hint):
- instance = jsonutils.to_primitive(self._create_fake_instance())
+ instance = self._create_fake_instance_obj()
filter_properties = {'scheduler_hints': {'group': hint}}
self.assertRaises(exception.RescheduledException,
self.compute._build_instance,
@@ -3881,7 +3976,7 @@ def check_task_state(task_state):
def _check_locked_by(self, instance_uuid, locked_by):
instance = db.instance_get_by_uuid(self.context, instance_uuid)
- self.assertEqual(instance['locked'], locked_by != None)
+ self.assertEqual(instance['locked'], locked_by is not None)
self.assertEqual(instance['locked_by'], locked_by)
return instance
@@ -4263,7 +4358,8 @@ def test_finish_resize_with_volumes(self):
volume_id = 'fake'
volume = {'instance_uuid': None,
'device_name': None,
- 'id': volume_id}
+ 'id': volume_id,
+ 'attach_status': 'detached'}
bdm = objects.BlockDeviceMapping(
**{'source_type': 'volume',
'destination_type': 'volume',
@@ -4426,10 +4522,11 @@ def fake(*args, **kwargs):
self._stub_out_resize_network_methods()
- instance = self._create_fake_instance_obj()
+ old_flavor_name = 'm1.tiny'
+ instance = self._create_fake_instance_obj(type_name=old_flavor_name)
reservations = self._ensure_quota_reservations_rolledback(instance)
- instance_type = flavors.get_default_flavor()
+ instance_type = flavors.get_flavor_by_name('m1.small')
self.compute.prep_resize(self.context, instance=instance,
instance_type=instance_type,
@@ -4449,11 +4546,33 @@ def fake(*args, **kwargs):
migration=migration,
disk_info={}, image={}, instance=instance,
reservations=reservations)
- # NOTE(comstud): error path doesn't use objects, so our object
- # is not updated. Refresh and compare against the DB.
instance.refresh()
self.assertEqual(vm_states.ERROR, instance.vm_state)
+ old_flavor = flavors.get_flavor_by_name(old_flavor_name)
+ self.assertEqual(old_flavor['memory_mb'], instance.memory_mb)
+ self.assertEqual(old_flavor['vcpus'], instance.vcpus)
+ self.assertEqual(old_flavor['root_gb'], instance.root_gb)
+ self.assertEqual(old_flavor['ephemeral_gb'], instance.ephemeral_gb)
+ self.assertEqual(old_flavor['id'], instance.instance_type_id)
+ self.assertNotEqual(instance_type['id'], instance.instance_type_id)
+
+ def test_save_instance_info(self):
+ old_flavor_name = 'm1.tiny'
+ new_flavor_name = 'm1.small'
+ instance = self._create_fake_instance_obj(type_name=old_flavor_name)
+ new_flavor = flavors.get_flavor_by_name(new_flavor_name)
+
+ self.compute._save_instance_info(instance, new_flavor,
+ instance.system_metadata)
+
+ self.assertEqual(new_flavor['memory_mb'], instance.memory_mb)
+ self.assertEqual(new_flavor['vcpus'], instance.vcpus)
+ self.assertEqual(new_flavor['root_gb'], instance.root_gb)
+ self.assertEqual(new_flavor['ephemeral_gb'], instance.ephemeral_gb)
+ self.assertEqual(new_flavor['id'], instance.instance_type_id)
+
def test_rebuild_instance_notification(self):
# Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
@@ -4715,7 +4834,7 @@ def throw_up(*args, **kwargs):
self.context.elevated(),
instance.uuid, 'pre-migrating')
- #verify
+ # verify
self.assertRaises(test.TestingException, self.compute.resize_instance,
self.context, instance=instance,
migration=migration, image={},
@@ -4769,7 +4888,7 @@ def throw_up(*args, **kwargs):
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
- def test_resize_instance(self):
+ def _test_resize_instance(self, clean_shutdown=True):
# Ensure instance can be migrated/resized.
instance = self._create_fake_instance_obj()
instance_type = flavors.get_default_flavor()
@@ -4803,20 +4922,31 @@ def test_resize_instance(self):
mock.patch.object(
self.compute, '_get_instance_block_device_info',
return_value='fake_bdinfo'),
- mock.patch.object(self.compute, '_terminate_volume_connections')
+ mock.patch.object(self.compute, '_terminate_volume_connections'),
+ mock.patch.object(self.compute, '_get_power_off_values',
+ return_value=(1, 2))
) as (mock_get_by_inst_uuid, mock_get_instance_vol_bdinfo,
- mock_terminate_vol_conn):
+ mock_terminate_vol_conn, mock_get_power_off_values):
self.compute.resize_instance(self.context, instance=instance,
migration=migration, image={}, reservations=[],
- instance_type=jsonutils.to_primitive(instance_type))
+ instance_type=jsonutils.to_primitive(instance_type),
+ clean_shutdown=clean_shutdown)
mock_get_instance_vol_bdinfo.assert_called_once_with(
self.context, instance, bdms='fake_bdms')
mock_terminate_vol_conn.assert_called_once_with(self.context,
instance, 'fake_bdms')
+ mock_get_power_off_values.assert_called_once_with(self.context,
+ instance, clean_shutdown)
self.assertEqual(migration.dest_compute, instance.host)
self.compute.terminate_instance(self.context,
self._objectify(instance), [], [])
+ def test_resize_instance(self):
+ self._test_resize_instance()
+
+ def test_resize_instance_forced_shutdown(self):
+ self._test_resize_instance(clean_shutdown=False)
+
def _test_confirm_resize(self, power_on):
# Common test case method for confirm_resize
def fake(*args, **kwargs):
@@ -5058,11 +5188,15 @@ def _test_cleanup_stored_instance_types(self, old, new, revert=False):
if revert:
flavors.extract_flavor(instance, 'old_').AndReturn(
{'instance_type_id': old})
+ flavors.extract_flavor(instance).AndReturn(
+ {'instance_type_id': new})
flavors.save_flavor_info(
sys_meta, {'instance_type_id': old}).AndReturn(sys_meta)
else:
flavors.extract_flavor(instance).AndReturn(
{'instance_type_id': new})
+ flavors.extract_flavor(instance, 'old_').AndReturn(
+ {'instance_type_id': old})
flavors.delete_flavor_info(
sys_meta, 'old_').AndReturn(sys_meta)
flavors.delete_flavor_info(
@@ -5073,7 +5207,8 @@ def _test_cleanup_stored_instance_types(self, old, new, revert=False):
revert)
self.assertEqual(res,
(sys_meta,
- {'instance_type_id': revert and old or new}))
+ {'instance_type_id': revert and old or new},
+ {'instance_type_id': revert and new or old}))
def test_cleanup_stored_instance_types_for_resize(self):
self._test_cleanup_stored_instance_types('1', '2')
@@ -5341,32 +5476,30 @@ def fakecleanup(*args, **kwargs):
# creating testdata
c = context.get_admin_context()
- inst_ref = jsonutils.to_primitive(self._create_fake_instance({
+ instance = self._create_fake_instance_obj({
'host': srchost,
'state_description': 'migrating',
- 'state': power_state.PAUSED}))
- inst_uuid = inst_ref['uuid']
- db.instance_update(c, inst_uuid,
- {'task_state': task_states.MIGRATING,
- 'power_state': power_state.PAUSED})
+ 'state': power_state.PAUSED,
+ 'task_state': task_states.MIGRATING,
+ 'power_state': power_state.PAUSED})
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
- self.compute.driver.unfilter_instance(inst_ref, [])
+ self.compute.driver.unfilter_instance(instance, [])
self.mox.StubOutWithMock(self.compute.network_api,
'migrate_instance_start')
migration = {'source_compute': srchost, 'dest_compute': dest, }
- self.compute.network_api.migrate_instance_start(c, inst_ref,
+ self.compute.network_api.migrate_instance_start(c, instance,
migration)
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'post_live_migration_at_destination')
self.compute.compute_rpcapi.post_live_migration_at_destination(
- c, inst_ref, False, dest)
+ c, instance, False, dest)
self.mox.StubOutWithMock(self.compute.network_api,
'setup_networks_on_host')
- self.compute.network_api.setup_networks_on_host(c, inst_ref,
+ self.compute.network_api.setup_networks_on_host(c, instance,
self.compute.host,
teardown=True)
self.mox.StubOutWithMock(self.compute.instance_events,
@@ -5377,7 +5510,7 @@ def fakecleanup(*args, **kwargs):
# start test
self.mox.ReplayAll()
migrate_data = {'is_shared_instance_path': False}
- self.compute._post_live_migration(c, inst_ref, dest,
+ self.compute._post_live_migration(c, instance, dest,
migrate_data=migrate_data)
self.assertIn('cleanup', result)
self.assertEqual(result['cleanup'], True)
@@ -5406,7 +5539,8 @@ def test_post_live_migration_working_correctly(self):
'migrate_instance_start'),
mock.patch.object(self.compute.compute_rpcapi,
'post_live_migration_at_destination'),
- mock.patch.object(self.compute.driver, 'unplug_vifs'),
+ mock.patch.object(self.compute.driver,
+ 'post_live_migration_at_source'),
mock.patch.object(self.compute.network_api,
'setup_networks_on_host'),
mock.patch.object(self.compute.instance_events,
@@ -5414,7 +5548,8 @@ def test_post_live_migration_working_correctly(self):
) as (
post_live_migration, unfilter_instance,
migrate_instance_start, post_live_migration_at_destination,
- unplug_vifs, setup_networks_on_host, clear_events
+ post_live_migration_at_source, setup_networks_on_host,
+ clear_events
):
self.compute._post_live_migration(c, instance, dest)
@@ -5428,7 +5563,8 @@ def test_post_live_migration_working_correctly(self):
mock.call(c, instance, migration)])
post_live_migration_at_destination.assert_has_calls([
mock.call(c, instance, False, dest)])
- unplug_vifs.assert_has_calls([mock.call(instance, [])])
+ post_live_migration_at_source.assert_has_calls(
+ [mock.call(c, instance, [])])
setup_networks_on_host.assert_has_calls([
mock.call(c, instance, self.compute.host, teardown=True)])
clear_events.assert_called_once_with(instance)
@@ -5660,8 +5796,7 @@ def test_add_instance_fault_with_remote_error(self):
exc_info = None
def fake_db_fault_create(ctxt, values):
- self.assertTrue('raise messaging.RemoteError'
- in values['details'])
+ self.assertIn('raise messaging.RemoteError', values['details'])
del values['details']
expected = {
@@ -5809,9 +5944,9 @@ def test_cleanup_running_deleted_instances_reap(self):
self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\
AndRaise(test.TestingException)
objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
inst1.uuid, use_slave=True).AndReturn(bdms)
objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt,
inst2.uuid, use_slave=True).AndReturn(bdms)
self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\
AndReturn(None)
@@ -5916,7 +6051,7 @@ def test_get_instance_nw_info(self):
db.instance_get_by_uuid(self.context, fake_inst['uuid'],
columns_to_join=['info_cache',
'security_groups'],
use_slave=False
).AndReturn(fake_inst)
self.compute.network_api.get_instance_nw_info(self.context,
mox.IsA(objects.Instance)).AndReturn(fake_nw_info)
@@ -5947,13 +6082,13 @@ def test_heal_instance_info_cache(self):
'get_nw_info': 0, 'expected_instance': None}
def fake_instance_get_all_by_host(context, host,
columns_to_join, use_slave=False):
call_info['get_all_by_host'] += 1
self.assertEqual([], columns_to_join)
return instances[:]
def fake_instance_get_by_uuid(context, instance_uuid,
columns_to_join, use_slave=False):
if instance_uuid not in instance_map:
raise exception.InstanceNotFound(instance_id=instance_uuid)
call_info['get_by_uuid'] += 1
@@ -5962,7 +6097,7 @@ def fake_instance_get_by_uuid(context, instance_uuid,
return instance_map[instance_uuid]
# NOTE(comstud): Override the stub in setUp()
def fake_get_instance_nw_info(context, instance, use_slave=False):
# Note that this exception gets caught in compute/manager
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
@@ -6042,7 +6177,7 @@ def test_poll_rescued_instances(self, unrescue, get):
def fake_instance_get_all_by_filters(context, filters,
expected_attrs=None,
use_slave=False):
self.assertEqual(["system_metadata"], expected_attrs)
return instances
@@ -6080,16 +6215,24 @@ def test_poll_unconfirmed_resizes(self):
fake_instance.fake_db_instance(uuid='fake_uuid5',
vm_state=vm_states.ACTIVE,
task_state=None),
+ # The expected migration result will be None instead of error
+ # since _poll_unconfirmed_resizes will not change it
+ # when the instance vm state is RESIZED and the task state
+ # is deleting; see bug 1301696 for more detail
fake_instance.fake_db_instance(uuid='fake_uuid6',
vm_state=vm_states.RESIZED,
- task_state='deleting')]
+ task_state='deleting'),
+ fake_instance.fake_db_instance(uuid='fake_uuid7',
+ vm_state=vm_states.RESIZED,
+ task_state='soft-deleting')]
expected_migration_status = {'fake_uuid1': 'confirmed',
'noexist': 'error',
'fake_uuid2': 'error',
'fake_uuid3': 'error',
'fake_uuid4': None,
'fake_uuid5': 'error',
- 'fake_uuid6': 'error'}
+ 'fake_uuid6': None,
+ 'fake_uuid7': None}
migrations = []
for i, instance in enumerate(instances, start=1):
fake_mig = test_migration.fake_db_migration()
@@ -6099,7 +6242,7 @@ def test_poll_unconfirmed_resizes(self):
migrations.append(fake_mig)
def fake_instance_get_by_uuid(context, instance_uuid,
columns_to_join=None, use_slave=False):
self.assertIn('metadata', columns_to_join)
self.assertIn('system_metadata', columns_to_join)
# raise InstanceNotFound exception for uuid 'noexist'
@@ -6110,7 +6253,7 @@ def fake_instance_get_by_uuid(context, instance_uuid,
return instance
def fake_migration_get_unconfirmed_by_dest_compute(context,
resize_confirm_window, dest_compute, use_slave=False):
self.assertEqual(dest_compute, CONF.host)
return migrations
@@ -6149,8 +6292,9 @@ def fetch_instance_migration_status(instance_uuid):
self.compute._poll_unconfirmed_resizes(ctxt)
- for uuid, status in expected_migration_status.iteritems():
- self.assertEqual(status, fetch_instance_migration_status(uuid))
+ for instance_uuid, status in expected_migration_status.iteritems():
+ self.assertEqual(status,
+ fetch_instance_migration_status(instance_uuid))
def test_instance_build_timeout_mixed_instances(self):
# Tests that instances which failed to build within the configured
@@ -6167,7 +6311,7 @@ def test_instance_build_timeout_mixed_instances(self):
instance.update(filters)
old_instances.append(fake_instance.fake_db_instance(**instance))
- #not expired
+ # not expired
instances = list(old_instances) # copy the contents of old_instances
new_instance = {
'uuid': str(uuid.uuid4()),
@@ -6206,7 +6350,7 @@ def test_instance_build_timeout_mixed_instances(self):
sort_dir,
marker=None,
columns_to_join=[],
use_slave=True,
limit=None)
self.assertThat(conductor_instance_update.mock_calls,
testtools_matchers.HasLength(len(old_instances)))
@@ -6562,7 +6706,7 @@ def test_reclaim_queued_deletes_continue_on_error(self):
objects.InstanceList.get_by_filters(
ctxt, mox.IgnoreArg(),
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS,
use_slave=True
).AndReturn(instances)
# The first instance delete fails.
@@ -6603,12 +6747,12 @@ def test_sync_power_states(self):
{'state': power_state.RUNNING})
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.RUNNING,
use_slave=True)
self.compute.driver.get_info(mox.IgnoreArg()).AndReturn(
{'state': power_state.SHUTDOWN})
self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(),
power_state.SHUTDOWN,
use_slave=True)
self.mox.ReplayAll()
self.compute._sync_power_states(ctxt)
@@ -6617,7 +6761,7 @@ def _test_lifecycle_event(self, lifecycle_event, power_state):
uuid = instance['uuid']
self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
- if power_state != None:
+ if power_state is not None:
self.compute._sync_instance_power_state(
mox.IgnoreArg(),
mox.ContainsKeyValue('uuid', uuid),
@@ -6690,7 +6834,7 @@ def fake_setup_networks_on_host(self, *args, **kwargs):
self.assertEqual(vm_states.ACTIVE, instance['vm_state'])
def _get_instance_and_bdm_for_dev_defaults_tests(self):
- instance = self._create_fake_instance(
+ instance = self._create_fake_instance_obj(
params={'root_device_name': '/dev/vda'})
block_device_mapping = block_device_obj.block_device_make_list(
self.context, [fake_block_device.FakeDbBlockDeviceDict(
@@ -6705,12 +6849,11 @@ def _get_instance_and_bdm_for_dev_defaults_tests(self):
def test_default_block_device_names_empty_instance_root_dev(self):
instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
- instance['root_device_name'] = None
- self.mox.StubOutWithMock(self.compute, '_instance_update')
+ instance.root_device_name = None
+ self.mox.StubOutWithMock(objects.Instance, 'save')
self.mox.StubOutWithMock(self.compute,
'_default_device_names_for_instance')
- self.compute._instance_update(self.context, instance['uuid'],
- root_device_name='/dev/vda')
+ instance.save().AndReturn(None)
self.compute._default_device_names_for_instance(instance,
'/dev/vda', [], [],
[bdm for bdm in bdms])
@@ -6718,11 +6861,11 @@ def test_default_block_device_names_empty_instance_root_dev(self):
self.compute._default_block_device_names(self.context,
instance,
{}, bdms)
+ self.assertEqual('/dev/vda', instance.root_device_name)
def test_default_block_device_names_empty_root_device(self):
instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
bdms[0]['device_name'] = None
- self.mox.StubOutWithMock(self.compute, '_instance_update')
self.mox.StubOutWithMock(self.compute,
'_default_device_names_for_instance')
self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
@@ -6737,9 +6880,9 @@ def test_default_block_device_names_empty_root_device(self):
def test_default_block_device_names_no_root_device(self):
instance, bdms = self._get_instance_and_bdm_for_dev_defaults_tests()
- instance['root_device_name'] = None
+ instance.root_device_name = None
bdms[0]['device_name'] = None
- self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(objects.Instance, 'save')
self.mox.StubOutWithMock(objects.BlockDeviceMapping, 'save')
self.mox.StubOutWithMock(self.compute,
'_default_root_device_name')
@@ -6748,8 +6891,7 @@ def test_default_block_device_names_no_root_device(self):
self.compute._default_root_device_name(instance, mox.IgnoreArg(),
bdms[0]).AndReturn('/dev/vda')
- self.compute._instance_update(self.context, instance['uuid'],
- root_device_name='/dev/vda')
+ instance.save().AndReturn(None)
bdms[0].save().AndReturn(None)
self.compute._default_device_names_for_instance(instance,
'/dev/vda', [], [],
@@ -6758,6 +6900,65 @@ def test_default_block_device_names_no_root_device(self):
self.compute._default_block_device_names(self.context,
instance,
{}, bdms)
+ self.assertEqual('/dev/vda', instance.root_device_name)
+
+ def test_default_block_device_names_with_blank_volumes(self):
+ instance = self._create_fake_instance_obj()
+ image_meta = {}
+ root_volume = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 1, 'instance_uuid': 'fake-instance',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'image_id': 'fake-image-id-1',
+ 'boot_index': 0}))
+ blank_volume1 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 2, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'boot_index': -1}))
+ blank_volume2 = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 3, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'boot_index': -1}))
+ ephemeral = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 4, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'local'}))
+ swap = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict({
+ 'id': 5, 'instance_uuid': 'fake-instance',
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'guest_format': 'swap'
+ }))
+ bdms = block_device_obj.block_device_make_list(
+ self.context, [root_volume, blank_volume1, blank_volume2,
+ ephemeral, swap])
+
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_default_root_device_name',
+ return_value='/dev/vda'),
+ mock.patch.object(objects.Instance, 'save'),
+ mock.patch.object(objects.BlockDeviceMapping, 'save'),
+ mock.patch.object(self.compute,
+ '_default_device_names_for_instance')
+ ) as (default_root_device, instance_update, object_save,
+ default_device_names):
+ self.compute._default_block_device_names(self.context, instance,
+ image_meta, bdms)
+ default_root_device.assert_called_once_with(instance, image_meta,
+ bdms[0])
+ instance_update.assert_called_once_with()
+ self.assertEqual('/dev/vda', instance.root_device_name)
+ self.assertTrue(object_save.called)
+ default_device_names.assert_called_once_with(instance,
+ '/dev/vda', [bdms[-2]], [bdms[-1]],
+ [bdm for bdm in bdms[:-2]])
def test_reserve_block_device_name(self):
instance = self._create_fake_instance_obj(
@@ -6808,7 +7009,7 @@ def fake_get_nw_info(cls, ctxt, instance):
'ramdisk_id': 'fake_ramdisk_id'},
}
- def fake_show(obj, context, image_id):
+ def fake_show(obj, context, image_id, **kwargs):
if image_id:
return self.fake_image
else:
@@ -7208,7 +7409,7 @@ def fake_rpc_rebuild(context, **kwargs):
info['image_ref'] = kwargs['instance'].image_ref
info['clean'] = kwargs['instance'].obj_what_changed() == set()
- self.stubs.Set(self.compute_api.compute_rpcapi, 'rebuild_instance',
+ self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
fake_rpc_rebuild)
image_ref = instance["image_ref"] + '-new_image_ref'
@@ -7476,7 +7677,7 @@ def test_get(self):
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(_context, _instance_uuid,
columns_to_join=None, use_slave=False):
return exp_instance
self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
@@ -7497,7 +7698,7 @@ def test_get_with_admin_context(self):
instance_obj.INSTANCE_DEFAULT_FIELDS + ['fault']))
def fake_db_get(context, instance_uuid,
columns_to_join=None, use_slave=False):
return exp_instance
self.stubs.Set(db, 'instance_get_by_uuid', fake_db_get)
@@ -9195,7 +9396,7 @@ def fake_rebuild_instance(*args, **kwargs):
self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
fake_service_is_up)
- self.stubs.Set(self.compute_api.compute_rpcapi, 'rebuild_instance',
+ self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
fake_rebuild_instance)
self.compute_api.evacuate(self.context.elevated(),
instance,
@@ -9861,7 +10062,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
fake_driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, host="host",
aggregate=jsonutils.to_primitive(self.aggr), slave_info=None)
self.assertTrue(fake_driver_add_to_aggregate.called)
def test_remove_aggregate_host(self):
@@ -9875,36 +10076,36 @@ def fake_driver_remove_from_aggregate(context, aggregate, host,
self.compute.remove_aggregate_host(self.context,
aggregate=jsonutils.to_primitive(self.aggr), host="host",
slave_info=None)
self.assertTrue(fake_driver_remove_from_aggregate.called)
def test_add_aggregate_host_passes_slave_info_to_driver(self):
def driver_add_to_aggregate(context, aggregate, host, **kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(host, "the_host")
- self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
+ self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info"))
self.stubs.Set(self.compute.driver, "add_to_aggregate",
driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, host="the_host",
- slave_info="SLAVE_INFO",
+ subordinate_info="SLAVE_INFO",
aggregate=jsonutils.to_primitive(self.aggr))
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(host, "the_host")
- self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
+ self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info"))
self.stubs.Set(self.compute.driver, "remove_from_aggregate",
driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
aggregate=jsonutils.to_primitive(self.aggr), host="the_host",
- slave_info="SLAVE_INFO")
+ subordinate_info="SLAVE_INFO")
class ComputePolicyTestCase(BaseTestCase):
@@ -10171,12 +10372,12 @@ def _reschedule(self, request_spec=None, filter_properties=None,
instance = fake_instance.fake_db_instance(uuid=instance_uuid)
instance = self._objectify(instance)
instance_type = {}
- image = None
reservations = None
- scheduler_method = self.compute.scheduler_rpcapi.prep_resize
- method_args = (instance, instance_type, image, request_spec,
- filter_properties, reservations)
+ scheduler_method = self.compute.compute_task_api.resize_instance
+ scheduler_hint = dict(filter_properties=filter_properties)
+ method_args = (instance, None, scheduler_hint, instance_type,
+ reservations)
return self.compute._reschedule(self.context, request_spec,
filter_properties, instance, scheduler_method,
@@ -10463,13 +10664,14 @@ def test_reschedule_fails_with_exception(self):
raises another exception
"""
instance = self._create_fake_instance_obj()
- method_args = (None, instance, self.instance_type, None, None,
- None)
+ scheduler_hint = dict(filter_properties={})
+ method_args = (instance, None, scheduler_hint, self.instance_type,
+ None)
self.mox.StubOutWithMock(self.compute, "_reschedule")
self.compute._reschedule(
self.context, None, None, instance,
- self.compute.scheduler_rpcapi.prep_resize, method_args,
+ self.compute.compute_task_api.resize_instance, method_args,
task_states.RESIZE_PREP).AndRaise(
InnerTestingException("Inner"))
self.mox.ReplayAll()
@@ -10488,12 +10690,14 @@ def test_reschedule_false(self):
rescheduled.
"""
instance = self._create_fake_instance_obj()
- method_args = (None, instance, self.instance_type, None, None, None)
+ scheduler_hint = dict(filter_properties={})
+ method_args = (instance, None, scheduler_hint, self.instance_type,
+ None)
self.mox.StubOutWithMock(self.compute, "_reschedule")
self.compute._reschedule(
self.context, None, None, instance,
- self.compute.scheduler_rpcapi.prep_resize, method_args,
+ self.compute.compute_task_api.resize_instance, method_args,
task_states.RESIZE_PREP).AndReturn(False)
self.mox.ReplayAll()
@@ -10509,8 +10713,10 @@ def test_reschedule_false(self):
def test_reschedule_true(self):
# If rescheduled, the original resize exception should be logged.
instance = self._create_fake_instance_obj()
- instance_p = obj_base.obj_to_primitive(instance)
- method_args = (instance_p, self.instance_type, None, {}, {}, None)
+ scheduler_hint = dict(filter_properties={})
+ method_args = (instance, None, scheduler_hint, self.instance_type,
+ None)
+
try:
raise test.TestingException("Original")
except Exception:
@@ -10520,7 +10726,7 @@ def test_reschedule_true(self):
self.mox.StubOutWithMock(self.compute, "_log_original_error")
self.compute._reschedule(self.context, {}, {},
instance,
- self.compute.scheduler_rpcapi.prep_resize, method_args,
+ self.compute.compute_task_api.resize_instance, method_args,
task_states.RESIZE_PREP, exc_info).AndReturn(True)
self.compute._log_original_error(exc_info, instance.uuid)
@@ -10535,7 +10741,7 @@ class ComputeInactiveImageTestCase(BaseTestCase):
def setUp(self):
super(ComputeInactiveImageTestCase, self).setUp()
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
'status': 'deleted',
@@ -10627,7 +10833,7 @@ def test_rebuild_with_instance_in_stopped_state(self):
"""Confirm evacuate scenario updates vm_state to stopped
if instance is in stopped state
"""
- #Initialize the VM to stopped state
+ # Initialize the VM to stopped state
db.instance_update(self.context, self.inst_ref['uuid'],
{"vm_state": vm_states.STOPPED})
self.inst_ref['vm_state'] = vm_states.STOPPED
@@ -10637,7 +10843,7 @@ def test_rebuild_with_instance_in_stopped_state(self):
self._rebuild()
- #Check the vm state is reset to stopped
+ # Check the vm state is reset to stopped
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
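
A note on the clean-shutdown plumbing exercised above: resize and rescue now
take a clean_shutdown flag, and the manager's _get_power_off_values (stubbed
to return (1, 2) in _test_resize_instance) supplies the shutdown_timeout and
shutdown_attempts pair that the driver's power_off receives. A minimal sketch
of that contract, with assumed defaults (illustrative only, not the manager's
actual code):

# Sketch only: the tests above stub this out, so the real lookup of the
# timeout (from config or image properties) may differ.
DEFAULT_SHUTDOWN_TIMEOUT = 60  # assumed graceful-shutdown budget, seconds
DEFAULT_SHUTDOWN_ATTEMPTS = 3  # assumed number of polls before forcing

def get_power_off_values(instance, clean_shutdown=True):
    """Return (shutdown_timeout, shutdown_attempts) for power_off().

    A clean shutdown gives the guest time to halt on its own; a forced
    shutdown (clean_shutdown=False) returns (0, 0) so the driver cuts
    power immediately.
    """
    if clean_shutdown:
        # 'instance' is treated as a plain dict in this sketch.
        timeout = instance.get('shutdown_timeout') or DEFAULT_SHUTDOWN_TIMEOUT
        return timeout, DEFAULT_SHUTDOWN_ATTEMPTS
    return 0, 0
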
diff --git a/nova/tests/compute/test_compute_api.py b/nova/tests/compute/test_compute_api.py
index b05040d487..f084692d67 100644
--- a/nova/tests/compute/test_compute_api.py
+++ b/nova/tests/compute/test_compute_api.py
@@ -65,6 +65,16 @@ def setUp(self):
self.context = context.RequestContext(self.user_id,
self.project_id)
+ def _get_vm_states(self, exclude_states=None):
+ vm_state = set([vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED,
+ vm_states.SUSPENDED, vm_states.RESCUED, vm_states.STOPPED,
+ vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.DELETED, vm_states.ERROR, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED])
+ if not exclude_states:
+ exclude_states = set()
+ return vm_state - exclude_states
+
def _create_flavor(self, params=None):
flavor = {'id': 1,
'flavorid': 1,
@@ -161,6 +171,9 @@ def test_create_quota_exceeded_messages(self):
quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int))
quota.QUOTAS.reserve(self.context, instances=40,
cores=mox.IsA(int),
+ expire=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ user_id=mox.IgnoreArg(),
ram=mox.IsA(int)).AndRaise(quota_exception)
self.mox.ReplayAll()
@@ -244,6 +257,19 @@ def test_suspend(self):
self.assertEqual(task_states.SUSPENDING,
instance.task_state)
+ def _test_suspend_fails(self, vm_state):
+ params = dict(vm_state=vm_state)
+ instance = self._create_instance_obj(params=params)
+ self.assertIsNone(instance.task_state)
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.suspend,
+ self.context, instance)
+
+ def test_suspend_fails_invalid_states(self):
+ invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
+ for state in invalid_vm_states:
+ self._test_suspend_fails(state)
+
def test_resume(self):
# Ensure instance can be resumed (if suspended).
instance = self._create_instance_obj(
@@ -349,13 +375,19 @@ def test_stop(self):
def test_stop_stopped_instance_with_bypass(self):
self._test_stop(vm_states.STOPPED, force=True)
- def test_stop_invalid_state(self):
- params = dict(vm_state=vm_states.PAUSED)
+ def _test_stop_invalid_state(self, vm_state):
+ params = dict(vm_state=vm_state)
instance = self._create_instance_obj(params=params)
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.stop,
self.context, instance)
+ def test_stop_fails_invalid_states(self):
+ invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE,
+ vm_states.ERROR]))
+ for state in invalid_vm_states:
+ self._test_stop_invalid_state(state)
+
def test_stop_a_stopped_inst(self):
params = {'vm_state': vm_states.STOPPED}
instance = self._create_instance_obj(params=params)
@@ -539,7 +571,7 @@ def _test_downed_host_part(self, inst, updates, delete_time, delete_type):
system_metadata=inst.system_metadata)
def _test_delete(self, delete_type, **attrs):
- reservations = 'fake-resv'
+ reservations = ['fake-resv']
inst = self._create_instance_obj()
inst.update(attrs)
inst._context = self.context
@@ -593,6 +625,7 @@ def _test_delete(self, delete_type, **attrs):
self._test_delete_resizing_part(inst, deltas)
quota.QUOTAS.reserve(self.context, project_id=inst.project_id,
user_id=inst.user_id,
+ expire=mox.IgnoreArg(),
**deltas).AndReturn(reservations)
# NOTE(comstud): This is getting messy. But what we are wanting
@@ -716,6 +749,7 @@ def test_delete_forced(self):
def test_delete_fast_if_host_not_set(self):
inst = self._create_instance_obj()
inst.host = ''
+ quotas = quotas_obj.Quotas(self.context)
updates = {'progress': 0, 'task_state': task_states.DELETING}
self.mox.StubOutWithMock(inst, 'save')
@@ -735,12 +769,12 @@ def test_delete_fast_if_host_not_set(self):
db.block_device_mapping_get_all_by_instance(self.context,
inst.uuid,
use_slave=False).AndReturn([])
inst.save()
self.compute_api._create_reservations(self.context,
inst, inst.task_state,
inst.project_id, inst.user_id
- ).AndReturn(None)
+ ).AndReturn(quotas)
if self.cell_type == 'api':
rpcapi.terminate_instance(
@@ -838,7 +872,7 @@ def test_delete_soft_rollback(self):
timeutils.set_time_override(delete_time)
db.block_device_mapping_get_all_by_instance(
self.context, inst.uuid, use_slave=False).AndReturn([])
inst.save().AndRaise(test.TestingException)
self.mox.ReplayAll()
@@ -1283,6 +1317,19 @@ def test_pause(self):
self.assertEqual(task_states.PAUSING,
instance.task_state)
+ def _test_pause_fails(self, vm_state):
+ params = dict(vm_state=vm_state)
+ instance = self._create_instance_obj(params=params)
+ self.assertIsNone(instance.task_state)
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.pause,
+ self.context, instance)
+
+ def test_pause_fails_invalid_states(self):
+ invalid_vm_states = self._get_vm_states(set([vm_states.ACTIVE]))
+ for state in invalid_vm_states:
+ self._test_pause_fails(state)
+
def test_unpause(self):
# Ensure instance can be unpaused.
params = dict(vm_state=vm_states.PAUSED)
@@ -1602,6 +1649,7 @@ def test_snapshot_volume_backed(self):
'properties': {'mappings': []},
'status': 'fake-status',
'location': 'far-away',
+ 'owner': 'fake-tenant',
}
expect_meta = {
@@ -1612,7 +1660,7 @@ def test_snapshot_volume_backed(self):
'is_public': False
}
def fake_get_all_by_instance(context, instance, use_slave=False):
return copy.deepcopy(instance_bdms)
def fake_image_create(context, image_meta, data=None):
@@ -1790,7 +1838,7 @@ def _setup_fake_image_with_disabled_disk_config(self):
'properties': {"auto_disk_config": "Disabled"},
}
- def fake_show(obj, context, image_id):
+ def fake_show(obj, context, image_id, **kwargs):
return self.fake_image
fake_image.stub_out_image_service(self.stubs)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
@@ -1851,7 +1899,7 @@ def test_rebuild(self, _record_action_start,
_get_image.return_value = (None, image)
bdm_get_by_instance_uuid.return_value = bdms
- with mock.patch.object(self.compute_api.compute_rpcapi,
+ with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
self.compute_api.rebuild(self.context, instance, image_href,
admin_pass, files_to_inject)
@@ -1861,7 +1909,7 @@ def test_rebuild(self, _record_action_start,
injected_files=files_to_inject, image_ref=image_href,
orig_image_ref=image_href,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
- preserve_ephemeral=False, kwargs={})
+ preserve_ephemeral=False, host=instance.host, kwargs={})
_check_auto_disk_config.assert_called_once_with(image=image)
_checks_for_create_and_rebuild.assert_called_once_with(self.context,
@@ -1909,7 +1957,7 @@ def get_image(context, image_href):
_get_image.side_effect = get_image
bdm_get_by_instance_uuid.return_value = bdms
- with mock.patch.object(self.compute_api.compute_rpcapi,
+ with mock.patch.object(self.compute_api.compute_task_api,
'rebuild_instance') as rebuild_instance:
self.compute_api.rebuild(self.context, instance, new_image_href,
admin_pass, files_to_inject)
@@ -1919,15 +1967,51 @@ def get_image(context, image_href):
injected_files=files_to_inject, image_ref=new_image_href,
orig_image_ref=orig_image_href,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
- preserve_ephemeral=False, kwargs={})
+ preserve_ephemeral=False, host=instance.host, kwargs={})
_check_auto_disk_config.assert_called_once_with(image=new_image)
_checks_for_create_and_rebuild.assert_called_once_with(self.context,
None, new_image, flavor, {}, [])
self.assertEqual(vm_mode.XEN, instance.vm_mode)
- @mock.patch('nova.quota.QUOTAS.commit')
- @mock.patch('nova.quota.QUOTAS.reserve')
+ def _test_check_injected_file_quota_onset_file_limit_exceeded(self,
+ side_effect):
+ injected_files = [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "foo"
+ }
+ ]
+ with mock.patch.object(quota.QUOTAS, 'limit_check',
+ side_effect=side_effect):
+ self.compute_api._check_injected_file_quota(
+ self.context, injected_files)
+
+ def test_check_injected_file_quota_onset_file_limit_exceeded(self):
+ # This is the first call to limit_check.
+ side_effect = exception.OverQuota(overs='injected_files')
+ self.assertRaises(exception.OnsetFileLimitExceeded,
+ self._test_check_injected_file_quota_onset_file_limit_exceeded,
+ side_effect)
+
+ def test_check_injected_file_quota_onset_file_path_limit(self):
+ # This is the second call to limit_check.
+ side_effect = (mock.DEFAULT,
+ exception.OverQuota(overs='injected_file_path_bytes'))
+ self.assertRaises(exception.OnsetFilePathLimitExceeded,
+ self._test_check_injected_file_quota_onset_file_limit_exceeded,
+ side_effect)
+
+ def test_check_injected_file_quota_onset_file_content_limit(self):
+ # This is the second call to limit_check but with different overs.
+ side_effect = (mock.DEFAULT,
+ exception.OverQuota(overs='injected_file_content_bytes'))
+ self.assertRaises(exception.OnsetFileContentLimitExceeded,
+ self._test_check_injected_file_quota_onset_file_limit_exceeded,
+ side_effect)
+
+ @mock.patch('nova.objects.Quotas.commit')
+ @mock.patch('nova.objects.Quotas.reserve')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstanceAction.action_start')
def test_restore(self, action_start, instance_save, quota_reserve,
@@ -2036,8 +2120,6 @@ def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
@mock.patch.object(objects.Instance, 'create')
@mock.patch.object(compute_api.SecurityGroupAPI, 'ensure_default')
- @mock.patch.object(compute_api.API,
- '_populate_instance_shutdown_terminate')
@mock.patch.object(compute_api.API, '_populate_instance_names')
@mock.patch.object(compute_api.API, '_populate_instance_for_create')
@mock.patch.object(cinder.API, 'get',
@@ -2045,7 +2127,6 @@ def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
def test_create_db_entry_for_new_instance_with_cinder_down(self, mock_get,
mock_create,
mock_names,
- mock_terminate,
mock_ensure,
mock_inst_create):
instance = self._create_instance_obj()
@@ -2172,6 +2253,36 @@ def do_test(compute_rpcapi_mock, record_mock, instance_save_mock):
do_test()
+ def _test_attach_interface_invalid_state(self, state):
+ instance = self._create_instance_obj(
+ params={'vm_state': state})
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.attach_interface,
+ self.context, instance, '', '', '', [])
+
+ def test_attach_interface_invalid_state(self):
+ for state in [vm_states.BUILDING, vm_states.DELETED,
+ vm_states.ERROR, vm_states.RESCUED,
+ vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.SUSPENDED, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED]:
+ self._test_attach_interface_invalid_state(state)
+
+ def _test_detach_interface_invalid_state(self, state):
+ instance = self._create_instance_obj(
+ params={'vm_state': state})
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.detach_interface,
+ self.context, instance, '', '', '', [])
+
+ def test_detach_interface_invalid_state(self):
+ for state in [vm_states.BUILDING, vm_states.DELETED,
+ vm_states.ERROR, vm_states.RESCUED,
+ vm_states.RESIZED, vm_states.SOFT_DELETED,
+ vm_states.SUSPENDED, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED]:
+ self._test_detach_interface_invalid_state(state)
+
class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
def setUp(self):
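
One detail worth calling out in the injected-file quota tests above:
limit_check is patched with a tuple as side_effect. mock consumes such an
iterable one element per call, raising any exception instance it finds and
treating mock.DEFAULT as "fall back to the mock's normal return value", which
is how the second limit_check call can fail while the first succeeds. A
standalone illustration (not Nova code):

import mock  # same library the tests above use

checker = mock.Mock(side_effect=(mock.DEFAULT, ValueError('over quota')))

checker('injected_files')                # first call returns normally
try:
    checker('injected_file_path_bytes')  # second call raises ValueError
except ValueError:
    pass
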
diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py
index 136282da43..2bf6c7a5c8 100644
--- a/nova/tests/compute/test_compute_cells.py
+++ b/nova/tests/compute/test_compute_cells.py
@@ -16,6 +16,7 @@
Tests For Compute w/ Cells
"""
import functools
+import inspect
import mock
from oslo.config import cfg
@@ -23,9 +24,16 @@
from nova.cells import manager
from nova.compute import api as compute_api
from nova.compute import cells_api as compute_cells_api
+from nova.compute import flavors
+from nova.compute import vm_states
+from nova import context
from nova import db
+from nova import objects
+from nova.openstack.common import timeutils
from nova import quota
+from nova import test
from nova.tests.compute import test_compute
+from nova.tests import fake_instance
ORIG_COMPUTE_API = None
@@ -204,6 +212,110 @@ def cast(context, method, *args, **kwargs):
self.assertEqual(1, mock_msg.call_count)
+class CellsConductorAPIRPCRedirect(test.NoDBTestCase):
+ def setUp(self):
+ super(CellsConductorAPIRPCRedirect, self).setUp()
+
+ self.compute_api = compute_cells_api.ComputeCellsAPI()
+ self.cells_rpcapi = mock.MagicMock()
+ self.compute_api._compute_task_api.cells_rpcapi = self.cells_rpcapi
+
+ self.context = context.RequestContext('fake', 'fake')
+
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ @mock.patch.object(compute_api.API, '_provision_instances')
+ @mock.patch.object(compute_api.API, '_check_and_transform_bdm')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_validate_and_build_base_options')
+ def test_build_instances(self, _validate, _get_image, _check_bdm,
+ _provision, _record_action_start):
+ _get_image.return_value = (None, 'fake-image')
+ _validate.return_value = (None, 1)
+ _check_bdm.return_value = 'bdms'
+ _provision.return_value = 'instances'
+
+ self.compute_api.create(self.context, 'fake-flavor', 'fake-image')
+
+ # Subsequent tests in this class verify the hooking; we don't check
+ # args here since that is covered by the compute test code.
+ self.assertTrue(self.cells_rpcapi.build_instances.called)
+
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ @mock.patch.object(compute_api.API, '_resize_cells_support')
+ @mock.patch.object(compute_api.API, '_reserve_quota_delta')
+ @mock.patch.object(compute_api.API, '_upsize_quota_delta')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(flavors, 'extract_flavor')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ def test_resize_instance(self, _check, _extract, _save, _upsize, _reserve,
+ _cells, _record):
+ _extract.return_value = {'name': 'fake', 'id': 'fake'}
+ orig_system_metadata = {}
+ instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata=orig_system_metadata,
+ expected_attrs=['system_metadata'])
+
+ self.compute_api.resize(self.context, instance)
+ self.assertTrue(self.cells_rpcapi.resize_instance.called)
+
+ @mock.patch.object(objects.Instance, 'save')
+ def test_live_migrate_instance(self, instance_save):
+ orig_system_metadata = {}
+ instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata=orig_system_metadata,
+ expected_attrs=['system_metadata'])
+
+ self.compute_api.live_migrate(self.context, instance,
+ True, True, 'fake_dest_host')
+
+ self.assertTrue(self.cells_rpcapi.live_migrate_instance.called)
+
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_instance(self, _record_action_start,
+ _checks_for_create_and_rebuild, _check_auto_disk_config,
+ _get_image, bdm_get_by_instance_uuid, get_flavor, instance_save):
+ orig_system_metadata = {}
+ instance = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata=orig_system_metadata,
+ expected_attrs=['system_metadata'])
+ get_flavor.return_value = ''
+ image_href = ''
+ image = {"min_ram": 10, "min_disk": 1,
+ "properties": {'architecture': 'x86_64'}}
+ admin_pass = ''
+ files_to_inject = []
+ bdms = []
+
+ _get_image.return_value = (None, image)
+ bdm_get_by_instance_uuid.return_value = bdms
+
+ self.compute_api.rebuild(self.context, instance, image_href,
+ admin_pass, files_to_inject)
+
+ self.assertTrue(self.cells_rpcapi.rebuild_instance.called)
+
+ def test_check_equal(self):
+ task_api = self.compute_api.compute_task_api
+ tests = set()
+ for (name, value) in inspect.getmembers(self, inspect.ismethod):
+ if name.startswith('test_') and name != 'test_check_equal':
+ tests.add(name[5:])
+ if tests != set(task_api.cells_compatible):
+ self.fail("Testcases not equivalent to cells_compatible list")
+
+
class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
def setUp(self):
super(CellsComputePolicyTestCase, self).setUp()
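
The test_check_equal case above is a self-auditing pattern: it introspects the
test class and fails unless the set of test_<name> methods exactly matches the
task API's cells_compatible list, so a method cannot be added to the redirect
list without a corresponding test (or vice versa). The same idea in isolation,
with hypothetical class and attribute names:

import inspect
import unittest

class FakeTaskAPI(object):
    # hypothetical stand-in for the real cells_compatible tuple
    cells_compatible = ('build_instances', 'resize_instance')

class RedirectTests(unittest.TestCase):
    def test_build_instances(self):
        pass

    def test_resize_instance(self):
        pass

    def test_check_equal(self):
        tests = set()
        for name, _value in inspect.getmembers(self, inspect.ismethod):
            if name.startswith('test_') and name != 'test_check_equal':
                tests.add(name[len('test_'):])
        self.assertEqual(set(FakeTaskAPI.cells_compatible), tests)
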
diff --git a/nova/tests/compute/test_compute_mgr.py b/nova/tests/compute/test_compute_mgr.py
index 1fcfec5d53..37c198bdbf 100644
--- a/nova/tests/compute/test_compute_mgr.py
+++ b/nova/tests/compute/test_compute_mgr.py
@@ -15,6 +15,7 @@
import contextlib
import time
+from cinderclient import exceptions as cinder_exception
from eventlet import event as eventlet_event
import mock
import mox
@@ -98,6 +99,52 @@ def test_allocate_network_succeeds_after_retries(self):
dhcp_options)
self.assertEqual(final_result, res)
+ def test_allocate_network_maintains_context(self):
+ # override tracker with a version that doesn't need the database:
+ class FakeResourceTracker(object):
+ def instance_claim(self, context, instance, limits):
+ return mox.MockAnything()
+
+ self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
+ self.mox.StubOutWithMock(self.compute, '_allocate_network')
+ self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
+ 'get_by_instance_uuid')
+
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ objects.BlockDeviceMappingList.get_by_instance_uuid(
+ mox.IgnoreArg(), instance.uuid).AndReturn([])
+
+ node = 'fake_node'
+ self.compute._get_resource_tracker(node).AndReturn(
+ FakeResourceTracker())
+
+ self.admin_context = False
+
+ def fake_allocate(context, *args, **kwargs):
+ if context.is_admin:
+ self.admin_context = True
+
+ # NOTE(vish): The nice mox parameter matchers here don't work well
+ # because they raise an exception that gets wrapped by
+ # the retry exception handling, so use a side effect
+ # to keep track of whether allocate was called with admin
+ # context.
+ self.compute._allocate_network(mox.IgnoreArg(), instance,
+ mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg()).WithSideEffects(fake_allocate)
+
+ self.mox.ReplayAll()
+
+ instance, nw_info = self.compute._build_instance(self.context, {}, {},
+ None, None, None, True,
+ node, instance,
+ {}, False)
+ self.assertFalse(self.admin_context,
+ "_allocate_network called with admin context")
+ self.assertEqual(vm_states.BUILDING, instance.vm_state)
+ self.assertEqual(task_states.BLOCK_DEVICE_MAPPING, instance.task_state)
+
def test_allocate_network_fails(self):
self.flags(network_allocate_retries=0)
@@ -166,7 +213,7 @@ def _do_mock_calls(defer_iptables_apply):
context.get_admin_context().AndReturn(fake_context)
db.instance_get_all_by_host(
fake_context, our_host, columns_to_join=['info_cache'],
use_slave=False
).AndReturn(startup_instances)
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_on()
@@ -248,7 +295,7 @@ def test_init_host_with_deleted_migration(self):
context.get_admin_context().AndReturn(fake_context)
db.instance_get_all_by_host(fake_context, our_host,
columns_to_join=['info_cache'],
use_slave=False
).AndReturn([])
self.compute.init_virt_events()
@@ -437,10 +484,13 @@ def test_init_instance_sets_building_tasks_error_spawning(self):
def _test_init_instance_cleans_image_states(self, instance):
with mock.patch.object(instance, 'save') as save:
self.compute._get_power_state = mock.Mock()
+ self.compute.driver.post_interrupted_snapshot_cleanup = mock.Mock()
instance.info_cache = None
instance.power_state = power_state.RUNNING
self.compute._init_instance(self.context, instance)
save.assert_called_once_with()
+ self.compute.driver.post_interrupted_snapshot_cleanup.\
+ assert_called_once_with(self.context, instance)
self.assertIsNone(instance.task_state)
def test_init_instance_cleans_image_state_pending_upload(self):
@@ -504,6 +554,26 @@ def test_init_instance_deletes_error_deleting_instance(self):
self.compute._init_instance(self.context, instance)
self.mox.VerifyAll()
+ @mock.patch('nova.context.RequestContext.elevated')
+ @mock.patch('nova.compute.utils.get_nw_info_for_instance')
+ @mock.patch(
+ 'nova.compute.manager.ComputeManager._get_instance_block_device_info')
+ @mock.patch('nova.virt.driver.ComputeDriver.destroy')
+ @mock.patch('nova.virt.driver.ComputeDriver.get_volume_connector')
+ def test_shutdown_instance_endpoint_not_found(self, mock_connector,
+ mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
+ mock_connector.side_effect = cinder_exception.EndpointNotFound
+ mock_elevated.return_value = self.context
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid='fake',
+ vm_state=vm_states.ERROR,
+ task_state=task_states.DELETING)
+ bdms = [mock.Mock(id=1, is_volume=True)]
+
+ self.compute._shutdown_instance(self.context, instance, bdms,
+ notify=False, try_deallocate_networks=False)
+
def _test_init_instance_retries_reboot(self, instance, reboot_type,
return_power_state):
with contextlib.nested(
@@ -658,7 +728,7 @@ def test_get_instances_on_driver(self):
inst in driver_instances]},
'created_at', 'desc', columns_to_join=None,
limit=None, marker=None,
use_slave=True).AndReturn(
driver_instances)
self.mox.ReplayAll()
@@ -699,7 +769,7 @@ def test_get_instances_on_driver_fallback(self):
fake_context, filters,
'created_at', 'desc', columns_to_join=None,
limit=None, marker=None,
use_slave=True).AndReturn(all_instances)
self.mox.ReplayAll()
@@ -731,13 +801,15 @@ def fake_get(*a, **k):
self.mox.ReplayAll()
self.compute._instance_usage_audit(self.context)
- def _get_sync_instance(self, power_state, vm_state, task_state=None):
+ def _get_sync_instance(self, power_state, vm_state, task_state=None,
+ shutdown_terminate=False):
instance = objects.Instance()
instance.uuid = 'fake-uuid'
instance.power_state = power_state
instance.vm_state = vm_state
instance.host = self.compute.host
instance.task_state = task_state
+ instance.shutdown_terminate = shutdown_terminate
self.mox.StubOutWithMock(instance, 'refresh')
self.mox.StubOutWithMock(instance, 'save')
return instance
@@ -745,7 +817,7 @@ def _get_sync_instance(self, power_state, vm_state, task_state=None):
def test_sync_instance_power_state_match(self):
instance = self._get_sync_instance(power_state.RUNNING,
vm_states.ACTIVE)
instance.refresh(use_slave=False)
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
power_state.RUNNING)
@@ -753,7 +825,7 @@ def test_sync_instance_power_state_match(self):
def test_sync_instance_power_state_running_stopped(self):
instance = self._get_sync_instance(power_state.RUNNING,
vm_states.ACTIVE)
instance.refresh(use_slave=False)
instance.save()
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
@@ -761,13 +833,17 @@ def test_sync_instance_power_state_running_stopped(self):
self.assertEqual(instance.power_state, power_state.SHUTDOWN)
def _test_sync_to_stop(self, power_state, vm_state, driver_power_state,
- stop=True, force=False):
- instance = self._get_sync_instance(power_state, vm_state)
- instance.refresh(use_slave=False)
+ stop=True, force=False, shutdown_terminate=False):
+ instance = self._get_sync_instance(
+ power_state, vm_state, shutdown_terminate=shutdown_terminate)
+ instance.refresh(use_slave=False)
instance.save()
self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
+ self.mox.StubOutWithMock(self.compute.compute_api, 'delete')
self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
- if stop:
+ if shutdown_terminate:
+ self.compute.compute_api.delete(self.context, instance)
+ elif stop:
if force:
self.compute.compute_api.force_stop(self.context, instance)
else:
@@ -790,6 +866,11 @@ def test_sync_instance_power_state_to_stop(self):
self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
power_state.RUNNING, force=True)
+ def test_sync_instance_power_state_to_terminate(self):
+ self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE,
+ power_state.SHUTDOWN,
+ force=False, shutdown_terminate=True)
+
def test_sync_instance_power_state_to_no_stop(self):
for ps in (power_state.PAUSED, power_state.NOSTATE):
self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps,
@@ -826,7 +907,7 @@ def test_query_driver_power_state_and_sync_not_found_driver(
mock_sync_power_state.assert_called_once_with(self.context,
db_instance,
power_state.NOSTATE,
use_slave=True)
def test_run_pending_deletes(self):
self.flags(instance_delete_interval=10)
@@ -859,7 +940,8 @@ def get_by_filters(self, *args, **kwargs):
{'deleted': True, 'soft_deleted': False, 'host': 'fake-mini',
'cleaned': False},
expected_attrs=['info_cache', 'security_groups',
- 'system_metadata']).AndReturn([a, b, c])
+ 'system_metadata'],
+ use_slave=True).AndReturn([a, b, c])
self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files')
self.compute.driver.delete_instance_files(
@@ -877,6 +959,48 @@ def get_by_filters(self, *args, **kwargs):
self.assertFalse(c.cleaned)
self.assertEqual('1', c.system_metadata['clean_attempts'])
+ def test_attach_interface_failure(self):
+ # Test that the fault methods are invoked when an attach fails
+ db_instance = fake_instance.fake_db_instance()
+ f_instance = objects.Instance._from_db_object(self.context,
+ objects.Instance(),
+ db_instance)
+ e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid)
+
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ @mock.patch.object(self.compute.network_api,
+ 'allocate_port_for_instance',
+ side_effect=e)
+ def do_test(meth, add_fault):
+ self.assertRaises(exception.InterfaceAttachFailed,
+ self.compute.attach_interface,
+ self.context, f_instance, 'net_id', 'port_id',
+ None)
+ add_fault.assert_has_calls(
+ mock.call(self.context, f_instance, e,
+ mock.ANY))
+
+ do_test()
+
+ def test_detach_interface_failure(self):
+ # Test that the fault methods are invoked when a detach fails
+
+ # Build test data that will cause a PortNotFound exception
+ f_instance = mock.MagicMock()
+ f_instance.info_cache = mock.MagicMock()
+ f_instance.info_cache.network_info = []
+
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ @mock.patch.object(self.compute, '_set_instance_error_state')
+ def do_test(meth, add_fault):
+ self.assertRaises(exception.PortNotFound,
+ self.compute.detach_interface,
+ self.context, f_instance, 'port_id')
+ add_fault.assert_has_calls(
+ mock.call(self.context, f_instance, mock.ANY, mock.ANY))
+
+ do_test()
+
def test_swap_volume_volume_api_usage(self):
# This test ensures that volume_id arguments are passed to volume_api
# and that volume states are OK
@@ -884,11 +1008,13 @@ def test_swap_volume_volume_api_usage(self):
old_volume_id = uuidutils.generate_uuid()
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
- 'status': 'detaching'}
+ 'status': 'detaching',
+ 'size': 1}
new_volume_id = uuidutils.generate_uuid()
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
- 'status': 'available'}
+ 'status': 'available',
+ 'size': 2}
def fake_vol_api_begin_detaching(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
@@ -941,6 +1067,10 @@ def fake_vol_migrate_volume_completion(context, old_volume_id,
def fake_func_exc(*args, **kwargs):
raise AttributeError # Random exception
+ def fake_swap_volume(old_connection_info, new_connection_info,
+ instance, mountpoint, resize_to):
+ self.assertEqual(resize_to, 2)
+
self.stubs.Set(self.compute.volume_api, 'begin_detaching',
fake_vol_api_begin_detaching)
self.stubs.Set(self.compute.volume_api, 'roll_detaching',
@@ -961,7 +1091,7 @@ def fake_func_exc(*args, **kwargs):
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda x: {})
self.stubs.Set(self.compute.driver, 'swap_volume',
- lambda w, x, y, z: None)
+ fake_swap_volume)
self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
fake_vol_migrate_volume_completion)
self.stubs.Set(db, 'block_device_mapping_update',
@@ -1271,7 +1401,7 @@ def test_remove_volume_connection(self, inst_from_db, detach, bdm_get):
inst_obj)
detach.assert_called_once_with(self.context, inst_obj, bdm)
- def test_rescue(self):
+ def _test_rescue(self, clean_shutdown=True):
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE)
fake_nw_info = network_model.NetworkInfo()
@@ -1287,6 +1417,7 @@ def test_rescue(self):
mock.patch.object(self.compute, '_get_rescue_image',
return_value=rescue_image_meta),
mock.patch.object(self.compute, '_notify_about_instance_usage'),
+ mock.patch.object(self.compute, '_power_off_instance'),
mock.patch.object(self.compute.driver, 'rescue'),
mock.patch.object(self.compute.conductor_api,
'notify_usage_exists'),
@@ -1295,12 +1426,12 @@ def test_rescue(self):
mock.patch.object(instance, 'save')
) as (
event_start, event_finish, elevated_context, get_nw_info,
- get_rescue_image, notify_instance_usage, driver_rescue,
- notify_usage_exists, get_power_state, instance_save
+ get_rescue_image, notify_instance_usage, power_off_instance,
+ driver_rescue, notify_usage_exists, get_power_state, instance_save
):
self.compute.rescue_instance(
self.context, instance, rescue_password='verybadpass',
- rescue_image_ref=None)
+ rescue_image_ref=None, clean_shutdown=clean_shutdown)
# assert the field values on the instance object
self.assertEqual(vm_states.RESCUED, instance.vm_state)
@@ -1324,6 +1455,9 @@ def test_rescue(self):
]
notify_instance_usage.assert_has_calls(notify_calls)
+ power_off_instance.assert_called_once_with(self.context, instance,
+ clean_shutdown)
+
driver_rescue.assert_called_once_with(
self.context, instance, fake_nw_info, rescue_image_meta,
'verybadpass')
@@ -1334,6 +1468,12 @@ def test_rescue(self):
instance_save.assert_called_once_with(
expected_task_state=task_states.RESCUING)
+ def test_rescue(self):
+ self._test_rescue()
+
+ def test_rescue_forced_shutdown(self):
+ self._test_rescue(clean_shutdown=False)
+
def test_unrescue(self):
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.RESCUED)
@@ -1509,15 +1649,18 @@ def test_set_admin_password_driver_not_implemented(self):
self._do_test_set_admin_password_driver_error(
exc, vm_states.ACTIVE, None, expected_exception)
- def test_init_host_with_partial_migration(self):
+ def _test_init_host_with_partial_migration(self, task_state=None,
+ vm_state=vm_states.ACTIVE):
our_host = self.compute.host
instance_1 = objects.Instance(self.context)
instance_1.uuid = 'foo'
- instance_1.task_state = task_states.MIGRATING
+ instance_1.task_state = task_state
+ instance_1.vm_state = vm_state
instance_1.host = 'not-' + our_host
instance_2 = objects.Instance(self.context)
instance_2.uuid = 'bar'
instance_2.task_state = None
+ instance_2.vm_state = vm_states.ACTIVE
instance_2.host = 'not-' + our_host
with contextlib.nested(
@@ -1538,6 +1681,26 @@ def test_init_host_with_partial_migration(self):
destroy.assert_called_once_with(self.context, instance_2, None,
{}, True)
+ def test_init_host_with_partial_migration_migrating(self):
+ self._test_init_host_with_partial_migration(
+ task_state=task_states.MIGRATING)
+
+ def test_init_host_with_partial_migration_resize_migrating(self):
+ self._test_init_host_with_partial_migration(
+ task_state=task_states.RESIZE_MIGRATING)
+
+ def test_init_host_with_partial_migration_resize_migrated(self):
+ self._test_init_host_with_partial_migration(
+ task_state=task_states.RESIZE_MIGRATED)
+
+ def test_init_host_with_partial_migration_finish_resize(self):
+ self._test_init_host_with_partial_migration(
+ task_state=task_states.RESIZE_FINISH)
+
+ def test_init_host_with_partial_migration_resized(self):
+ self._test_init_host_with_partial_migration(
+ vm_state=vm_states.RESIZED)
+
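Together these five cases fix the init_host contract: an instance that reports another host is only destroyed as evacuated when it is not mid-migration or mid-resize. A sketch of the filter they imply (names and structure are assumptions):

    # Sketch (assumption): skip instances a migration or resize still owns.
    MIGRATION_STATES = (task_states.MIGRATING,
                        task_states.RESIZE_MIGRATING,
                        task_states.RESIZE_MIGRATED,
                        task_states.RESIZE_FINISH)
    for instance in instances:
        if instance.host == self.host:
            continue
        if (instance.task_state in MIGRATION_STATES
                or instance.vm_state == vm_states.RESIZED):
            continue  # in-flight migration/resize; leave it alone
        # matches destroy.assert_called_once_with(ctxt, inst, None, {}, True)
        self.driver.destroy(context, instance, network_info,
                            block_device_info, True)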
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_not_implemented_err(self,
inst_update_mock):
@@ -1642,6 +1805,41 @@ def test_cleanup_volumes_exception_raise(self):
calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
self.assertEqual(calls, volume_delete.call_args_list)
+ def test_start_building(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self.compute, '_instance_update') as update:
+ self.compute._start_building(self.context, instance)
+ update.assert_called_once_with(
+ self.context, instance.uuid, vm_state=vm_states.BUILDING,
+ task_state=None, expected_task_state=(task_states.SCHEDULING,
+ None))
+
+ def _test_prebuild_instance_build_abort_exception(self, exc):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with contextlib.nested(
+ mock.patch.object(self.compute, '_check_instance_exists'),
+ mock.patch.object(self.compute, '_start_building',
+ side_effect=exc)
+ ) as (
+ check, start
+ ):
+ # run the code
+ self.assertRaises(exception.BuildAbortException,
+ self.compute._prebuild_instance,
+ self.context, instance)
+ # assert the calls
+ check.assert_called_once_with(self.context, instance)
+ start.assert_called_once_with(self.context, instance)
+
+ def test_prebuild_instance_instance_not_found(self):
+ self._test_prebuild_instance_build_abort_exception(
+ exception.InstanceNotFound(instance_id='fake'))
+
+ def test_prebuild_instance_unexpected_deleting_task_state_err(self):
+ self._test_prebuild_instance_build_abort_exception(
+ exception.UnexpectedDeletingTaskStateError(expected='foo',
+ actual='bar'))
+
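Both abort cases exercise the same translation in _prebuild_instance: races where the instance disappears during scheduling are converted into BuildAbortException. A sketch of the except clause the two tests imply (assumption, not the actual implementation):

    # Sketch (assumption): how _prebuild_instance maps the two races.
    try:
        self._check_instance_exists(context, instance)
        self._start_building(context, instance)
    except (exception.InstanceNotFound,
            exception.UnexpectedDeletingTaskStateError) as e:
        # The instance went away under us; abort rather than reschedule.
        raise exception.BuildAbortException(instance_uuid=instance.uuid,
                                            reason=str(e))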
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
@@ -1707,10 +1905,10 @@ def _instance_action_events(self):
self.mox.StubOutWithMock(objects.InstanceActionEvent,
'event_finish_with_failure')
objects.InstanceActionEvent.event_start(
- self.context, self.instance['uuid'], mox.IgnoreArg(),
+ self.context, self.instance.uuid, mox.IgnoreArg(),
want_result=False)
objects.InstanceActionEvent.event_finish_with_failure(
- self.context, self.instance['uuid'], mox.IgnoreArg(),
+ self.context, self.instance.uuid, mox.IgnoreArg(),
exc_val=mox.IgnoreArg(), exc_tb=mox.IgnoreArg(),
want_result=False)
@@ -1738,6 +1936,7 @@ def test_build_and_run_instance_called_with_proper_args(self):
def test_build_abort_exception(self):
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
@@ -1748,9 +1947,11 @@ def test_build_abort_exception(self):
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.BuildAbortException(reason='',
- instance_uuid=self.instance['uuid']))
+ instance_uuid=self.instance.uuid))
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
+ self.compute._cleanup_volumes(self.context, self.instance.uuid,
+ self.block_device_mapping, raise_exc=False)
self.compute._set_instance_error_state(self.context, self.instance)
self._instance_action_events()
self.mox.ReplayAll()
@@ -1777,7 +1978,7 @@ def test_rescheduled_exception(self):
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.RescheduledException(reason='',
- instance_uuid=self.instance['uuid']))
+ instance_uuid=self.instance.uuid))
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
@@ -1795,10 +1996,44 @@ def test_rescheduled_exception(self):
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
+ def test_rescheduled_exception_with_non_ascii_exception(self):
+ exc = exception.NovaException(u's\xe9quence')
+ self.mox.StubOutWithMock(self.compute.driver, 'spawn')
+ self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
+ 'instance_update')
+ self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
+ self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
+ self.compute._build_networks_for_instance(self.context, self.instance,
+ self.requested_networks, self.security_groups).AndReturn(
+ self.network_info)
+ self.compute._shutdown_instance(self.context, self.instance,
+ self.block_device_mapping, self.requested_networks,
+ try_deallocate_networks=False)
+ self._notify_about_instance_usage('create.start',
+ extra_usage_info={'image_name': self.image.get('name')})
+ self._build_and_run_instance_update()
+ self.compute.driver.spawn(self.context, self.instance, self.image,
+ self.injected_files, self.admin_pass,
+ network_info=self.network_info,
+ block_device_info=self.block_device_info).AndRaise(exc)
+ self._notify_about_instance_usage('create.error',
+ fault=exc, stub=False)
+ conductor_rpcapi.ConductorAPI.instance_update(
+ self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
+ self.mox.ReplayAll()
+
+ self.assertRaises(exception.RescheduledException,
+ self.compute._build_and_run_instance, self.context,
+ self.instance, self.image, self.injected_files,
+ self.admin_pass, self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node,
+ self.limits, self.filter_properties)
+
def test_rescheduled_exception_without_retry(self):
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self._do_build_instance_update()
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
@@ -1806,11 +2041,11 @@ def test_rescheduled_exception_without_retry(self):
self.block_device_mapping, self.node, self.limits,
{}).AndRaise(
exception.RescheduledException(reason='',
- instance_uuid=self.instance['uuid']))
+ instance_uuid=self.instance.uuid))
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
self.compute._set_instance_error_state(self.context,
- self.instance['uuid'])
+ self.instance.uuid)
self._instance_action_events()
self.mox.ReplayAll()
@@ -1826,6 +2061,8 @@ def test_rescheduled_exception_without_retry(self):
def test_rescheduled_exception_do_not_deallocate_network(self):
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'deallocate_networks_on_reschedule')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
@@ -1836,7 +2073,9 @@ def test_rescheduled_exception_do_not_deallocate_network(self):
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.RescheduledException(reason='',
- instance_uuid=self.instance['uuid']))
+ instance_uuid=self.instance.uuid))
+ self.compute.driver.deallocate_networks_on_reschedule(
+ self.instance).AndReturn(False)
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
@@ -1854,10 +2093,10 @@ def test_rescheduled_exception_do_not_deallocate_network(self):
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
- def test_rescheduled_exception_deallocate_network_if_dhcp(self):
+ def test_rescheduled_exception_deallocate_network(self):
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute.driver,
- 'macs_for_instance')
+ 'deallocate_networks_on_reschedule')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
@@ -1868,9 +2107,9 @@ def test_rescheduled_exception_deallocate_network_if_dhcp(self):
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.RescheduledException(reason='',
- instance_uuid=self.instance['uuid']))
- self.compute.driver.macs_for_instance(self.instance).AndReturn(
- {'fake': 'options'})
+ instance_uuid=self.instance.uuid))
+ self.compute.driver.deallocate_networks_on_reschedule(
+ self.instance).AndReturn(True)
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
self.compute.compute_task_api.build_instances(self.context,
@@ -1890,9 +2129,11 @@ def test_rescheduled_exception_deallocate_network_if_dhcp(self):
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
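The renamed pair of tests fixes the contract of the new driver hook: on a reschedule, allocated networks are torn down only when deallocate_networks_on_reschedule() returns True (the old behaviour keyed off macs_for_instance). Sketch of the branch the two tests assert, with surrounding names assumed:

    # On RescheduledException (sketch, mirroring the two tests above):
    if self.driver.deallocate_networks_on_reschedule(instance):
        self._cleanup_allocated_networks(context, instance,
                                         requested_networks)
    # Either way, re-cast the build to the scheduler:
    self.compute_task_api.build_instances(context, [instance], image,
            filter_properties, admin_password, injected_files,
            requested_networks, security_groups, block_device_mapping)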
- def _test_build_and_run_exceptions(self, exc, set_error=False):
+ def _test_build_and_run_exceptions(self, exc, set_error=False,
+ cleanup_volumes=False):
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
+ self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self._do_build_instance_update()
@@ -1903,6 +2144,9 @@ def _test_build_and_run_exceptions(self, exc, set_error=False):
self.filter_properties).AndRaise(exc)
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
+ if cleanup_volumes:
+ self.compute._cleanup_volumes(self.context, self.instance.uuid,
+ self.block_device_mapping, raise_exc=False)
if set_error:
self.mox.StubOutWithMock(self.compute, '_set_instance_error_state')
self.compute._set_instance_error_state(self.context, self.instance)
@@ -1918,21 +2162,23 @@ def _test_build_and_run_exceptions(self, exc, set_error=False):
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
- self.mox.UnsetStubs()
- def test_build_and_run_instance_exceptions(self):
- exceptions = [
- exception.InstanceNotFound(instance_id=''),
+ def test_build_and_run_notfound_exception(self):
+ self._test_build_and_run_exceptions(exception.InstanceNotFound(
+ instance_id=''))
+
+ def test_build_and_run_unexpecteddeleting_exception(self):
+ self._test_build_and_run_exceptions(
exception.UnexpectedDeletingTaskStateError(expected='',
- actual='')]
- error_exceptions = [
- exception.BuildAbortException(instance_uuid='', reason=''),
- test.TestingException()]
+ actual=''))
- for exc in exceptions:
- self._test_build_and_run_exceptions(exc)
- for exc in error_exceptions:
- self._test_build_and_run_exceptions(exc, set_error=True)
+ def test_build_and_run_buildabort_exception(self):
+ self._test_build_and_run_exceptions(exception.BuildAbortException(
+ instance_uuid='', reason=''), set_error=True, cleanup_volumes=True)
+
+ def test_build_and_run_unhandled_exception(self):
+ self._test_build_and_run_exceptions(test.TestingException(),
+ set_error=True, cleanup_volumes=True)
def test_instance_not_found(self):
exc = exception.InstanceNotFound(instance_id=1)
@@ -1957,7 +2203,7 @@ def test_instance_not_found(self):
self._notify_about_instance_usage('create.end',
fault=exc, stub=False)
conductor_rpcapi.ConductorAPI.instance_update(
- self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
+ self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor')
self.mox.ReplayAll()
self.assertRaises(exception.InstanceNotFound,
@@ -1988,7 +2234,7 @@ def test_reschedule_on_exception(self):
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
conductor_rpcapi.ConductorAPI.instance_update(
- self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
+ self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor')
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
self.mox.ReplayAll()
@@ -2073,7 +2319,7 @@ def _test_build_and_run_spawn_exceptions(self, exc):
block_device_info=self.block_device_info))
instance_update.assert_has_calls(mock.call(self.context,
- self.instance['uuid'], mock.ANY, 'conductor'))
+ self.instance.uuid, mock.ANY, 'conductor'))
_shutdown_instance.assert_called_once_with(self.context,
self.instance, self.block_device_mapping,
@@ -2127,12 +2373,12 @@ def instance_claim(self, context, instance, limits):
def test_build_resources_buildabort_reraise(self):
exc = exception.BuildAbortException(
- instance_uuid=self.instance['uuid'], reason='')
+ instance_uuid=self.instance.uuid, reason='')
self.mox.StubOutWithMock(self.compute, '_build_resources')
self.mox.StubOutWithMock(conductor_rpcapi.ConductorAPI,
'instance_update')
conductor_rpcapi.ConductorAPI.instance_update(
- self.context, self.instance['uuid'], mox.IgnoreArg(), 'conductor')
+ self.context, self.instance.uuid, mox.IgnoreArg(), 'conductor')
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self.compute._build_resources(self.context, self.instance,
@@ -2228,7 +2474,6 @@ def test_failed_network_alloc_from_delete_raises_unexpected(self):
self.requested_networks, self.security_groups))
def test_build_resources_with_network_info_obj_on_spawn_failure(self):
- self.mox.StubOutWithMock(self.compute, '_cleanup_build_resources')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
@@ -2238,8 +2483,6 @@ def test_build_resources_with_network_info_obj_on_spawn_failure(self):
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._build_resources_instance_update()
- self.compute._cleanup_build_resources(self.context, self.instance,
- self.block_device_mapping)
self.mox.ReplayAll()
test_exception = test.TestingException()
@@ -2256,7 +2499,6 @@ def fake_spawn():
self.assertEqual(test_exception, e)
def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self):
- self.mox.StubOutWithMock(self.compute, '_cleanup_build_resources')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
@@ -2266,8 +2508,6 @@ def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self):
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._build_resources_instance_update()
- self.compute._cleanup_build_resources(self.context, self.instance,
- self.block_device_mapping)
self.mox.ReplayAll()
test_exception = test.TestingException()
@@ -2284,7 +2524,6 @@ def fake_spawn():
self.assertEqual(test_exception, e)
def test_build_resources_aborts_on_cleanup_failure(self):
- self.mox.StubOutWithMock(self.compute, '_cleanup_build_resources')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
@@ -2292,10 +2531,9 @@ def test_build_resources_aborts_on_cleanup_failure(self):
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
- try_deallocate_networks=False)
+ try_deallocate_networks=False).AndRaise(
+ test.TestingException())
self._build_resources_instance_update()
- self.compute._cleanup_build_resources(self.context, self.instance,
- self.block_device_mapping).AndRaise(test.TestingException())
self.mox.ReplayAll()
def fake_spawn():
@@ -2309,25 +2547,6 @@ def fake_spawn():
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
- def test_cleanup_cleans_volumes(self):
- self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
- self.compute._cleanup_volumes(self.context, self.instance['uuid'],
- self.block_device_mapping)
- self.mox.ReplayAll()
-
- self.compute._cleanup_build_resources(self.context, self.instance,
- self.block_device_mapping)
-
- def test_cleanup_reraises_volume_cleanup_failure(self):
- self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
- self.compute._cleanup_volumes(self.context, self.instance['uuid'],
- self.block_device_mapping).AndRaise(test.TestingException())
- self.mox.ReplayAll()
-
- self.assertRaises(test.TestingException,
- self.compute._cleanup_build_resources, self.context,
- self.instance, self.block_device_mapping)
-
def test_build_networks_if_not_allocated(self):
instance = fake_instance.fake_instance_obj(self.context,
system_metadata={},
@@ -2386,6 +2605,66 @@ def test_cleanup_allocated_networks_instance_not_found(self):
self.assertEqual('False',
self.instance.system_metadata['network_allocated'])
+ @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update')
+ def test_launched_at_in_create_end_notification(self,
+ mock_instance_update):
+
+ def fake_notify(*args, **kwargs):
+ if args[2] == 'create.end':
+ # Check that launched_at is set on the instance
+ self.assertIsNotNone(args[1].launched_at)
+
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'spawn'),
+ mock.patch.object(self.compute,
+ '_build_networks_for_instance', return_value=[]),
+ mock.patch.object(self.instance, 'save'),
+ mock.patch.object(self.compute, '_notify_about_instance_usage',
+ side_effect=fake_notify)
+ ) as (mock_spawn, mock_networks, mock_save, mock_notify):
+ self.compute._build_and_run_instance(self.context, self.instance,
+ self.image, self.injected_files, self.admin_pass,
+ self.requested_networks, self.security_groups,
+ self.block_device_mapping, self.node, self.limits,
+ self.filter_properties)
+ expected_call = mock.call(self.context, self.instance,
+ 'create.end', extra_usage_info={'message': u'Success'},
+ network_info=[])
+ create_end_call = mock_notify.call_args_list[
+ mock_notify.call_count - 1]
+ self.assertEqual(expected_call, create_end_call)
+
+ @mock.patch.object(conductor_rpcapi.ConductorAPI, 'instance_update')
+ def test_create_end_on_instance_delete(self, mock_instance_update):
+
+ def fake_notify(*args, **kwargs):
+ if args[2] == 'create.end':
+ # Check that launched_at is set on the instance
+ self.assertIsNotNone(args[1].launched_at)
+
+ exc = exception.InstanceNotFound(instance_id='')
+
+ with contextlib.nested(
+ mock.patch.object(self.compute.driver, 'spawn'),
+ mock.patch.object(self.compute,
+ '_build_networks_for_instance', return_value=[]),
+ mock.patch.object(self.instance, 'save',
+ side_effect=[None, None, exc]),
+ mock.patch.object(self.compute, '_notify_about_instance_usage',
+ side_effect=fake_notify)
+ ) as (mock_spawn, mock_networks, mock_save, mock_notify):
+ self.assertRaises(exception.InstanceNotFound,
+ self.compute._build_and_run_instance, self.context,
+ self.instance, self.image, self.injected_files,
+ self.admin_pass, self.requested_networks,
+ self.security_groups, self.block_device_mapping, self.node,
+ self.limits, self.filter_properties)
+ expected_call = mock.call(self.context, self.instance,
+ 'create.end', fault=exc)
+ create_end_call = mock_notify.call_args_list[
+ mock_notify.call_count - 1]
+ self.assertEqual(expected_call, create_end_call)
+
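The delete-race test above relies on create.end still being emitted, with the fault attached, when the final save hits InstanceNotFound. A sketch of the handling it implies (assumption):

    # Sketch (assumption): the third save() raises once the instance is gone.
    try:
        instance.save(expected_task_state=task_states.SPAWNING)
    except exception.InstanceNotFound as e:
        self._notify_about_instance_usage(context, instance,
                                          'create.end', fault=e)
        raise  # the caller still sees InstanceNotFound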
class ComputeManagerMigrationTestCase(test.NoDBTestCase):
def setUp(self):
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index b35d768224..bf0e60119f 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -76,7 +76,7 @@ def setUp(self):
self.data = []
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
lambda context, instance, use_slave=False: self.data)
def _update_flavor(self, flavor_info):
self.flavor = {
@@ -279,6 +279,12 @@ def setUp(self):
'source_type': 'snapshot',
'destination_type': 'volume',
'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vde',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
'boot_index': -1})])
self.flavor = {'swap': 4}
self.instance = {'uuid': 'fake_instance', 'ephemeral_gb': 2}
@@ -326,11 +332,14 @@ def test_only_block_device_mapping(self):
for original, new in zip(original_bdm, self.block_device_mapping):
self.assertEqual(original.device_name, new.device_name)
- # Asser it defaults the missing one as expected
+ # Assert it defaults the missing one as expected
self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names([], [], self.block_device_mapping)
- self.assertEqual(self.block_device_mapping[1]['device_name'],
- '/dev/vdb')
+ self.assertEqual('/dev/vdb',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vdc',
+ self.block_device_mapping[2]['device_name'])
def test_with_ephemerals(self):
# Test ephemeral gets assigned
@@ -340,10 +349,13 @@ def test_with_ephemerals(self):
self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names(self.ephemerals, [],
self.block_device_mapping)
- self.assertEqual(self.block_device_mapping[1]['device_name'],
- '/dev/vdc')
+ self.assertEqual('/dev/vdc',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[2]['device_name'])
def test_with_swap(self):
# Test swap only
@@ -354,11 +366,14 @@ def test_with_swap(self):
# Test swap and block_device_mapping
self.swap[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names([], self.swap,
self.block_device_mapping)
self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
- self.assertEqual(self.block_device_mapping[1]['device_name'],
- '/dev/vdc')
+ self.assertEqual('/dev/vdc',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[2]['device_name'])
def test_all_together(self):
# Test swap missing
@@ -379,12 +394,15 @@ def test_all_together(self):
self.swap[0]['device_name'] = None
self.ephemerals[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
- self.assertEqual(self.block_device_mapping[1]['device_name'],
- '/dev/vdd')
+ self.assertEqual('/dev/vdd',
+ self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
class UsageInfoTestCase(test.TestCase):
@@ -409,7 +427,7 @@ def fake_get_nw_info(cls, ctxt, instance):
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
self.stubs.Set(nova.tests.image.fake._FakeImageService,
@@ -724,6 +742,28 @@ def test_get_image_meta_no_image_no_image_system_meta(self):
self.assertThat(expected, matchers.DictMatches(image_meta))
+class ComputeUtilsGetValFromSysMetadata(test.TestCase):
+
+ def test_get_value_from_system_metadata(self):
+ instance = fake_instance.fake_instance_obj('fake-context')
+ system_meta = {'int_val': 1,
+ 'int_string': '2',
+ 'not_int': 'Nope'}
+ instance.system_metadata = system_meta
+
+ result = compute_utils.get_value_from_system_metadata(
+ instance, 'int_val', int, 0)
+ self.assertEqual(1, result)
+
+ result = compute_utils.get_value_from_system_metadata(
+ instance, 'int_string', int, 0)
+ self.assertEqual(2, result)
+
+ result = compute_utils.get_value_from_system_metadata(
+ instance, 'not_int', int, 0)
+ self.assertEqual(0, result)
+
+
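The three assertions fully determine the helper's contract: convert the stored value when possible, otherwise return the default. A minimal sketch consistent with them (the except tuple is an assumption):

    def get_value_from_system_metadata(instance, key, type, default):
        # Coerce system_metadata[key] with `type`; fall back to `default`
        # when the key is missing or the coercion fails.
        value = instance.system_metadata.get(key, default)
        try:
            return type(value)
        except (TypeError, ValueError):
            return default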
class ComputeUtilsGetNWInfo(test.TestCase):
def test_instance_object_none_info_cache(self):
inst = fake_instance.fake_instance_obj('fake-context',
diff --git a/nova/tests/compute/test_compute_xen.py b/nova/tests/compute/test_compute_xen.py
index 4870e37c66..dfe369345c 100644
--- a/nova/tests/compute/test_compute_xen.py
+++ b/nova/tests/compute/test_compute_xen.py
@@ -52,7 +52,7 @@ def test_sync_power_states_instance_not_found(self):
self.mox.StubOutWithMock(self.compute, '_sync_instance_power_state')
objects.InstanceList.get_by_host(ctxt,
self.compute.host, use_slave=True).AndReturn(instance_list)
self.compute.driver.get_num_instances().AndReturn(1)
vm_utils.lookup(self.compute.driver._session, instance['name'],
False).AndReturn(None)
diff --git a/nova/tests/compute/test_keypairs.py b/nova/tests/compute/test_keypairs.py
index 8b8f8c10b5..97f8373620 100644
--- a/nova/tests/compute/test_keypairs.py
+++ b/nova/tests/compute/test_keypairs.py
@@ -20,7 +20,7 @@
from nova import context
from nova import db
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import quota
from nova.tests.compute import test_compute
from nova.tests import fake_notifier
@@ -124,11 +124,13 @@ def assertInvalidKeypair(self, expected_message, name):
self.assertKeyNameRaises(exception.InvalidKeypair, msg, name)
def test_name_too_short(self):
- msg = _('Keypair name must be between 1 and 255 characters long')
+ msg = _('Keypair name must be string and between 1 '
+ 'and 255 characters long')
self.assertInvalidKeypair(msg, '')
def test_name_too_long(self):
- msg = _('Keypair name must be between 1 and 255 characters long')
+ msg = _('Keypair name must be string and between 1 '
+ 'and 255 characters long')
self.assertInvalidKeypair(msg, 'x' * 256)
def test_invalid_chars(self):
diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py
index 44177141b6..c248ce4594 100644
--- a/nova/tests/compute/test_multiple_nodes.py
+++ b/nova/tests/compute/test_multiple_nodes.py
@@ -106,6 +106,7 @@ def fake_get_compute_nodes_in_db(context):
'deleted_at': None,
'free_ram_mb': 130560,
'metrics': '',
+ 'numa_topology': '',
'stats': '',
'id': 2,
'host_ip': '127.0.0.1'}]
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 46b1e5ffc1..ee284d122f 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -22,6 +22,7 @@
from nova.compute import flavors
from nova.compute import resource_tracker
+from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
@@ -45,6 +46,9 @@
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
+FAKE_VIRT_STATS = {'virt_stat': 10}
+FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
+RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
@@ -64,7 +68,7 @@ def get_available_resource(self, nodename):
class FakeVirtDriver(driver.ComputeDriver):
- def __init__(self, pci_support=False):
+ def __init__(self, pci_support=False, stats=None):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
@@ -87,6 +91,8 @@ def __init__(self, pci_support=False):
'vendor_id': 'v1',
'product_id': 'p1',
'extra_info': {'extra_k1': 'v1'}}] if self.pci_support else []
+ if stats is not None:
+ self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
@@ -106,7 +112,8 @@ def get_available_resource(self, nodename):
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
-
+ if hasattr(self, 'stats'):
+ d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
@@ -160,8 +167,10 @@ def _create_compute_node(self, values=None):
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
- "stats": [{"key": "num_instances", "value": "1"}],
- "hypervisor_hostname": "fakenode",
+ "stats": {
+ "num_instances": "1",
+ },
+        "hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
@@ -314,6 +323,8 @@ def _tracker(self, host=None):
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
+ tracker.ext_resources_handler = \
+ resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
@@ -425,6 +436,7 @@ def setUp(self):
self.updated = False
self.deleted = False
+ self.update_call_count = 0
self.tracker = self._tracker()
self._migrations = {}
@@ -440,7 +452,7 @@ def setUp(self):
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
- self.tracker.update_available_resource(self.context)
+ self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
@@ -450,9 +462,8 @@ def _fake_service_get_by_compute_host(self, ctx, host):
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
+ self.update_call_count += 1
self.updated = True
- values['stats'] = [{"key": "num_instances", "value": "1"}]
-
self.compute.update(values)
return self.compute
@@ -483,6 +494,9 @@ def _fake_migration_update(self, ctxt, migration_id, values):
migration.update(values)
return migration
+ def _init_tracker(self):
+ self.tracker.update_available_resource(self.context)
+
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS):
@@ -566,6 +580,38 @@ def _driver(self):
return FakeVirtDriver(pci_support=True)
+class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
+
+ def setUp(self):
+ super(TrackerExtraResourcesTestCase, self).setUp()
+ self.driver = self._driver()
+
+ def _driver(self):
+ return FakeVirtDriver()
+
+ def test_set_empty_ext_resources(self):
+ resources = self.driver.get_available_resource(self.tracker.nodename)
+ self.assertNotIn('stats', resources)
+ self.tracker._write_ext_resources(resources)
+ self.assertIn('stats', resources)
+
+ def test_set_extra_resources(self):
+ def fake_write_resources(resources):
+ resources['stats']['resA'] = '123'
+ resources['stats']['resB'] = 12
+
+ self.stubs.Set(self.tracker.ext_resources_handler,
+ 'write_resources',
+ fake_write_resources)
+
+ resources = self.driver.get_available_resource(self.tracker.nodename)
+ self.tracker._write_ext_resources(resources)
+
+ expected = {"resA": "123", "resB": 12}
+ self.assertEqual(sorted(expected),
+ sorted(resources['stats']))
+
+
class InstanceClaimTestCase(BaseTrackerTestCase):
def test_update_usage_only_for_tracked(self):
@@ -1031,9 +1077,10 @@ def test_set_instance_host_and_node(self):
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
- 1) Compute node C gets upgraded to code that looks for instance types in
+
+ #) Compute node C gets upgraded to code that looks for instance types in
system metadata. AND
- 2) C already has instances in the process of migrating that do not have
+ #) C already has instances in the process of migrating that do not have
stashed instance types.
bug 1164110
@@ -1144,3 +1191,122 @@ def test_get_host_metrics(self):
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
+
+
+class TrackerPeriodicTestCase(BaseTrackerTestCase):
+
+ def test_periodic_status_update(self):
+ # verify update called on instantiation
+ self.assertEqual(1, self.update_call_count)
+
+ # verify update not called if no change to resources
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(1, self.update_call_count)
+
+ # verify update is called when resources change
+ driver = self.tracker.driver
+ driver.memory_mb += 1
+ self.tracker.update_available_resource(self.context)
+ self.assertEqual(2, self.update_call_count)
+
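This counting only works if the tracker diffs the freshly collected resources against the last set it persisted and skips the write when nothing changed, along these lines (method and attribute names are assumptions):

    def _resource_change(self, resources):
        # Sketch: gate the periodic DB write on an actual change; a real
        # implementation would store a copy rather than an alias.
        if resources != self.old_resources:
            self.old_resources = resources
            return True
        return False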
+
+class StatsDictTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ stats as a dictionary.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats=FAKE_VIRT_STATS)
+
+ def _get_stats(self):
+ return jsonutils.loads(self.tracker.compute_node['stats'])
+
+ def test_virt_stats(self):
+ # start with virt driver stats
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+ # adding an instance should keep virt driver stats
+ self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
+ self.tracker.update_available_resource(self.context)
+
+ stats = self._get_stats()
+ expected_stats = {}
+ expected_stats.update(FAKE_VIRT_STATS)
+ expected_stats.update(self.tracker.stats)
+ self.assertEqual(expected_stats, stats)
+
+ # removing the instances should keep only virt driver stats
+ self._instances = {}
+ self.tracker.update_available_resource(self.context)
+
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+
+class StatsJsonTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ stats as a json string.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
+
+ def _get_stats(self):
+ return jsonutils.loads(self.tracker.compute_node['stats'])
+
+ def test_virt_stats(self):
+ # start with virt driver stats
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+ # adding an instance should keep virt driver stats
+ # and add rt stats
+ self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
+ self.tracker.update_available_resource(self.context)
+
+ stats = self._get_stats()
+ expected_stats = {}
+ expected_stats.update(FAKE_VIRT_STATS)
+ expected_stats.update(self.tracker.stats)
+ self.assertEqual(expected_stats, stats)
+
+ # removing the instances should keep only virt driver stats
+ self._instances = {}
+ self.tracker.update_available_resource(self.context)
+ stats = self._get_stats()
+ self.assertEqual(FAKE_VIRT_STATS, stats)
+
+
+class StatsInvalidJsonTestCase(BaseTrackerTestCase):
+    """Test stats handling for a virt driver that provides
+    stats as an invalid JSON string.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats='this is not json')
+
+ def _init_tracker(self):
+ # do not do initial update in setup
+ pass
+
+ def test_virt_stats(self):
+ # should throw exception for string that does not parse as json
+ self.assertRaises(ValueError,
+ self.tracker.update_available_resource,
+ context=self.context)
+
+
+class StatsInvalidTypeTestCase(BaseTrackerTestCase):
+ """Test stats handling for a virt driver that provides
+ an invalid type for stats.
+ """
+ def _driver(self):
+ return FakeVirtDriver(stats=10)
+
+ def _init_tracker(self):
+ # do not do initial update in setup
+ pass
+
+ def test_virt_stats(self):
+ # should throw exception for incorrect stats value type
+ self.assertRaises(ValueError,
+ self.tracker.update_available_resource,
+ context=self.context)
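Read together, the four Stats* cases define the normalization contract for driver-supplied stats: a dict passes through, a string must parse as JSON, and anything else is rejected with ValueError. A sketch of that check (the helper name and six usage are assumptions):

    def _get_stats_from_driver(resources):
        # dict: use as-is; str: must parse as a JSON object; else: reject.
        stats = resources.get('stats', {})
        if isinstance(stats, six.string_types):
            stats = jsonutils.loads(stats)  # raises ValueError on bad JSON
        if not isinstance(stats, dict):
            raise ValueError("invalid stats value: %r" % (stats,))
        return stats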
diff --git a/nova/tests/compute/test_resources.py b/nova/tests/compute/test_resources.py
new file mode 100644
index 0000000000..db2722ccb5
--- /dev/null
+++ b/nova/tests/compute/test_resources.py
@@ -0,0 +1,344 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the compute extra resources framework."""
+
+
+from oslo.config import cfg
+from stevedore import extension
+from stevedore import named
+
+from nova.compute import resources
+from nova.compute.resources import base
+from nova.compute.resources import vcpu
+from nova import context
+from nova.i18n import _
+from nova.objects import flavor as flavor_obj
+from nova import test
+from nova.tests.fake_instance import fake_instance_obj
+
+CONF = cfg.CONF
+
+
+class FakeResourceHandler(resources.ResourceHandler):
+ def __init__(self, extensions):
+ self._mgr = \
+ named.NamedExtensionManager.make_test_instance(extensions)
+
+
+class FakeResource(base.Resource):
+
+ def __init__(self):
+ self.total_res = 0
+ self.used_res = 0
+
+ def _get_requested(self, usage):
+ if 'extra_specs' not in usage:
+ return
+ if self.resource_name not in usage['extra_specs']:
+ return
+ req = usage['extra_specs'][self.resource_name]
+ return int(req)
+
+ def _get_limit(self, limits):
+ if self.resource_name not in limits:
+ return
+ limit = limits[self.resource_name]
+ return int(limit)
+
+ def reset(self, resources, driver):
+ self.total_res = 0
+ self.used_res = 0
+
+ def test(self, usage, limits):
+ requested = self._get_requested(usage)
+ if not requested:
+ return
+
+ limit = self._get_limit(limits)
+ if not limit:
+ return
+
+ free = limit - self.used_res
+ if requested <= free:
+ return
+ else:
+ return (_('Free %(free)d < requested %(requested)d ') %
+ {'free': free, 'requested': requested})
+
+ def add_instance(self, usage):
+ requested = self._get_requested(usage)
+ if requested:
+ self.used_res += requested
+
+ def remove_instance(self, usage):
+ requested = self._get_requested(usage)
+ if requested:
+ self.used_res -= requested
+
+ def write(self, resources):
+ pass
+
+ def report_free(self):
+ return "Free %s" % (self.total_res - self.used_res)
+
+
+class ResourceA(FakeResource):
+
+ def reset(self, resources, driver):
+ # ResourceA uses a configuration option
+ self.total_res = int(CONF.resA)
+ self.used_res = 0
+ self.resource_name = 'resource:resA'
+
+ def write(self, resources):
+ resources['resA'] = self.total_res
+ resources['used_resA'] = self.used_res
+
+
+class ResourceB(FakeResource):
+
+ def reset(self, resources, driver):
+ # ResourceB uses resource details passed in parameter resources
+ self.total_res = resources['resB']
+ self.used_res = 0
+ self.resource_name = 'resource:resB'
+
+ def write(self, resources):
+ resources['resB'] = self.total_res
+ resources['used_resB'] = self.used_res
+
+
+def fake_flavor_obj(**updates):
+ flavor = flavor_obj.Flavor()
+ flavor.id = 1
+ flavor.name = 'fakeflavor'
+ flavor.memory_mb = 8000
+ flavor.vcpus = 3
+ flavor.root_gb = 11
+ flavor.ephemeral_gb = 4
+ flavor.swap = 0
+ flavor.rxtx_factor = 1.0
+ flavor.vcpu_weight = 1
+ if updates:
+ flavor.update(updates)
+ return flavor
+
+
+class BaseTestCase(test.TestCase):
+
+ def _initialize_used_res_counter(self):
+ # Initialize the value for the used resource
+ for ext in self.r_handler._mgr.extensions:
+ ext.obj.used_res = 0
+
+ def setUp(self):
+ super(BaseTestCase, self).setUp()
+
+ # initialize flavors and stub get_by_id to
+ # get flavors from here
+ self._flavors = {}
+ self.ctxt = context.get_admin_context()
+
+ # Create a flavor without extra_specs defined
+ _flavor_id = 1
+ _flavor = fake_flavor_obj(id=_flavor_id)
+ self._flavors[_flavor_id] = _flavor
+
+ # Create a flavor with extra_specs defined
+ _flavor_id = 2
+ requested_resA = 5
+ requested_resB = 7
+ requested_resC = 7
+ _extra_specs = {'resource:resA': requested_resA,
+ 'resource:resB': requested_resB,
+ 'resource:resC': requested_resC}
+ _flavor = fake_flavor_obj(id=_flavor_id,
+ extra_specs=_extra_specs)
+ self._flavors[_flavor_id] = _flavor
+
+ # create fake resource extensions and resource handler
+ _extensions = [
+ extension.Extension('resA', None, ResourceA, ResourceA()),
+ extension.Extension('resB', None, ResourceB, ResourceB()),
+ ]
+ self.r_handler = FakeResourceHandler(_extensions)
+
+        # Resource details can be passed to each plugin or specified as
+        # configuration options
+ driver_resources = {'resB': 5}
+ CONF.resA = '10'
+
+ # initialise the resources
+ self.r_handler.reset_resources(driver_resources, None)
+
+ def test_update_from_instance_with_extra_specs(self):
+ # Flavor with extra_specs
+ _flavor_id = 2
+ sign = 1
+ self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
+
+ expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA']
+ expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB']
+ self.assertEqual(int(expected_resA),
+ self.r_handler._mgr['resA'].obj.used_res)
+ self.assertEqual(int(expected_resB),
+ self.r_handler._mgr['resB'].obj.used_res)
+
+ def test_update_from_instance_without_extra_specs(self):
+ # Flavor id without extra spec
+ _flavor_id = 1
+ self._initialize_used_res_counter()
+ self.r_handler.resource_list = []
+ sign = 1
+ self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
+ self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res)
+ self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res)
+
+ def test_write_resources(self):
+ self._initialize_used_res_counter()
+ extra_resources = {}
+ expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0}
+ self.r_handler.write_resources(extra_resources)
+ self.assertEqual(expected, extra_resources)
+
+ def test_test_resources_without_extra_specs(self):
+ limits = {}
+ # Flavor id without extra_specs
+ flavor = self._flavors[1]
+ result = self.r_handler.test_resources(flavor, limits)
+ self.assertEqual([None, None], result)
+
+ def test_test_resources_with_limits_for_different_resource(self):
+ limits = {'resource:resC': 20}
+ # Flavor id with extra_specs
+ flavor = self._flavors[2]
+ result = self.r_handler.test_resources(flavor, limits)
+ self.assertEqual([None, None], result)
+
+ def test_passing_test_resources(self):
+ limits = {'resource:resA': 10, 'resource:resB': 20}
+ # Flavor id with extra_specs
+ flavor = self._flavors[2]
+ self._initialize_used_res_counter()
+ result = self.r_handler.test_resources(flavor, limits)
+ self.assertEqual([None, None], result)
+
+ def test_failing_test_resources_for_single_resource(self):
+ limits = {'resource:resA': 4, 'resource:resB': 20}
+ # Flavor id with extra_specs
+ flavor = self._flavors[2]
+ self._initialize_used_res_counter()
+ result = self.r_handler.test_resources(flavor, limits)
+ expected = ['Free 4 < requested 5 ', None]
+ self.assertEqual(sorted(expected),
+ sorted(result))
+
+ def test_empty_resource_handler(self):
+ """An empty resource handler has no resource extensions,
+ should have no effect, and should raise no exceptions.
+ """
+ empty_r_handler = FakeResourceHandler([])
+
+ resources = {}
+ empty_r_handler.reset_resources(resources, None)
+
+ flavor = self._flavors[1]
+ sign = 1
+ empty_r_handler.update_from_instance(flavor, sign)
+
+ limits = {}
+ test_result = empty_r_handler.test_resources(flavor, limits)
+ self.assertEqual([], test_result)
+
+ sign = -1
+ empty_r_handler.update_from_instance(flavor, sign)
+
+ extra_resources = {}
+ expected_extra_resources = extra_resources
+ empty_r_handler.write_resources(extra_resources)
+ self.assertEqual(expected_extra_resources, extra_resources)
+
+ empty_r_handler.report_free_resources()
+
+ def test_vcpu_resource_load(self):
+ # load the vcpu example
+ names = ['vcpu']
+ real_r_handler = resources.ResourceHandler(names)
+ ext_names = real_r_handler._mgr.names()
+ self.assertEqual(names, ext_names)
+
+ # check the extension loaded is the one we expect
+ # and an instance of the object has been created
+ ext = real_r_handler._mgr['vcpu']
+ self.assertIsInstance(ext.obj, vcpu.VCPU)
+
+
+class TestVCPU(test.TestCase):
+
+ def setUp(self):
+ super(TestVCPU, self).setUp()
+ self._vcpu = vcpu.VCPU()
+ self._vcpu._total = 10
+ self._vcpu._used = 0
+ self._flavor = fake_flavor_obj(vcpus=5)
+ self._big_flavor = fake_flavor_obj(vcpus=20)
+ self._instance = fake_instance_obj(None)
+
+ def test_reset(self):
+ # set vcpu values to something different to test reset
+ self._vcpu._total = 10
+ self._vcpu._used = 5
+
+ driver_resources = {'vcpus': 20}
+ self._vcpu.reset(driver_resources, None)
+ self.assertEqual(20, self._vcpu._total)
+ self.assertEqual(0, self._vcpu._used)
+
+ def test_add_and_remove_instance(self):
+ self._vcpu.add_instance(self._flavor)
+ self.assertEqual(10, self._vcpu._total)
+ self.assertEqual(5, self._vcpu._used)
+
+ self._vcpu.remove_instance(self._flavor)
+ self.assertEqual(10, self._vcpu._total)
+ self.assertEqual(0, self._vcpu._used)
+
+ def test_test_pass_limited(self):
+ result = self._vcpu.test(self._flavor, {'vcpu': 10})
+ self.assertIsNone(result, 'vcpu test failed when it should pass')
+
+ def test_test_pass_unlimited(self):
+ result = self._vcpu.test(self._big_flavor, {})
+ self.assertIsNone(result, 'vcpu test failed when it should pass')
+
+ def test_test_fail(self):
+ result = self._vcpu.test(self._flavor, {'vcpu': 2})
+ expected = _('Free CPUs 2.00 VCPUs < requested 5 VCPUs')
+ self.assertEqual(expected, result)
+
+ def test_write(self):
+ resources = {'stats': {}}
+ self._vcpu.write(resources)
+ expected = {
+ 'vcpus': 10,
+ 'vcpus_used': 0,
+ 'stats': {
+ 'num_vcpus': 10,
+ 'num_vcpus_used': 0
+ }
+ }
+ self.assertEqual(sorted(expected),
+ sorted(resources))
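For reference, here is the life cycle the handler is driven through by these tests, in the order the resource tracker is presumably expected to call it (a usage sketch assembled from this file; the variables stand in for tracker state):

    handler = resources.ResourceHandler(['vcpu'])
    handler.reset_resources(driver_resources, driver)  # start of a refresh
    errors = handler.test_resources(flavor, limits)    # claim check
    handler.update_from_instance(flavor, 1)            # account an instance
    handler.write_resources(stats)                     # publish usage
    handler.report_free_resources()                    # log free amounts
    handler.update_from_instance(flavor, -1)           # instance removed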
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index 5e8e45c046..1aa8550b55 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -105,13 +105,13 @@ def _test_compute_api(self, method, rpc_method, **kwargs):
def test_add_aggregate_host(self):
self._test_compute_api('add_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={})
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
self._test_compute_api('add_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={}, version='2.14')
def test_add_fixed_ip_to_instance(self):
self._test_compute_api('add_fixed_ip_to_instance', 'cast',
@@ -558,13 +558,13 @@ def test_refresh_security_group_members(self):
def test_remove_aggregate_host(self):
self._test_compute_api('remove_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={})
# NOTE(russellb) Havana compat
self.flags(compute='havana', group='upgrade_levels')
self._test_compute_api('remove_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={}, version='2.15')
def test_remove_fixed_ip_from_instance(self):
self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
diff --git a/nova/tests/compute/test_shelve.py b/nova/tests/compute/test_shelve.py
index ffae1f87a8..d77a611567 100644
--- a/nova/tests/compute/test_shelve.py
+++ b/nova/tests/compute/test_shelve.py
@@ -45,11 +45,9 @@ def _fake_resources():
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
- def _shelve_instance(self, shelved_offload_time):
+ def _shelve_instance(self, shelved_offload_time, clean_shutdown=True):
CONF.set_override('shelved_offload_time', shelved_offload_time)
db_instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
- None, True, None, False)
instance = objects.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
@@ -73,7 +71,12 @@ def _shelve_instance(self, shelved_offload_time):
self.compute._notify_about_instance_usage(self.context, instance,
'shelve.start')
- self.compute.driver.power_off(instance)
+ if clean_shutdown:
+ self.compute.driver.power_off(instance,
+ CONF.shutdown_timeout,
+ self.compute.SHUTDOWN_RETRY_INTERVAL)
+ else:
+ self.compute.driver.power_off(instance, 0, 0)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
@@ -115,18 +118,19 @@ def _shelve_instance(self, shelved_offload_time):
self.mox.ReplayAll()
self.compute.shelve_instance(self.context, instance,
- image_id=image_id)
+ image_id=image_id, clean_shutdown=clean_shutdown)
def test_shelve(self):
self._shelve_instance(-1)
+ def test_shelve_forced_shutdown(self):
+ self._shelve_instance(-1, clean_shutdown=False)
+
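Note the convention these expectations establish, shared with the rescue tests: a clean shutdown hands the driver the configured timeout and retry interval, a forced one zeroes both. Sketch of the assumed mapping, reconstructed from the mox expectations above:

    if clean_shutdown:
        self.driver.power_off(instance, CONF.shutdown_timeout,
                              self.SHUTDOWN_RETRY_INTERVAL)
    else:
        self.driver.power_off(instance, 0, 0)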
def test_shelve_offload(self):
self._shelve_instance(0)
def test_shelve_volume_backed(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
- None, True, None, False)
instance = objects.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
@@ -168,8 +172,6 @@ def test_shelve_volume_backed(self):
def test_unshelve(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
- None, True, None, False)
instance = objects.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
@@ -194,6 +196,8 @@ def test_unshelve(self):
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.rt, 'instance_claim')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
self.deleted_image_id = None
@@ -215,9 +219,12 @@ def fake_claim(context, instance, limits):
columns_to_join=['metadata', 'system_metadata'],
).AndReturn((db_instance, db_instance))
self.compute._prep_block_device(self.context, instance,
- mox.IgnoreArg()).AndReturn('fake_bdm')
+ mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
db_instance['key_data'] = None
db_instance['auto_disk_config'] = None
+ self.compute.network_api.migrate_instance_finish(
+ self.context, instance, {'source_compute': '',
+ 'dest_compute': self.compute.host})
self.compute.driver.spawn(self.context, instance, image,
injected_files=[], admin_password=None,
network_info=[],
@@ -251,24 +258,17 @@ def fake_claim(context, instance, limits):
def test_unshelve_volume_backed(self):
db_instance = jsonutils.to_primitive(self._create_fake_instance())
- host = 'fake-mini'
node = test_compute.NODENAME
limits = {}
filter_properties = {'limits': limits}
cur_time = timeutils.utcnow()
cur_time_tz = cur_time.replace(tzinfo=iso8601.iso8601.Utc())
timeutils.set_time_override(cur_time)
- self.compute.run_instance(self.context, db_instance, {}, {}, [], None,
- None, True, None, False)
instance = objects.Instance.get_by_uuid(
self.context, db_instance['uuid'],
expected_attrs=['metadata', 'system_metadata'])
instance.task_state = task_states.UNSHELVING
instance.save()
- sys_meta = dict(instance.system_metadata)
- sys_meta['shelved_at'] = timeutils.strtime(at=cur_time)
- sys_meta['shelved_image_id'] = None
- sys_meta['shelved_host'] = host
self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
@@ -276,6 +276,8 @@ def test_unshelve_volume_backed(self):
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.rt, 'instance_claim')
self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'migrate_instance_finish')
self.compute._notify_about_instance_usage(self.context, instance,
'unshelve.start')
@@ -285,9 +287,12 @@ def test_unshelve_volume_backed(self):
columns_to_join=['metadata', 'system_metadata']
).AndReturn((db_instance, db_instance))
self.compute._prep_block_device(self.context, instance,
- mox.IgnoreArg()).AndReturn('fake_bdm')
+ mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
db_instance['key_data'] = None
db_instance['auto_disk_config'] = None
+ self.compute.network_api.migrate_instance_finish(
+ self.context, instance, {'source_compute': '',
+ 'dest_compute': self.compute.host})
self.rt.instance_claim(self.context, instance, limits).AndReturn(
claims.Claim(db_instance, self.rt, _fake_resources()))
self.compute.driver.spawn(self.context, instance, None,
@@ -314,9 +319,6 @@ def test_unshelve_volume_backed(self):
filter_properties=filter_properties, node=node)
def test_shelved_poll_none_exist(self):
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(timeutils, 'is_older_than')
self.mox.ReplayAll()
@@ -324,8 +326,6 @@ def test_shelved_poll_none_exist(self):
def test_shelved_poll_not_timedout(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
@@ -339,13 +339,7 @@ def test_shelved_poll_not_timedout(self):
self.compute._poll_shelved_instances(self.context)
def test_shelved_poll_timedout(self):
- active_instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, active_instance, {}, {}, [],
- None, None, True, None, False)
-
instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
sys_meta = utils.metadata_to_dict(instance['system_metadata'])
shelved_time = timeutils.utcnow()
timeutils.set_time_override(shelved_time)
@@ -371,8 +365,6 @@ def test_shelve(self):
fake_instance = self._create_fake_instance({'display_name': 'vm01'})
instance = jsonutils.to_primitive(fake_instance)
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
self.assertIsNone(instance['task_state'])
@@ -403,8 +395,6 @@ def test_unshelve(self):
# Ensure instance can be unshelved.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
- self.compute.run_instance(self.context, instance, {}, {}, [], None,
- None, True, None, False)
self.assertIsNone(instance['task_state'])
diff --git a/nova/tests/compute/test_stats.py b/nova/tests/compute/test_stats.py
index 1864ac7950..c90314b0fc 100644
--- a/nova/tests/compute/test_stats.py
+++ b/nova/tests/compute/test_stats.py
@@ -136,8 +136,6 @@ def test_add_stats_for_instance(self):
self.assertEqual(1, self.stats["num_vm_None"])
self.assertEqual(2, self.stats["num_vm_" + vm_states.BUILDING])
- self.assertEqual(10, self.stats.num_vcpus_used)
-
def test_calculate_workload(self):
self.stats._increment("num_task_None")
self.stats._increment("num_task_" + task_states.SCHEDULING)
@@ -191,7 +189,6 @@ def test_update_stats_for_instance_deleted(self):
self.assertEqual(0, self.stats.num_instances_for_project("1234"))
self.assertEqual(0, self.stats.num_os_type("Linux"))
self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING])
- self.assertEqual(0, self.stats.num_vcpus_used)
def test_io_workload(self):
vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED]
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 57054a8976..d6151ad8a2 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -54,6 +54,7 @@
from nova.tests import fake_instance
from nova.tests import fake_notifier
from nova.tests import fake_server_actions
+from nova.tests import fake_utils
from nova import utils
@@ -87,6 +88,8 @@ def fake_deserialize_context(serializer, ctxt_dict):
self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
fake_deserialize_context)
+ fake_utils.stub_out_utils_spawn_n(self.stubs)
+
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
if not params:
params = {}
@@ -125,7 +128,7 @@ def test_instance_update(self):
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
- if self.db == None:
+ if self.db is None:
self.conductor = utils.ExceptionHelper(self.conductor)
self.assertRaises(KeyError,
self._do_update, 'any-uuid', foobar=1)
@@ -183,13 +186,6 @@ def test_block_device_mapping_get_all_by_instance(self):
self.context, fake_inst, legacy=False)
self.assertEqual(result, 'fake-result')
- def test_instance_info_cache_delete(self):
- self.mox.StubOutWithMock(db, 'instance_info_cache_delete')
- db.instance_info_cache_delete(self.context, 'fake-uuid')
- self.mox.ReplayAll()
- self.conductor.instance_info_cache_delete(self.context,
- {'uuid': 'fake-uuid'})
-
def test_vol_usage_update(self):
self.mox.StubOutWithMock(db, 'vol_usage_update')
self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
@@ -412,23 +408,23 @@ def test_instance_get_all_by_filters(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
- columns_to_join=None, use_slave=False)
+ columns_to_join=None, use_slave=False)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
None, False)
- def test_instance_get_all_by_filters_use_slave(self):
+ def test_instance_get_all_by_filters_use_slave(self):
filters = {'foo': 'bar'}
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
- columns_to_join=None, use_slave=True)
+ columns_to_join=None, use_slave=True)
self.mox.ReplayAll()
self.conductor.instance_get_all_by_filters(self.context, filters,
'fake-key', 'fake-sort',
columns_to_join=None,
- use_slave=True)
+ use_slave=True)
def test_instance_get_all_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
@@ -663,8 +659,7 @@ def _setup_aggregate_with_host(self):
def test_aggregate_host_add(self):
aggregate_ref = self._setup_aggregate_with_host()
- self.assertTrue(any([host == 'bar'
- for host in aggregate_ref['hosts']]))
+ self.assertIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
@@ -677,8 +672,7 @@ def test_aggregate_host_delete(self):
aggregate_ref = db.aggregate_get(self.context.elevated(),
aggregate_ref['id'])
- self.assertFalse(any([host == 'bar'
- for host in aggregate_ref['hosts']]))
+ self.assertNotIn('bar', aggregate_ref['hosts'])
db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
@@ -1119,6 +1113,21 @@ def fake_deserialize_context(serializer, ctxt_dict):
self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
fake_deserialize_context)
+ def _prepare_rebuild_args(self, update_args=None):
+ rebuild_args = {'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': 'image_ref',
+ 'orig_image_ref': 'orig_image_ref',
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'host': 'compute-host'}
+ if update_args:
+ rebuild_args.update(update_args)
+ return rebuild_args
+
def test_live_migrate(self):
inst = fake_instance.fake_db_instance()
inst_obj = objects.Instance._from_db_object(
@@ -1230,10 +1239,10 @@ def test_build_instances(self):
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
db.instance_get_by_uuid(self.context, instances[0].uuid,
columns_to_join=['system_metadata'],
- use_slave=False).AndReturn(
+ use_slave=False).AndReturn(
jsonutils.to_primitive(instances[0]))
db.block_device_mapping_get_all_by_instance(self.context,
- instances[0].uuid, use_slave=False).AndReturn([])
+ instances[0].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
@@ -1256,10 +1265,10 @@ def test_build_instances(self):
node='node1', limits=[])
db.instance_get_by_uuid(self.context, instances[1].uuid,
columns_to_join=['system_metadata'],
- use_slave=False).AndReturn(
+ use_slave=False).AndReturn(
jsonutils.to_primitive(instances[1]))
db.block_device_mapping_get_all_by_instance(self.context,
- instances[1].uuid, use_slave=False).AndReturn([])
+ instances[1].uuid, use_slave=False).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
@@ -1477,6 +1486,83 @@ def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
+ def test_rebuild_instance(self):
+ db_instance = jsonutils.to_primitive(self._create_fake_instance())
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'])
+ rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})
+
+ with contextlib.nested(
+ mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'rebuild_instance'),
+ mock.patch.object(self.conductor_manager.scheduler_rpcapi,
+ 'select_destinations')
+ ) as (rebuild_mock, select_dest_mock):
+ self.conductor_manager.rebuild_instance(context=self.context,
+ instance=inst_obj,
+ **rebuild_args)
+ self.assertFalse(select_dest_mock.called)
+ rebuild_mock.assert_called_once_with(self.context,
+ instance=inst_obj,
+ **rebuild_args)
+
+ def test_rebuild_instance_with_scheduler(self):
+ db_instance = jsonutils.to_primitive(self._create_fake_instance())
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'])
+ inst_obj.host = 'noselect'
+ rebuild_args = self._prepare_rebuild_args({'host': None})
+ expected_host = 'thebesthost'
+ request_spec = {}
+ filter_properties = {'ignore_hosts': [inst_obj.host]}
+
+ with contextlib.nested(
+ mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'rebuild_instance'),
+ mock.patch.object(self.conductor_manager.scheduler_rpcapi,
+ 'select_destinations',
+ return_value=[{'host': expected_host}]),
+ mock.patch('nova.scheduler.utils.build_request_spec',
+ return_value=request_spec)
+ ) as (rebuild_mock, select_dest_mock, bs_mock):
+ self.conductor_manager.rebuild_instance(context=self.context,
+ instance=inst_obj,
+ **rebuild_args)
+ select_dest_mock.assert_called_once_with(self.context,
+ request_spec,
+ filter_properties)
+ rebuild_args['host'] = expected_host
+ rebuild_mock.assert_called_once_with(self.context,
+ instance=inst_obj,
+ **rebuild_args)
+
+ def test_rebuild_instance_with_scheduler_no_host(self):
+ db_instance = jsonutils.to_primitive(self._create_fake_instance())
+ inst_obj = objects.Instance.get_by_uuid(self.context,
+ db_instance['uuid'])
+ inst_obj.host = 'noselect'
+ rebuild_args = self._prepare_rebuild_args({'host': None})
+ request_spec = {}
+ filter_properties = {'ignore_hosts': [inst_obj.host]}
+
+ with contextlib.nested(
+ mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'rebuild_instance'),
+ mock.patch.object(self.conductor_manager.scheduler_rpcapi,
+ 'select_destinations',
+ side_effect=exc.NoValidHost(reason='')),
+ mock.patch('nova.scheduler.utils.build_request_spec',
+ return_value=request_spec)
+ ) as (rebuild_mock, select_dest_mock, bs_mock):
+ self.assertRaises(exc.NoValidHost,
+ self.conductor_manager.rebuild_instance,
+ context=self.context, instance=inst_obj,
+ **rebuild_args)
+ select_dest_mock.assert_called_once_with(self.context,
+ request_spec,
+ filter_properties)
+ self.assertFalse(rebuild_mock.called)
+
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""ComputeTaskManager Tests."""
diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py
index b107cdf4fe..bd3dbeb6b6 100644
--- a/nova/tests/console/test_console.py
+++ b/nova/tests/console/test_console.py
@@ -47,8 +47,6 @@ def setUp(self):
def _create_instance(self):
"""Create a test instance."""
inst = {}
- #inst['host'] = self.host
- #inst['name'] = 'instance-1234'
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user_id
diff --git a/nova/tests/db/test_db_api.py b/nova/tests/db/test_db_api.py
index adaf68fc1f..7af3d874a5 100644
--- a/nova/tests/db/test_db_api.py
+++ b/nova/tests/db/test_db_api.py
@@ -37,7 +37,7 @@
from sqlalchemy import MetaData
from sqlalchemy.orm import exc as sqlalchemy_orm_exc
from sqlalchemy.orm import query
-from sqlalchemy.sql.expression import select
+from sqlalchemy import sql
from sqlalchemy import Table
from nova import block_device
@@ -332,7 +332,7 @@ def test_aggregate_create_with_metadata(self):
matchers.DictMatches(_get_fake_aggr_metadata()))
def test_aggregate_create_delete_create_with_metadata(self):
- #test for bug 1052479
+ # test for bug 1052479
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
@@ -1224,8 +1224,8 @@ def test_security_group_get_no_instances(self):
session = get_session()
self.mox.StubOutWithMock(sqlalchemy_api, 'get_session')
- sqlalchemy_api.get_session(use_slave=False).AndReturn(session)
- sqlalchemy_api.get_session(use_slave=False).AndReturn(session)
+ sqlalchemy_api.get_session(use_slave=False).AndReturn(session)
+ sqlalchemy_api.get_session(use_slave=False).AndReturn(session)
self.mox.ReplayAll()
security_group = db.security_group_get(self.ctxt, sid,
@@ -1635,29 +1635,29 @@ def test_instance_get_all_by_filters_tags(self):
instance = self.create_instance_with_args(
metadata={'foo': 'bar'})
self.create_instance_with_args()
- #For format 'tag-'
+ # For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
{'name': 'tag-value', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
- #For format 'tag:'
+ # For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
- #For non-existent tag
+ # For non-existent tag
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'barred'},
]})
self.assertEqual([], result)
- #Confirm with deleted tags
+ # Confirm with deleted tags
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo')
- #For format 'tag-'
+ # For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
@@ -1668,7 +1668,7 @@ def test_instance_get_all_by_filters_tags(self):
{'name': 'tag-value', 'value': 'bar'}
]})
self.assertEqual([], result)
- #For format 'tag:'
+ # For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
@@ -2000,6 +2000,10 @@ def test_instance_floating_address_get_all(self):
db.instance_floating_address_get_all(ctxt, instance_uuids[2])
self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))
+ self.assertRaises(exception.InvalidUUID,
+ db.instance_floating_address_get_all,
+ ctxt, 'invalid_uuid')
+
def test_instance_stringified_ips(self):
instance = self.create_instance_with_args()
instance = db.instance_update(
@@ -2784,10 +2788,10 @@ def assert_multi_filter_flavor_get(filters=None):
real_it = db.flavor_get_all(self.ctxt, filters=filters)
self._assertEqualListsOfObjects(expected_it, real_it)
- #no filter
+ # no filter
assert_multi_filter_flavor_get()
- #test only with one filter
+ # test only with one filter
for filt in mem_filts:
assert_multi_filter_flavor_get(filt)
for filt in root_filts:
@@ -2797,7 +2801,7 @@ def assert_multi_filter_flavor_get(filters=None):
for filt in is_public_filts:
assert_multi_filter_flavor_get(filt)
- #test all filters together
+ # test all filters together
for mem in mem_filts:
for root in root_filts:
for disabled in disabled_filts:
@@ -3894,13 +3898,17 @@ def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
def test_floating_ip_deallocate(self):
values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
float_ip = self._create_floating_ip(values)
- db.floating_ip_deallocate(self.ctxt, float_ip.address)
+ rows_updated = db.floating_ip_deallocate(self.ctxt, float_ip.address)
+ self.assertEqual(1, rows_updated)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertIsNone(updated_float_ip.project_id)
self.assertIsNone(updated_float_ip.host)
self.assertFalse(updated_float_ip.auto_assigned)
+ def test_floating_ip_deallocate_address_not_found(self):
+ self.assertEqual(0, db.floating_ip_deallocate(self.ctxt, '2.2.2.2'))
+
def test_floating_ip_destroy(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
@@ -4092,7 +4100,9 @@ def test_floating_ip_update(self):
'interface': 'some_interface',
'pool': 'some_pool'
}
- db.floating_ip_update(self.ctxt, float_ip['address'], values)
+ floating_ref = db.floating_ip_update(self.ctxt, float_ip['address'],
+ values)
+ self.assertIsNotNone(floating_ref)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
self._assertEqualObjects(updated_float_ip, values,
ignored_keys=['id', 'address', 'updated_at',
@@ -4617,18 +4627,18 @@ def test_block_device_mapping_get_all_by_instance(self):
uuid2 = db.instance_create(self.ctxt, {})['uuid']
bmds_values = [{'instance_uuid': uuid1,
- 'device_name': 'first'},
+ 'device_name': '/dev/vda'},
{'instance_uuid': uuid2,
- 'device_name': 'second'},
+ 'device_name': '/dev/vdb'},
{'instance_uuid': uuid2,
- 'device_name': 'third'}]
+ 'device_name': '/dev/vdc'}]
for bdm in bmds_values:
self._create_bdm(bdm)
bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
self.assertEqual(len(bmd), 1)
- self.assertEqual(bmd[0]['device_name'], 'first')
+ self.assertEqual(bmd[0]['device_name'], '/dev/vda')
bmd = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
self.assertEqual(len(bmd), 2)
@@ -4644,27 +4654,27 @@ def test_block_device_mapping_destroy_by_instance_and_volume(self):
vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
- self._create_bdm({'device_name': 'fake1', 'volume_id': vol_id1})
- self._create_bdm({'device_name': 'fake2', 'volume_id': vol_id2})
+ self._create_bdm({'device_name': '/dev/vda', 'volume_id': vol_id1})
+ self._create_bdm({'device_name': '/dev/vdb', 'volume_id': vol_id2})
uuid = self.instance['uuid']
db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
vol_id1)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
- self.assertEqual(bdms[0]['device_name'], 'fake2')
+ self.assertEqual(bdms[0]['device_name'], '/dev/vdb')
def test_block_device_mapping_destroy_by_instance_and_device(self):
- self._create_bdm({'device_name': 'fake1'})
- self._create_bdm({'device_name': 'fake2'})
+ self._create_bdm({'device_name': '/dev/vda'})
+ self._create_bdm({'device_name': '/dev/vdb'})
uuid = self.instance['uuid']
- params = (self.ctxt, uuid, 'fake1')
+ params = (self.ctxt, uuid, '/dev/vdb')
db.block_device_mapping_destroy_by_instance_and_device(*params)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
- self.assertEqual(bdms[0]['device_name'], 'fake2')
+ self.assertEqual(bdms[0]['device_name'], '/dev/vda')
def test_block_device_mapping_get_by_volume_id(self):
self._create_bdm({'volume_id': 'fake_id'})
@@ -5535,7 +5545,7 @@ def setUp(self):
pci_stats='',
metrics='',
extra_resources='',
- stats='')
+ stats='', numa_topology='')
# add some random stats
self.stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
@@ -6251,10 +6261,10 @@ def check_exc_format(method, value):
except exception.NotFound as exc:
self.assertIn(unicode(value), unicode(exc))
- check_exc_format(db.get_ec2_snapshot_id_by_uuid, 'fake')
- check_exc_format(db.get_snapshot_uuid_by_ec2_id, 123456)
check_exc_format(db.get_ec2_instance_id_by_uuid, 'fake')
check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
+ check_exc_format(db.ec2_snapshot_get_by_ec2_id, 123456)
+ check_exc_format(db.ec2_snapshot_get_by_uuid, 'fake')
def test_ec2_volume_create(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
@@ -6276,25 +6286,25 @@ def test_ec2_snapshot_create(self):
self.assertIsNotNone(snap['id'])
self.assertEqual(snap['uuid'], 'fake-uuid')
- def test_get_ec2_snapshot_id_by_uuid(self):
+ def test_ec2_snapshot_get_by_ec2_id(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
- snap_id = db.get_ec2_snapshot_id_by_uuid(self.ctxt, 'fake-uuid')
- self.assertEqual(snap['id'], snap_id)
+ snap2 = db.ec2_snapshot_get_by_ec2_id(self.ctxt, snap['id'])
+ self.assertEqual(snap2['uuid'], 'fake-uuid')
- def test_get_snapshot_uuid_by_ec2_id(self):
+ def test_ec2_snapshot_get_by_uuid(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
- snap_uuid = db.get_snapshot_uuid_by_ec2_id(self.ctxt, snap['id'])
- self.assertEqual(snap_uuid, 'fake-uuid')
+ snap2 = db.ec2_snapshot_get_by_uuid(self.ctxt, 'fake-uuid')
+ self.assertEqual(snap['id'], snap2['id'])
- def test_get_ec2_snapshot_id_by_uuid_not_found(self):
+ def test_ec2_snapshot_get_by_ec2_id_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
- db.get_ec2_snapshot_id_by_uuid,
- self.ctxt, 'uuid-not-present')
+ db.ec2_snapshot_get_by_ec2_id,
+ self.ctxt, 123456)
- def test_get_snapshot_uuid_by_ec2_id_not_found(self):
+ def test_ec2_snapshot_get_by_uuid_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
- db.get_snapshot_uuid_by_ec2_id,
- self.ctxt, 100500)
+ db.ec2_snapshot_get_by_uuid,
+ self.ctxt, 'fake-uuid')
def test_ec2_instance_create(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
@@ -6420,12 +6430,12 @@ def test_archive_deleted_rows(self):
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
- qiim = select([self.instance_id_mappings]).where(self.
+ qiim = sql.select([self.instance_id_mappings]).where(self.
instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
- qsiim = select([self.shadow_instance_id_mappings]).\
+ qsiim = sql.select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
@@ -6489,12 +6499,12 @@ def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
- qmt = select([main_table]).where(main_table.c.uuid.in_(
+ qmt = sql.select([main_table]).where(main_table.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qmt).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
- qst = select([shadow_table]).\
+ qst = sql.select([shadow_table]).\
where(shadow_table.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qst).fetchall()
# Verify we have 0 in shadow
@@ -6533,11 +6543,11 @@ def test_archive_deleted_rows_no_id_column(self):
where(self.dns_domains.c.domain == uuidstr0).\
values(deleted=True)
self.conn.execute(update_statement)
- qdd = select([self.dns_domains], self.dns_domains.c.domain ==
+ qdd = sql.select([self.dns_domains], self.dns_domains.c.domain ==
uuidstr0)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 1)
- qsdd = select([self.shadow_dns_domains],
+ qsdd = sql.select([self.shadow_dns_domains],
self.shadow_dns_domains.c.domain == uuidstr0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 0)
@@ -6598,21 +6608,21 @@ def test_archive_deleted_rows_2_tables(self):
.values(deleted=1)
self.conn.execute(update_statement2)
# Verify we have 6 in each main table
- qiim = select([self.instance_id_mappings]).where(
+ qiim = sql.select([self.instance_id_mappings]).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
self.assertEqual(len(rows), 6)
- qi = select([self.instances]).where(self.instances.c.uuid.in_(
+ qi = sql.select([self.instances]).where(self.instances.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(rows), 6)
# Verify we have 0 in each shadow table
- qsiim = select([self.shadow_instance_id_mappings]).\
+ qsiim = sql.select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
self.assertEqual(len(rows), 0)
- qsi = select([self.shadow_instances]).\
+ qsi = sql.select([self.shadow_instances]).\
where(self.shadow_instances.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(rows), 0)
@@ -6661,9 +6671,9 @@ def _get_default_values(self):
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
- metadata=None, members=None):
+ members=None):
return db.instance_group_create(context, values, policies=policies,
- metadata=metadata, members=members)
+ members=members)
def test_instance_group_create_no_key(self):
values = self._get_default_values()
@@ -6775,15 +6785,6 @@ def test_instance_group_update(self):
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self.assertEqual(result['name'], 'new_fake_name')
- # update metadata
- values = self._get_default_values()
- metadataInput = {'key11': 'value1',
- 'key12': 'value2'}
- values['metadata'] = metadataInput
- db.instance_group_update(self.context, id, values)
- result = db.instance_group_get(self.context, id)
- metadata = result['metadetails']
- self._assertEqualObjects(metadata, metadataInput)
# update members
values = self._get_default_values()
members = ['instance_id1', 'instance_id2']
@@ -6804,86 +6805,6 @@ def test_instance_group_update(self):
'invalid_id', values)
-class InstanceGroupMetadataDBApiTestCase(InstanceGroupDBApiTestCase):
- def test_instance_group_metadata_on_create(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- metadata = {'key11': 'value1',
- 'key12': 'value2'}
- result = self._create_instance_group(self.context, values,
- metadata=metadata)
- ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
- 'created_at']
- self._assertEqualObjects(result, values, ignored_keys)
- self._assertEqualObjects(metadata, result['metadetails'])
-
- def test_instance_group_metadata_add(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- metadata = db.instance_group_metadata_get(self.context, id)
- self._assertEqualObjects(metadata, {})
- metadata = {'key1': 'value1',
- 'key2': 'value2'}
- db.instance_group_metadata_add(self.context, id, metadata)
- metadata2 = db.instance_group_metadata_get(self.context, id)
- self._assertEqualObjects(metadata, metadata2)
-
- def test_instance_group_update(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- metadata = {'key1': 'value1',
- 'key2': 'value2'}
- db.instance_group_metadata_add(self.context, id, metadata)
- metadata2 = db.instance_group_metadata_get(self.context, id)
- self._assertEqualObjects(metadata, metadata2)
- # check add with existing keys
- metadata = {'key1': 'value1',
- 'key2': 'value2',
- 'key3': 'value3'}
- db.instance_group_metadata_add(self.context, id, metadata)
- metadata3 = db.instance_group_metadata_get(self.context, id)
- self._assertEqualObjects(metadata, metadata3)
-
- def test_instance_group_delete(self):
- values = self._get_default_values()
- values['uuid'] = 'fake_id'
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- metadata = {'key1': 'value1',
- 'key2': 'value2',
- 'key3': 'value3'}
- db.instance_group_metadata_add(self.context, id, metadata)
- metadata3 = db.instance_group_metadata_get(self.context, id)
- self._assertEqualObjects(metadata, metadata3)
- db.instance_group_metadata_delete(self.context, id, 'key1')
- metadata = db.instance_group_metadata_get(self.context, id)
- self.assertNotIn('key1', metadata)
- db.instance_group_metadata_delete(self.context, id, 'key2')
- metadata = db.instance_group_metadata_get(self.context, id)
- self.assertNotIn('key2', metadata)
-
- def test_instance_group_metadata_invalid_ids(self):
- values = self._get_default_values()
- result = self._create_instance_group(self.context, values)
- id = result['uuid']
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_metadata_get,
- self.context, 'invalid')
- self.assertRaises(exception.InstanceGroupNotFound,
- db.instance_group_metadata_delete, self.context,
- 'invalidid', 'key1')
- metadata = {'key1': 'value1',
- 'key2': 'value2'}
- db.instance_group_metadata_add(self.context, id, metadata)
- self.assertRaises(exception.InstanceGroupMetadataNotFound,
- db.instance_group_metadata_delete,
- self.context, id, 'invalidkey')
-
-
class InstanceGroupMembersDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_members_on_create(self):
values = self._get_default_values()
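Earlier in this file's changes, db.floating_ip_deallocate gains a return value: the number of rows it updated, with a dedicated test asserting 0 for an unknown address. A short hedged sketch of a caller that relies on the new contract (the helper and its logging are illustrative, not part of the patch):

from nova import db
from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)

def deallocate_quietly(context, address):
    # Returns 1 when the address matched a row, 0 when it was unknown
    # or already freed, exactly as the two tests above assert.
    if not db.floating_ip_deallocate(context, address):
        LOG.debug("floating IP %s was not allocated; nothing to do",
                  address)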
diff --git a/nova/tests/db/test_migration_utils.py b/nova/tests/db/test_migration_utils.py
index d3f93710e3..6009a609e1 100644
--- a/nova/tests/db/test_migration_utils.py
+++ b/nova/tests/db/test_migration_utils.py
@@ -19,7 +19,7 @@
from sqlalchemy import Integer, String
from sqlalchemy import MetaData, Table, Column
from sqlalchemy.exc import NoSuchTableError
-from sqlalchemy.sql import select
+from sqlalchemy import sql
from sqlalchemy.types import UserDefinedType
from nova.db.sqlalchemy import api as db
@@ -62,7 +62,7 @@ def test_delete_from_select(self):
# Delete 4 rows in one chunk
column = test_table.c.id
- query_delete = select([column],
+ query_delete = sql.select([column],
test_table.c.id < 5).order_by(column)
delete_statement = utils.DeleteFromSelect(test_table,
query_delete, column)
@@ -70,7 +70,7 @@ def test_delete_from_select(self):
# Verify we delete 4 rows
self.assertEqual(result_delete.rowcount, 4)
- query_all = select([test_table]).\
+ query_all = sql.select([test_table]).\
where(test_table.c.uuid.in_(uuidstrs))
rows = conn.execute(query_all).fetchall()
# Verify we still have 6 rows in table
@@ -90,7 +90,7 @@ def test_check_shadow_table(self):
Column('c', String(256)))
table.create()
- #check missing shadow table
+ # check missing shadow table
self.assertRaises(NoSuchTableError,
utils.check_shadow_table, engine, table_name)
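The test above exercises utils.DeleteFromSelect, which turns a SELECT into a single bounded DELETE so large purges can run in chunks, and also shows the module-qualified sql.select style this patch standardizes on. A brief usage sketch under the same assumptions as the test (conn and table stand in for an open connection and a reflected Table):

from sqlalchemy import sql

from nova.db.sqlalchemy import utils

def delete_first_chunk(conn, table):
    # Select the ids to purge, then delete exactly those rows.
    column = table.c.id
    query = sql.select([column], table.c.id < 5).order_by(column)
    result = conn.execute(utils.DeleteFromSelect(table, query, column))
    return result.rowcount  # how many of the selected rows were deleted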
diff --git a/nova/tests/db/test_migrations.py b/nova/tests/db/test_migrations.py
index aed05d7b4b..a85daeb651 100644
--- a/nova/tests/db/test_migrations.py
+++ b/nova/tests/db/test_migrations.py
@@ -31,14 +31,14 @@
'openstack_citest' on localhost. The test will then use that db and u/p combo
to run the tests.
-For postgres on Ubuntu this can be done with the following commands:
+For postgres on Ubuntu this can be done with the following commands::
-sudo -u postgres psql
-postgres=# create user openstack_citest with createdb login password
- 'openstack_citest';
-postgres=# create database openstack_citest with owner openstack_citest;
-postgres=# create database openstack_baremetal_citest with owner
- openstack_citest;
+| sudo -u postgres psql
+| postgres=# create user openstack_citest with createdb login password
+| 'openstack_citest';
+| postgres=# create database openstack_citest with owner openstack_citest;
+| postgres=# create database openstack_baremetal_citest with owner
+| openstack_citest;
"""
@@ -53,8 +53,8 @@
import nova.db.sqlalchemy.migrate_repo
from nova.db.sqlalchemy import utils as db_utils
+from nova.i18n import _
from nova.openstack.common.db.sqlalchemy import utils as oslodbutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import test
@@ -707,6 +707,89 @@ def _post_downgrade_245(self, engine):
self.assertColumnNotExists(engine, 'networks', 'enable_dhcp')
self.assertColumnNotExists(engine, 'networks', 'share_address')
+ def _check_246(self, engine, data):
+ pci_devices = oslodbutils.get_table(engine, 'pci_devices')
+ self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
+ if fk.parent.name == 'compute_node_id']))
+
+ def _post_downgrade_246(self, engine):
+ pci_devices = oslodbutils.get_table(engine, 'pci_devices')
+ self.assertEqual(0, len([fk for fk in pci_devices.foreign_keys
+ if fk.parent.name == 'compute_node_id']))
+
+ def _check_247(self, engine, data):
+ quota_usages = oslodbutils.get_table(engine, 'quota_usages')
+ self.assertFalse(quota_usages.c.resource.nullable)
+
+ pci_devices = oslodbutils.get_table(engine, 'pci_devices')
+ self.assertTrue(pci_devices.c.deleted.nullable)
+ self.assertFalse(pci_devices.c.product_id.nullable)
+ self.assertFalse(pci_devices.c.vendor_id.nullable)
+ self.assertFalse(pci_devices.c.dev_type.nullable)
+
+ def _post_downgrade_247(self, engine):
+ quota_usages = oslodbutils.get_table(engine, 'quota_usages')
+ self.assertTrue(quota_usages.c.resource.nullable)
+
+ pci_devices = oslodbutils.get_table(engine, 'pci_devices')
+ self.assertFalse(pci_devices.c.deleted.nullable)
+ self.assertTrue(pci_devices.c.product_id.nullable)
+ self.assertTrue(pci_devices.c.vendor_id.nullable)
+ self.assertTrue(pci_devices.c.dev_type.nullable)
+
+ def _check_248(self, engine, data):
+ self.assertIndexMembers(engine, 'reservations',
+ 'reservations_deleted_expire_idx',
+ ['deleted', 'expire'])
+
+ def _post_downgrade_248(self, engine):
+ reservations = oslodbutils.get_table(engine, 'reservations')
+ index_names = [idx.name for idx in reservations.indexes]
+ self.assertNotIn('reservations_deleted_expire_idx', index_names)
+
+ def _check_249(self, engine, data):
+ # Assert that only one index exists that covers columns
+ # instance_uuid and device_name
+ bdm = oslodbutils.get_table(engine, 'block_device_mapping')
+ self.assertEqual(1, len([i for i in bdm.indexes
+ if [c.name for c in i.columns] ==
+ ['instance_uuid', 'device_name']]))
+
+ def _post_downgrade_249(self, engine):
+ # The duplicate index is not created on downgrade, so this
+ # asserts that only one index exists that covers columns
+ # instance_uuid and device_name
+ bdm = oslodbutils.get_table(engine, 'block_device_mapping')
+ self.assertEqual(1, len([i for i in bdm.indexes
+ if [c.name for c in i.columns] ==
+ ['instance_uuid', 'device_name']]))
+
+ def _check_250(self, engine, data):
+ self.assertTableNotExists(engine, 'instance_group_metadata')
+ self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
+
+ def _post_downgrade_250(self, engine):
+ oslodbutils.get_table(engine, 'instance_group_metadata')
+ oslodbutils.get_table(engine, 'shadow_instance_group_metadata')
+
+ def _check_251(self, engine, data):
+ self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
+ self.assertColumnExists(
+ engine, 'shadow_compute_nodes', 'numa_topology')
+
+ compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
+ shadow_compute_nodes = oslodbutils.get_table(
+ engine, 'shadow_compute_nodes')
+ self.assertIsInstance(compute_nodes.c.numa_topology.type,
+ sqlalchemy.types.Text)
+ self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
+ sqlalchemy.types.Text)
+
+ def _post_downgrade_251(self, engine):
+ self.assertColumnNotExists(engine, 'compute_nodes', 'numa_topology')
+ self.assertColumnNotExists(
+ engine, 'shadow_compute_nodes', 'numa_topology')
+
class TestBaremetalMigrations(BaseWalkMigrationTestCase, CommonTestsMixIn):
"""Test sqlalchemy-migrate migrations."""
@@ -865,4 +948,4 @@ def test_all_migrations_have_downgrade(self):
helpful_msg = (_("The following migrations are missing a downgrade:"
"\n\t%s") % '\n\t'.join(sorted(missing_downgrade)))
- self.assertTrue(not missing_downgrade, helpful_msg)
+ self.assertFalse(missing_downgrade, helpful_msg)
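The _check_NNN/_post_downgrade_NNN methods added above follow the walk-migration harness's naming convention: the runner calls _check_<version> after upgrading to that version and _post_downgrade_<version> after downgrading past it. A sketch of such a pair for a hypothetical future migration 252 that adds a column (both the version number and the column name are invented for illustration; these would be methods on the same test class):

def _check_252(self, engine, data):
    self.assertColumnExists(engine, 'instances', 'example_column')

def _post_downgrade_252(self, engine):
    self.assertColumnNotExists(engine, 'instances', 'example_column')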
diff --git a/nova/tests/fake_instance.py b/nova/tests/fake_instance.py
index e91cf8009c..b1a080269d 100644
--- a/nova/tests/fake_instance.py
+++ b/nova/tests/fake_instance.py
@@ -17,7 +17,6 @@
from nova import objects
from nova.objects import fields
-from nova.objects import instance_fault as inst_fault_obj
def fake_db_secgroups(instance, names):
@@ -103,6 +102,6 @@ def fake_fault_obj(context, instance_uuid, code=404,
}
if updates:
fault.update(updates)
- return inst_fault_obj.InstanceFault._from_db_object(context,
- inst_fault_obj.InstanceFault(),
- fault)
+ return objects.InstanceFault._from_db_object(context,
+ objects.InstanceFault(),
+ fault)
diff --git a/nova/tests/fake_ldap.py b/nova/tests/fake_ldap.py
index e3e6d77080..5e3a1cc7a3 100644
--- a/nova/tests/fake_ldap.py
+++ b/nova/tests/fake_ldap.py
@@ -23,7 +23,7 @@ class definitions. It implements the minimum emulation of the python ldap
import fnmatch
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
index cc1e724c5e..873d2cfa95 100644
--- a/nova/tests/fake_network.py
+++ b/nova/tests/fake_network.py
@@ -33,7 +33,6 @@
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_instance_info_cache
from nova.tests.objects import test_pci_device
-from nova.virt.libvirt import config as libvirt_config
HOST = "testhost"
@@ -41,41 +40,6 @@
CONF.import_opt('use_ipv6', 'nova.netconf')
-class FakeIptablesFirewallDriver(object):
- def __init__(self, **kwargs):
- pass
-
- def setattr(self, key, val):
- self.__setattr__(key, val)
-
- def apply_instance_filter(self, instance, network_info):
- pass
-
-
-class FakeVIFDriver(object):
-
- def __init__(self, *args, **kwargs):
- pass
-
- def setattr(self, key, val):
- self.__setattr__(key, val)
-
- def get_config(self, instance, vif, image_meta, inst_type):
- conf = libvirt_config.LibvirtConfigGuestInterface()
-
- for attr, val in conf.__dict__.iteritems():
- if val is None:
- setattr(conf, attr, 'fake')
-
- return conf
-
- def plug(self, instance, vif):
- pass
-
- def unplug(self, instance, vif):
- pass
-
-
class FakeModel(dict):
"""Represent a model from the db."""
def __init__(self, *args, **kwargs):
@@ -189,7 +153,8 @@ def deallocate_fixed_ip(self, context, address=None, host=None,
self.deallocate_called = address
def _create_fixed_ips(self, context, network_id, fixed_cidr=None,
- extra_reserved=None):
+ extra_reserved=None, bottom_reserved=0,
+ top_reserved=0):
pass
def get_instance_nw_info(context, instance_id, rxtx_factor,
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index ede5974b17..d0fd1abc01 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -59,6 +59,9 @@
"compute:attach_volume": "",
"compute:detach_volume": "",
+ "compute:attach_interface": "",
+ "compute:detach_interface": "",
+
"compute:set_admin_password": "",
"compute:rescue": "",
@@ -135,7 +138,15 @@
"compute_extension:v3:os-attach-interfaces": "",
"compute_extension:baremetal_nodes": "",
"compute_extension:cells": "",
+ "compute_extension:cells:create": "rule:admin_api",
+ "compute_extension:cells:delete": "rule:admin_api",
+ "compute_extension:cells:update": "rule:admin_api",
+ "compute_extension:cells:sync_instances": "rule:admin_api",
"compute_extension:v3:os-cells": "",
+ "compute_extension:v3:os-cells:create": "rule:admin_api",
+ "compute_extension:v3:os-cells:delete": "rule:admin_api",
+ "compute_extension:v3:os-cells:update": "rule:admin_api",
+ "compute_extension:v3:os-cells:sync_instances": "rule:admin_api",
"compute_extension:certificates": "",
"compute_extension:v3:os-certificates:create": "",
"compute_extension:v3:os-certificates:show": "",
@@ -207,7 +218,7 @@
"compute_extension:v3:os-hide-server-addresses": "",
"compute_extension:hosts": "",
"compute_extension:v3:os-hosts": "rule:admin_api",
- "compute_extension:hypervisors": "",
+ "compute_extension:hypervisors": "rule:admin_api",
"compute_extension:v3:os-hypervisors": "rule:admin_api",
"compute_extension:image_size": "",
"compute_extension:instance_actions": "",
@@ -374,6 +385,7 @@
"network:get_dns_entries_by_name": "",
"network:create_private_dns_domain": "",
"network:create_public_dns_domain": "",
- "network:delete_dns_domain": ""
+ "network:delete_dns_domain": "",
+ "network:attach_external_network": "rule:admin_api"
}
"""
diff --git a/nova/tests/fake_utils.py b/nova/tests/fake_utils.py
index cb73bc8bb9..7a97866d20 100644
--- a/nova/tests/fake_utils.py
+++ b/nova/tests/fake_utils.py
@@ -23,6 +23,14 @@ def stub_out_utils_spawn_n(stubs):
This aids testing async processes by blocking until they're done.
"""
def no_spawn(func, *args, **kwargs):
- return func(*args, **kwargs)
+ try:
+ return func(*args, **kwargs)
+ except Exception:
+ # NOTE(danms): This is supposed to simulate spawning
+ # of a thread, which would run separately from the parent,
+ # and die silently on error. If we don't catch and discard
+ # any exceptions here, we're not honoring the usual
+ # behavior.
+ pass
stubs.Set(utils, 'spawn_n', no_spawn)
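A typical use of this helper mirrors the conductor test setUp earlier in this patch: stub out utils.spawn_n so asynchronous casts run inline, then assert on their side effects synchronously. A minimal sketch, assuming the stubs attribute Nova's test base class provides (MyAsyncTestCase is an illustrative name):

from nova import test
from nova.tests import fake_utils

class MyAsyncTestCase(test.TestCase):
    def setUp(self):
        super(MyAsyncTestCase, self).setUp()
        # Anything handed to utils.spawn_n now runs inline and, per the
        # stub above, swallows its exceptions like a detached thread.
        fake_utils.stub_out_utils_spawn_n(self.stubs)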
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index 5318e86f0b..e37da85c85 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -17,7 +17,7 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
deleted file mode 100644
index 8e8e3aa0be..0000000000
--- a/nova/tests/glance/stubs.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright (c) 2011 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import glanceclient.exc
-
-
-NOW_GLANCE_FORMAT = "2010-10-11T10:30:22"
-
-
-class StubGlanceClient(object):
-
- def __init__(self, images=None, version=None, endpoint=None, **params):
- self.auth_token = params.get('token')
- self.identity_headers = params.get('identity_headers')
- if self.identity_headers:
- if self.identity_headers.get('X-Auth-Token'):
- self.auth_token = (self.identity_headers.get('X-Auth_Token') or
- self.auth_token)
- del self.identity_headers['X-Auth-Token']
- self._images = []
- _images = images or []
- map(lambda image: self.create(**image), _images)
-
- #NOTE(bcwaldon): HACK to get client.images.* to work
- self.images = lambda: None
- for fn in ('list', 'get', 'data', 'create', 'update', 'delete'):
- setattr(self.images, fn, getattr(self, fn))
-
- #TODO(bcwaldon): implement filters
- def list(self, filters=None, marker=None, limit=30, page_size=20):
- if marker is None:
- index = 0
- else:
- for index, image in enumerate(self._images):
- if image.id == str(marker):
- index += 1
- break
- else:
- raise glanceclient.exc.BadRequest('Marker not found')
- return self._images[index:index + limit]
-
- def get(self, image_id):
- for image in self._images:
- if image.id == str(image_id):
- return image
- raise glanceclient.exc.NotFound(image_id)
-
- def data(self, image_id):
- self.get(image_id)
- return []
-
- def create(self, **metadata):
- metadata['created_at'] = NOW_GLANCE_FORMAT
- metadata['updated_at'] = NOW_GLANCE_FORMAT
-
- self._images.append(FakeImage(metadata))
-
- try:
- image_id = str(metadata['id'])
- except KeyError:
- # auto-generate an id if one wasn't provided
- image_id = str(len(self._images))
-
- self._images[-1].id = image_id
-
- return self._images[-1]
-
- def update(self, image_id, **metadata):
- for i, image in enumerate(self._images):
- if image.id == str(image_id):
- # If you try to update a non-authorized image, it raises
- # HTTPForbidden
- if image.owner == 'authorized_fake':
- raise glanceclient.exc.HTTPForbidden
-
- for k, v in metadata.items():
- setattr(self._images[i], k, v)
- return self._images[i]
- raise glanceclient.exc.NotFound(image_id)
-
- def delete(self, image_id):
- for i, image in enumerate(self._images):
- if image.id == image_id:
- # When you delete an image from glance, it sets the status to
- # DELETED. If you try to delete a DELETED image, it raises
- # HTTPForbidden.
- image_data = self._images[i]
- if image_data.deleted:
- raise glanceclient.exc.HTTPForbidden()
- image_data.deleted = True
- image_data.deleted_at = NOW_GLANCE_FORMAT
- return
- raise glanceclient.exc.NotFound(image_id)
-
-
-class FakeImage(object):
- def __init__(self, metadata):
- IMAGE_ATTRIBUTES = ['size', 'disk_format', 'owner',
- 'container_format', 'checksum', 'id',
- 'name', 'created_at', 'updated_at',
- 'deleted', 'deleted_at', 'status',
- 'min_disk', 'min_ram', 'is_public']
- raw = dict.fromkeys(IMAGE_ATTRIBUTES)
- raw.update(metadata)
- self.__dict__['raw'] = raw
-
- def __getattr__(self, key):
- try:
- return self.__dict__['raw'][key]
- except KeyError:
- raise AttributeError(key)
-
- def __setattr__(self, key, value):
- try:
- self.__dict__['raw'][key] = value
- except KeyError:
- raise AttributeError(key)
diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py
index 0e8d3ac114..1a6adce9c9 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -154,7 +154,7 @@ def __init__(self):
self._imagedata = {}
super(_FakeImageService, self).__init__()
- #TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
+ # TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
@@ -167,7 +167,7 @@ def download(self, context, image_id, dst_path=None, data=None):
with open(dst_path, 'wb') as data:
data.write(self._imagedata.get(image_id, ''))
- def show(self, context, image_id):
+ def show(self, context, image_id, include_locations=False):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
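The widened show() signature keeps the fake in step with the real image service, which can now be asked for backend location details. Judging by the expectations in the glance tests later in this patch, the only observable difference is two extra keys in the returned dict; a hedged sketch of a caller (image_service stands in for any nova.image service object):

def get_direct_url(image_service, context, image_id):
    meta = image_service.show(context, image_id, include_locations=True)
    # Only with include_locations=True does the result carry the extra
    # 'locations' and 'direct_url' keys (both None if glance has none).
    return meta.get('direct_url')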
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 557ab25298..d72061e218 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -15,16 +15,10 @@
import datetime
-import filecmp
-import os
-import random
import sys
-import tempfile
-import time
import glanceclient.exc
import mock
-import mox
from oslo.config import cfg
import testtools
@@ -32,19 +26,118 @@
from nova import exception
from nova.image import glance
from nova import test
-from nova.tests.api.openstack import fakes
-from nova.tests.glance import stubs as glance_stubs
from nova import utils
-import nova.virt.libvirt.utils as lv_utils
CONF = cfg.CONF
+NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
-class NullWriter(object):
- """Used to test ImageService.get which takes a writer object."""
+class tzinfo(datetime.tzinfo):
+ @staticmethod
+ def utcoffset(*args, **kwargs):
+ return datetime.timedelta()
+
+NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
+
+
+class TestConversions(test.NoDBTestCase):
+ def test_convert_timestamps_to_datetimes(self):
+ fixture = {'name': None,
+ 'properties': {},
+ 'status': None,
+ 'is_public': None,
+ 'created_at': NOW_GLANCE_FORMAT,
+ 'updated_at': NOW_GLANCE_FORMAT,
+ 'deleted_at': NOW_GLANCE_FORMAT}
+ result = glance._convert_timestamps_to_datetimes(fixture)
+ self.assertEqual(result['created_at'], NOW_DATETIME)
+ self.assertEqual(result['updated_at'], NOW_DATETIME)
+ self.assertEqual(result['deleted_at'], NOW_DATETIME)
+
+ def _test_extracting_missing_attributes(self, include_locations):
+ # Verify behavior from glance objects that are missing attributes
+ # TODO(jaypipes): Find a better way of testing this crappy
+ # glanceclient magic object stuff.
+ class MyFakeGlanceImage(object):
+ def __init__(self, metadata):
+ IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at',
+ 'updated_at', 'status', 'min_disk',
+ 'min_ram', 'is_public']
+ raw = dict.fromkeys(IMAGE_ATTRIBUTES)
+ raw.update(metadata)
+ self.__dict__['raw'] = raw
+
+ def __getattr__(self, key):
+ try:
+ return self.__dict__['raw'][key]
+ except KeyError:
+ raise AttributeError(key)
+
+ def __setattr__(self, key, value):
+ try:
+ self.__dict__['raw'][key] = value
+ except KeyError:
+ raise AttributeError(key)
+
+ metadata = {
+ 'id': 1,
+ 'created_at': NOW_DATETIME,
+ 'updated_at': NOW_DATETIME,
+ }
+ image = MyFakeGlanceImage(metadata)
+ observed = glance._extract_attributes(
+ image, include_locations=include_locations)
+ expected = {
+ 'id': 1,
+ 'name': None,
+ 'is_public': None,
+ 'size': None,
+ 'min_disk': None,
+ 'min_ram': None,
+ 'disk_format': None,
+ 'container_format': None,
+ 'checksum': None,
+ 'created_at': NOW_DATETIME,
+ 'updated_at': NOW_DATETIME,
+ 'deleted_at': None,
+ 'deleted': None,
+ 'status': None,
+ 'properties': {},
+ 'owner': None
+ }
+ if include_locations:
+ expected['locations'] = None
+ expected['direct_url'] = None
+ self.assertEqual(expected, observed)
+
+ def test_extracting_missing_attributes_include_locations(self):
+ self._test_extracting_missing_attributes(include_locations=True)
+
+ def test_extracting_missing_attributes_exclude_locations(self):
+ self._test_extracting_missing_attributes(include_locations=False)
- def write(self, *arg, **kwargs):
- pass
+
+class TestExceptionTranslations(test.NoDBTestCase):
+
+ def test_client_forbidden_to_imagenotauthed(self):
+ in_exc = glanceclient.exc.Forbidden('123')
+ out_exc = glance._translate_image_exception('123', in_exc)
+ self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
+
+ def test_client_httpforbidden_converts_to_imagenotauthed(self):
+ in_exc = glanceclient.exc.HTTPForbidden('123')
+ out_exc = glance._translate_image_exception('123', in_exc)
+ self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
+
+ def test_client_notfound_converts_to_imagenotfound(self):
+ in_exc = glanceclient.exc.NotFound('123')
+ out_exc = glance._translate_image_exception('123', in_exc)
+ self.assertIsInstance(out_exc, exception.ImageNotFound)
+
+ def test_client_httpnotfound_converts_to_imagenotfound(self):
+ in_exc = glanceclient.exc.HTTPNotFound('123')
+ out_exc = glance._translate_image_exception('123', in_exc)
+ self.assertIsInstance(out_exc, exception.ImageNotFound)
class TestGlanceSerializer(test.NoDBTestCase):
@@ -83,487 +176,464 @@ def test_serialize(self):
self.assertEqual(glance._convert_from_string(converted), metadata)
-class TestGlanceImageService(test.NoDBTestCase):
- """Tests the Glance image service.
+class TestGetImageService(test.NoDBTestCase):
+ @mock.patch.object(glance.GlanceClientWrapper, '__init__',
+ return_value=None)
+ def test_get_remote_service_from_id(self, gcwi_mocked):
+ id_or_uri = '123'
+ _ignored, image_id = glance.get_remote_image_service(
+ mock.sentinel.ctx, id_or_uri)
+ self.assertEqual(id_or_uri, image_id)
+ gcwi_mocked.assert_called_once_with()
+
+ @mock.patch.object(glance.GlanceClientWrapper, '__init__',
+ return_value=None)
+ def test_get_remote_service_from_href(self, gcwi_mocked):
+ id_or_uri = 'http://127.0.0.1/123'
+ _ignored, image_id = glance.get_remote_image_service(
+ mock.sentinel.ctx, id_or_uri)
+ self.assertEqual('123', image_id)
+ gcwi_mocked.assert_called_once_with(context=mock.sentinel.ctx,
+ host='127.0.0.1',
+ port=80,
+ use_ssl=False)
+
+
+class TestCreateGlanceClient(test.NoDBTestCase):
+ @mock.patch('nova.utils.is_valid_ipv6')
+ @mock.patch('glanceclient.Client')
+ def test_headers_passed_glanceclient(self, init_mock, ipv6_mock):
+ self.flags(auth_strategy='keystone')
+ ipv6_mock.return_value = False
+ auth_token = 'token'
+ ctx = context.RequestContext('fake', 'fake', auth_token=auth_token)
+ host = 'host4'
+ port = 9295
+ use_ssl = False
+
+ expected_endpoint = 'http://host4:9295'
+ expected_params = {
+ 'identity_headers': {
+ 'X-Auth-Token': 'token',
+ 'X-User-Id': 'fake',
+ 'X-Roles': '',
+ 'X-Tenant-Id': 'fake',
+ 'X-Service-Catalog': '[]',
+ 'X-Identity-Status': 'Confirmed'
+ },
+ 'token': 'token'
+ }
+ glance._create_glance_client(ctx, host, port, use_ssl)
+ init_mock.assert_called_once_with('1', expected_endpoint,
+ **expected_params)
+
+ # Test the version is properly passed to glanceclient.
+ ipv6_mock.reset_mock()
+ init_mock.reset_mock()
+
+ expected_endpoint = 'http://host4:9295'
+ expected_params = {
+ 'identity_headers': {
+ 'X-Auth-Token': 'token',
+ 'X-User-Id': 'fake',
+ 'X-Roles': '',
+ 'X-Tenant-Id': 'fake',
+ 'X-Service-Catalog': '[]',
+ 'X-Identity-Status': 'Confirmed'
+ },
+ 'token': 'token'
+ }
+ glance._create_glance_client(ctx, host, port, use_ssl, version=2)
+ init_mock.assert_called_once_with('2', expected_endpoint,
+ **expected_params)
- At a high level, the translations involved are:
+ # Test that non-keystone auth strategy doesn't bother to pass
+ # glanceclient all the Keystone-related headers.
+ ipv6_mock.reset_mock()
+ init_mock.reset_mock()
- 1. Glance -> ImageService - This is needed so we can support
- multple ImageServices (Glance, Local, etc)
+ self.flags(auth_strategy='non-keystone')
- 2. ImageService -> API - This is needed so we can support multple
- APIs (OpenStack, EC2)
+ expected_endpoint = 'http://host4:9295'
+ expected_params = {
+ }
+ glance._create_glance_client(ctx, host, port, use_ssl)
+ init_mock.assert_called_once_with('1', expected_endpoint,
+ **expected_params)
- """
- NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
- NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
-
- class tzinfo(datetime.tzinfo):
- @staticmethod
- def utcoffset(*args, **kwargs):
- return datetime.timedelta()
-
- NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
-
- def setUp(self):
- super(TestGlanceImageService, self).setUp()
- fakes.stub_out_compute_api_snapshot(self.stubs)
-
- self.client = glance_stubs.StubGlanceClient()
- self.service = self._create_image_service(self.client)
- self.context = context.RequestContext('fake', 'fake', auth_token=True)
- self.mox = mox.Mox()
- self.files_to_clean = []
-
- def tearDown(self):
- super(TestGlanceImageService, self).tearDown()
- self.mox.UnsetStubs()
- for f in self.files_to_clean:
- try:
- os.unlink(f)
- except os.error:
- pass
-
- def _get_tempfile(self):
- (outfd, config_filename) = tempfile.mkstemp(prefix='nova_glance_tests')
- self.files_to_clean.append(config_filename)
- return (outfd, config_filename)
-
- def _create_image_service(self, client):
- def _fake_create_glance_client(context, host, port, use_ssl, version):
- return client
-
- self.stubs.Set(glance, '_create_glance_client',
- _fake_create_glance_client)
-
- client_wrapper = glance.GlanceClientWrapper(
- 'fake', 'fake_host', 9292)
- return glance.GlanceImageService(client=client_wrapper)
+ # Test that the IPv6 bracketization adapts the endpoint properly.
+ ipv6_mock.reset_mock()
+ init_mock.reset_mock()
- @staticmethod
- def _make_fixture(**kwargs):
- fixture = {'name': None,
- 'properties': {},
- 'status': None,
- 'is_public': None}
- fixture.update(kwargs)
- return fixture
-
- def _make_datetime_fixture(self):
- return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
- updated_at=self.NOW_GLANCE_FORMAT,
- deleted_at=self.NOW_GLANCE_FORMAT)
-
- def test_show_makes_datetimes(self):
- fixture = self._make_datetime_fixture()
- image_id = self.service.create(self.context, fixture)['id']
- image_meta = self.service.show(self.context, image_id)
- self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
- self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
-
- def test_detail_makes_datetimes(self):
- fixture = self._make_datetime_fixture()
- self.service.create(self.context, fixture)
- image_meta = self.service.detail(self.context)[0]
- self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
- self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
-
- def test_page_size(self):
- with mock.patch.object(glance.GlanceClientWrapper, 'call') as a_mock:
- self.service.detail(self.context, page_size=5)
- self.assertEqual(a_mock.called, True)
- a_mock.assert_called_with(self.context, 1, 'list',
- filters={'is_public': 'none'},
- page_size=5)
-
- def test_download_with_retries(self):
- tries = [0]
-
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- """A client that fails the first time, then succeeds."""
- def get(self, image_id):
- if tries[0] == 0:
- tries[0] = 1
- raise glanceclient.exc.ServiceUnavailable('')
- else:
- return {}
-
- client = MyGlanceStubClient()
- service = self._create_image_service(client)
- image_id = 1 # doesn't matter
- writer = NullWriter()
-
- # When retries are disabled, we should get an exception
- self.flags(num_retries=0, group='glance')
- self.assertRaises(exception.GlanceConnectionFailed,
- service.download, self.context, image_id, data=writer)
+ ipv6_mock.return_value = True
- # Now lets enable retries. No exception should happen now.
- tries = [0]
- self.flags(num_retries=1, group='glance')
- service.download(self.context, image_id, data=writer)
+ expected_endpoint = 'http://[host4]:9295'
+ expected_params = {
+ }
+ glance._create_glance_client(ctx, host, port, use_ssl)
+ init_mock.assert_called_once_with('1', expected_endpoint,
+ **expected_params)
- def test_download_file_url(self):
- self.flags(allowed_direct_url_schemes=['file'], group='glance')
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- """A client that returns a file url."""
+class TestGlanceClientWrapper(test.NoDBTestCase):
+ @mock.patch('time.sleep')
+ @mock.patch('nova.image.glance._create_glance_client')
+ def test_static_client_without_retries(self, create_client_mock,
+ sleep_mock):
+ client_mock = mock.MagicMock()
+ images_mock = mock.MagicMock()
+ images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
+ type(client_mock).images = mock.PropertyMock(return_value=images_mock)
+ create_client_mock.return_value = client_mock
+ self.flags(num_retries=0, group='glance')
- (outfd, s_tmpfname) = tempfile.mkstemp(prefix='directURLsrc')
- outf = os.fdopen(outfd, 'w')
- inf = open('/dev/urandom', 'r')
- for i in range(10):
- _data = inf.read(1024)
- outf.write(_data)
- outf.close()
+ ctx = context.RequestContext('fake', 'fake')
+ host = 'host4'
+ port = 9295
+ use_ssl = False
- def get(self, image_id):
- return type('GlanceTestDirectUrlMeta', (object,),
- {'direct_url': 'file://%s' + self.s_tmpfname})
+ client = glance.GlanceClientWrapper(context=ctx, host=host, port=port,
+ use_ssl=use_ssl)
+ create_client_mock.assert_called_once_with(ctx, host, port, use_ssl, 1)
+ self.assertRaises(exception.GlanceConnectionFailed,
+ client.call, ctx, 1, 'get', 'meow')
+ self.assertFalse(sleep_mock.called)
- client = MyGlanceStubClient()
- (outfd, tmpfname) = tempfile.mkstemp(prefix='directURLdst')
- os.close(outfd)
+ @mock.patch('time.sleep')
+ @mock.patch('nova.image.glance._create_glance_client')
+ def test_static_client_with_retries(self, create_client_mock,
+ sleep_mock):
+ self.flags(num_retries=1, group='glance')
+ client_mock = mock.MagicMock()
+ images_mock = mock.MagicMock()
+ images_mock.get.side_effect = [
+ glanceclient.exc.ServiceUnavailable,
+ None
+ ]
+ type(client_mock).images = mock.PropertyMock(return_value=images_mock)
+ create_client_mock.return_value = client_mock
+
+ ctx = context.RequestContext('fake', 'fake')
+ host = 'host4'
+ port = 9295
+ use_ssl = False
+
+ client = glance.GlanceClientWrapper(context=ctx,
+ host=host, port=port, use_ssl=use_ssl)
+ client.call(ctx, 1, 'get', 'meow')
+ sleep_mock.assert_called_once_with(1)
+
+ @mock.patch('random.shuffle')
+ @mock.patch('time.sleep')
+ @mock.patch('nova.image.glance._create_glance_client')
+ def test_default_client_without_retries(self, create_client_mock,
+ sleep_mock, shuffle_mock):
+ api_servers = [
+ 'host1:9292',
+ 'https://host2:9293',
+ 'http://host3:9294'
+ ]
+ client_mock = mock.MagicMock()
+ images_mock = mock.MagicMock()
+ images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
+ type(client_mock).images = mock.PropertyMock(return_value=images_mock)
+ create_client_mock.return_value = client_mock
- service = self._create_image_service(client)
- image_id = 1 # doesn't matter
+ shuffle_mock.return_value = api_servers
+ self.flags(num_retries=0, group='glance')
+ self.flags(api_servers=api_servers, group='glance')
- service.download(self.context, image_id, dst_path=tmpfname)
+ # Here we are testing that calling client.call() twice when there are
+ # no retries configured cycles through the api_servers without
+ # sleeping (a sleep would indicate that a retry occurred).
+ ctx = context.RequestContext('fake', 'fake')
- # compare the two files
- rc = filecmp.cmp(tmpfname, client.s_tmpfname)
- self.assertTrue(rc, "The file %s and %s should be the same" %
- (tmpfname, client.s_tmpfname))
- os.remove(client.s_tmpfname)
- os.remove(tmpfname)
+ client = glance.GlanceClientWrapper()
+ self.assertRaises(exception.GlanceConnectionFailed,
+ client.call, ctx, 1, 'get', 'meow')
+ self.assertFalse(sleep_mock.called)
- def test_download_module_filesystem_match(self):
+ self.assertRaises(exception.GlanceConnectionFailed,
+ client.call, ctx, 1, 'get', 'meow')
+ self.assertFalse(sleep_mock.called)
+
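+ # host1 has no scheme and so defaults to http (use_ssl=False), while
+ # host2 was given explicitly as https (use_ssl=True).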
+ create_client_mock.assert_has_calls(
+ [
+ mock.call(ctx, 'host1', 9292, False, 1),
+ mock.call(ctx, 'host2', 9293, True, 1),
+ ]
+ )
+
+ @mock.patch('random.shuffle')
+ @mock.patch('time.sleep')
+ @mock.patch('nova.image.glance._create_glance_client')
+ def test_default_client_with_retries(self, create_client_mock,
+ sleep_mock, shuffle_mock):
+ api_servers = [
+ 'host1:9292',
+ 'https://host2:9293',
+ 'http://host3:9294'
+ ]
+ client_mock = mock.MagicMock()
+ images_mock = mock.MagicMock()
+ images_mock.get.side_effect = [
+ glanceclient.exc.ServiceUnavailable,
+ None
+ ]
+ type(client_mock).images = mock.PropertyMock(return_value=images_mock)
+ create_client_mock.return_value = client_mock
- mountpoint = '/'
- fs_id = 'someid'
- desc = {'id': fs_id, 'mountpoint': mountpoint}
+ self.flags(num_retries=1, group='glance')
+ self.flags(api_servers=api_servers, group='glance')
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- outer_test = self
+ ctx = context.RequestContext('fake', 'fake')
- def get(self, image_id):
- return type('GlanceLocations', (object,),
- {'locations': [
- {'url': 'file:///' + os.devnull,
- 'metadata': desc}]})
+ # And here we're testing that if num_retries is not 0, then we attempt
+ # to retry the same connection action against the next client.
- def data(self, image_id):
- self.outer_test.fail('This should not be called because the '
- 'transfer module should have intercepted '
- 'it.')
+ client = glance.GlanceClientWrapper()
+ client.call(ctx, 1, 'get', 'meow')
- self.mox.StubOutWithMock(lv_utils, 'copy_image')
+ create_client_mock.assert_has_calls(
+ [
+ mock.call(ctx, 'host1', 9292, False, 1),
+ mock.call(ctx, 'host2', 9293, True, 1),
+ ]
+ )
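+ # The failed call against host1 was retried against host2 after a
+ # single one-second sleep.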
+ sleep_mock.assert_called_once_with(1)
- image_id = 1 # doesn't matter
- client = MyGlanceStubClient()
- self.flags(allowed_direct_url_schemes=['file'], group='glance')
- self.flags(group='image_file_url', filesystems=['gluster'])
- service = self._create_image_service(client)
- #NOTE(Jbresnah) The following options must be added after the module
- # has added the specific groups.
- self.flags(group='image_file_url:gluster', id=fs_id)
- self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
-
- dest_file = os.devnull
- lv_utils.copy_image(mox.IgnoreArg(), dest_file)
-
- self.mox.ReplayAll()
- service.download(self.context, image_id, dst_path=dest_file)
- self.mox.VerifyAll()
-
- def test_download_module_no_filesystem_match(self):
- mountpoint = '/'
- fs_id = 'someid'
- desc = {'id': fs_id, 'mountpoint': mountpoint}
- some_data = "sfxvdwjer"
-
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- outer_test = self
-
- def get(self, image_id):
- return type('GlanceLocations', (object,),
- {'locations': [
- {'url': 'file:///' + os.devnull,
- 'metadata': desc}]})
-
- def data(self, image_id):
- return some_data
-
- def _fake_copyfile(source, dest):
- self.fail('This should not be called because a match should not '
- 'have been found.')
- self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
-
- image_id = 1 # doesn't matter
- client = MyGlanceStubClient()
- self.flags(allowed_direct_url_schemes=['file'], group='glance')
- self.flags(group='image_file_url', filesystems=['gluster'])
- service = self._create_image_service(client)
- #NOTE(Jbresnah) The following options must be added after the module
- # has added the specific groups.
- self.flags(group='image_file_url:gluster', id='someotherid')
- self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
-
- service.download(self.context, image_id,
- dst_path=os.devnull,
- data=None)
-
- def test_download_module_mountpoints(self):
- glance_mount = '/glance/mount/point'
- _, data_filename = self._get_tempfile()
- nova_mount = os.path.dirname(data_filename)
- source_path = os.path.basename(data_filename)
- file_url = 'file://%s' % os.path.join(glance_mount, source_path)
- file_system_id = 'test_FS_ID'
- file_system_desc = {'id': file_system_id, 'mountpoint': glance_mount}
-
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- outer_test = self
-
- def get(self, image_id):
- return type('GlanceLocations', (object,),
- {'locations': [{'url': file_url,
- 'metadata': file_system_desc}]})
-
- def data(self, image_id):
- self.outer_test.fail('This should not be called because the '
- 'transfer module should have intercepted '
- 'it.')
-
- self.copy_called = False
-
- def _fake_copyfile(source, dest):
- self.assertEqual(source, data_filename)
- self.copy_called = True
- self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
- self.flags(allowed_direct_url_schemes=['file'], group='glance')
- self.flags(group='image_file_url', filesystems=['gluster'])
- image_id = 1 # doesn't matter
- client = MyGlanceStubClient()
- service = self._create_image_service(client)
- self.flags(group='image_file_url:gluster', id=file_system_id)
- self.flags(group='image_file_url:gluster', mountpoint=nova_mount)
+class TestDownloadNoDirectUri(test.NoDBTestCase):
- service.download(self.context, image_id, dst_path=os.devnull)
- self.assertTrue(self.copy_called)
+ """Tests the download method of the GlanceImageService when the
+ default of not allowing direct URI transfers is set.
+ """
- def test_download_module_file_bad_module(self):
- _, data_filename = self._get_tempfile()
- file_url = 'applesauce://%s' % data_filename
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_no_data_no_dest_path(self, show_mock, open_mock):
+ client = mock.MagicMock()
+ client.call.return_value = mock.sentinel.image_chunks
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id)
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- data_called = False
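+ # With no data and no dst_path, download() should return the raw
+ # chunk iterator from the client untouched.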
+ self.assertFalse(show_mock.called)
+ self.assertFalse(open_mock.called)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ self.assertEqual(mock.sentinel.image_chunks, res)
- def get(self, image_id):
- return type('GlanceLocations', (object,),
- {'locations': [{'url': file_url,
- 'metadata': {}}]})
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_data_no_dest_path(self, show_mock, open_mock):
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ data = mock.MagicMock()
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id, data=data)
- def data(self, image_id):
- self.data_called = True
- return "someData"
+ self.assertFalse(show_mock.called)
+ self.assertFalse(open_mock.called)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ self.assertIsNone(res)
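+ # The chunks are written to the caller-supplied file-like object,
+ # which download() must not close on the caller's behalf.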
+ data.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+ self.assertFalse(data.close.called)
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_no_data_dest_path(self, show_mock, open_mock):
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ writer = mock.MagicMock()
+ open_mock.return_value = writer
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id,
+ dst_path=mock.sentinel.dst_path)
- self.flags(allowed_direct_url_schemes=['applesauce'], group='glance')
+ self.assertFalse(show_mock.called)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ open_mock.assert_called_once_with(mock.sentinel.dst_path, 'wb')
+ self.assertIsNone(res)
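+ # download() opened the destination file itself, so it is also
+ # responsible for closing it once all chunks are written.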
+ writer.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+ writer.close.assert_called_once_with()
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_data_dest_path(self, show_mock, open_mock):
+ # NOTE(jaypipes): This really shouldn't be allowed, but because of the
+ # horrible design of the download() method in GlanceImageService, no
+ # error is raised, and the dst_path is ignored...
+ # TODO(jaypipes): Fix the aforementioned horrible design of
+ # the download() method.
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ data = mock.MagicMock()
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id, data=data,
+ dst_path=mock.sentinel.dst_path)
- self.mox.StubOutWithMock(lv_utils, 'copy_image')
+ self.assertFalse(show_mock.called)
+ self.assertFalse(open_mock.called)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ self.assertIsNone(res)
+ data.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+ self.assertFalse(data.close.called)
+
+ @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_direct_file_uri(self, show_mock, get_tran_mock):
self.flags(allowed_direct_url_schemes=['file'], group='glance')
- image_id = 1 # doesn't matter
- client = MyGlanceStubClient()
- service = self._create_image_service(client)
-
- # by not calling copyfileobj in the file download module we verify
- # that the requirements were not met for its use
- self.mox.ReplayAll()
- service.download(self.context, image_id, dst_path=os.devnull)
- self.mox.VerifyAll()
-
- self.assertTrue(client.data_called)
-
- def test_client_forbidden_converts_to_imagenotauthed(self):
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- """A client that raises a Forbidden exception."""
- def get(self, image_id):
- raise glanceclient.exc.Forbidden(image_id)
-
- client = MyGlanceStubClient()
- service = self._create_image_service(client)
- image_id = 1 # doesn't matter
- self.assertRaises(exception.ImageNotAuthorized, service.download,
- self.context, image_id, dst_path=os.devnull)
-
- def test_client_httpforbidden_converts_to_imagenotauthed(self):
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- """A client that raises a HTTPForbidden exception."""
- def get(self, image_id):
- raise glanceclient.exc.HTTPForbidden(image_id)
-
- client = MyGlanceStubClient()
- service = self._create_image_service(client)
- image_id = 1 # doesn't matter
- self.assertRaises(exception.ImageNotAuthorized, service.download,
- self.context, image_id, dst_path=os.devnull)
-
- def test_client_notfound_converts_to_imagenotfound(self):
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- """A client that raises a NotFound exception."""
- def get(self, image_id):
- raise glanceclient.exc.NotFound(image_id)
-
- client = MyGlanceStubClient()
- service = self._create_image_service(client)
- image_id = 1 # doesn't matter
- self.assertRaises(exception.ImageNotFound, service.download,
- self.context, image_id, dst_path=os.devnull)
-
- def test_client_httpnotfound_converts_to_imagenotfound(self):
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- """A client that raises a HTTPNotFound exception."""
- def get(self, image_id):
- raise glanceclient.exc.HTTPNotFound(image_id)
-
- client = MyGlanceStubClient()
- service = self._create_image_service(client)
- image_id = 1 # doesn't matter
- self.assertRaises(exception.ImageNotFound, service.download,
- self.context, image_id, dst_path=os.devnull)
-
- def test_glance_client_image_id(self):
- fixture = self._make_fixture(name='test image')
- image_id = self.service.create(self.context, fixture)['id']
- (service, same_id) = glance.get_remote_image_service(
- self.context, image_id)
- self.assertEqual(same_id, image_id)
-
- def test_glance_client_image_ref(self):
- fixture = self._make_fixture(name='test image')
- image_id = self.service.create(self.context, fixture)['id']
- image_url = 'http://something-less-likely/%s' % image_id
- (service, same_id) = glance.get_remote_image_service(
- self.context, image_url)
- self.assertEqual(same_id, image_id)
- self.assertEqual(service._client.host, 'something-less-likely')
-
- def test_extracting_missing_attributes(self):
- """Verify behavior from glance objects that are missing attributes
-
- This fakes the image class and is missing attribute as the client can
- return if they're not set in the database.
- """
- class MyFakeGlanceImage(glance_stubs.FakeImage):
- def __init__(self, metadata):
- IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at',
- 'updated_at', 'status', 'min_disk',
- 'min_ram', 'is_public']
- raw = dict.fromkeys(IMAGE_ATTRIBUTES)
- raw.update(metadata)
- self.__dict__['raw'] = raw
-
- metadata = {
- 'id': 1,
- 'created_at': self.NOW_DATETIME,
- 'updated_at': self.NOW_DATETIME,
+ show_mock.return_value = {
+ 'locations': [
+ {
+ 'url': 'file:///files/image',
+ 'metadata': mock.sentinel.loc_meta
+ }
+ ]
}
- image = MyFakeGlanceImage(metadata)
- observed = glance._extract_attributes(image)
- expected = {
- 'id': 1,
- 'name': None,
- 'is_public': None,
- 'size': None,
- 'min_disk': None,
- 'min_ram': None,
- 'disk_format': None,
- 'container_format': None,
- 'checksum': None,
- 'created_at': self.NOW_DATETIME,
- 'updated_at': self.NOW_DATETIME,
- 'deleted_at': None,
- 'deleted': None,
- 'status': None,
- 'properties': {},
- 'owner': None,
+ tran_mod = mock.MagicMock()
+ get_tran_mock.return_value = tran_mod
+ client = mock.MagicMock()
+ ctx = mock.sentinel.ctx
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id,
+ dst_path=mock.sentinel.dst_path)
+
+ self.assertIsNone(res)
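+ # The transfer module performs the copy, so the Glance API is never
+ # asked for the image data.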
+ self.assertFalse(client.call.called)
+ show_mock.assert_called_once_with(ctx,
+ mock.sentinel.image_id,
+ include_locations=True)
+ get_tran_mock.assert_called_once_with('file')
+ tran_mod.download.assert_called_once_with(ctx, mock.ANY,
+ mock.sentinel.dst_path,
+ mock.sentinel.loc_meta)
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_direct_exception_fallback(self, show_mock,
+ get_tran_mock,
+ open_mock):
+ # Test that we fall back to downloading to the dst_path
+ # if the download method of the transfer module raises
+ # an exception.
+ self.flags(allowed_direct_url_schemes=['file'], group='glance')
+ show_mock.return_value = {
+ 'locations': [
+ {
+ 'url': 'file:///files/image',
+ 'metadata': mock.sentinel.loc_meta
+ }
+ ]
}
- self.assertEqual(expected, observed)
-
-
-def _create_failing_glance_client(info):
- class MyGlanceStubClient(glance_stubs.StubGlanceClient):
- """A client that fails the first time, then succeeds."""
- def get(self, image_id):
- info['num_calls'] += 1
- if info['num_calls'] == 1:
- raise glanceclient.exc.ServiceUnavailable('')
- return {}
-
- return MyGlanceStubClient()
-
-
-class TestGetLocations(test.NoDBTestCase):
- """Tests the internal _get_locations function."""
-
- class ImageSpecV2(object):
- visibility = None
- properties = None
- locations = None
- direct_url = None
-
- @mock.patch('nova.image.glance._is_image_available')
- def test_success_has_locations(self, avail_mock):
- avail_mock.return_value = True
- locations = [
- mock.sentinel.loc1
- ]
- image_meta = mock.MagicMock(locations=locations,
- spec=TestGetLocations.ImageSpecV2)
-
- client_mock = mock.MagicMock()
- client_mock.call.return_value = image_meta
- locs = glance._get_locations(client_mock, mock.sentinel.ctx,
- mock.sentinel.image_id)
- client_mock.call.assert_called_once_with(mock.sentinel.ctx,
- 2, 'get',
- mock.sentinel.image_id)
- self.assertEqual(locations, locs)
- avail_mock.assert_called_once_with(mock.sentinel.ctx, image_meta)
-
- @mock.patch('nova.image.glance._is_image_available')
- def test_success_direct_uri_added_to_locations(self, avail_mock):
- avail_mock.return_value = True
- locations = [
- mock.sentinel.loc1
- ]
- image_meta = mock.MagicMock(locations=locations,
- spec=TestGetLocations.ImageSpecV2,
- direct_uri=mock.sentinel.duri)
-
- client_mock = mock.MagicMock()
- client_mock.call.return_value = image_meta
- locs = glance._get_locations(client_mock, mock.sentinel.ctx,
- mock.sentinel.image_id)
- client_mock.call.assert_called_once_with(mock.sentinel.ctx,
- 2, 'get',
- mock.sentinel.image_id)
- expected = locations
- expected.append({"url": mock.sentinel.duri, "metadata": {}})
- self.assertEqual(expected, locs)
-
- @mock.patch('nova.image.glance._reraise_translated_image_exception')
- @mock.patch('nova.image.glance._is_image_available')
- def test_get_locations_not_found(self, avail_mock, reraise_mock):
- raised = exception.ImageNotFound(image_id=123)
- reraise_mock.side_effect = raised
-
- client_mock = mock.MagicMock()
- client_mock.call.side_effect = glanceclient.exc.NotFound
- self.assertRaises(exception.ImageNotFound, glance._get_locations,
- client_mock, mock.sentinel.ctx,
- mock.sentinel.image_id)
+ tran_mod = mock.MagicMock()
+ tran_mod.download.side_effect = Exception
+ get_tran_mock.return_value = tran_mod
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ writer = mock.MagicMock()
+ open_mock.return_value = writer
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id,
+ dst_path=mock.sentinel.dst_path)
+
+ self.assertIsNone(res)
+ show_mock.assert_called_once_with(ctx,
+ mock.sentinel.image_id,
+ include_locations=True)
+ get_tran_mock.assert_called_once_with('file')
+ tran_mod.download.assert_called_once_with(ctx, mock.ANY,
+ mock.sentinel.dst_path,
+ mock.sentinel.loc_meta)
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ # NOTE(jaypipes): log messages call open() in part of the
+ # download path, so here, we just check that the last open()
+ # call was done for the dst_path file descriptor.
+ open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
+ self.assertIsNone(res)
+ writer.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+
+ @mock.patch('__builtin__.open')
+ @mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_direct_no_mod_fallback(self, show_mock,
+ get_tran_mock,
+ open_mock):
+ # Test that we fall back to downloading to the dst_path
+ # if no appropriate transfer module is found.
+ self.flags(allowed_direct_url_schemes=['funky'], group='glance')
+ show_mock.return_value = {
+ 'locations': [
+ {
+ 'url': 'file:///files/image',
+ 'metadata': mock.sentinel.loc_meta
+ }
+ ]
+ }
+ get_tran_mock.return_value = None
+ client = mock.MagicMock()
+ client.call.return_value = [1, 2, 3]
+ ctx = mock.sentinel.ctx
+ writer = mock.MagicMock()
+ open_mock.return_value = writer
+ service = glance.GlanceImageService(client)
+ res = service.download(ctx, mock.sentinel.image_id,
+ dst_path=mock.sentinel.dst_path)
+
+ self.assertIsNone(res)
+ show_mock.assert_called_once_with(ctx,
+ mock.sentinel.image_id,
+ include_locations=True)
+ get_tran_mock.assert_called_once_with('file')
+ client.call.assert_called_once_with(ctx, 1, 'data',
+ mock.sentinel.image_id)
+ # NOTE(jaypipes): log messages call open() in part of the
+ # download path, so here, we just check that the last open()
+ # call was done for the dst_path file descriptor.
+ open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
+ self.assertIsNone(res)
+ writer.write.assert_has_calls(
+ [
+ mock.call(1),
+ mock.call(2),
+ mock.call(3)
+ ]
+ )
+ writer.close.assert_called_once_with()
class TestIsImageAvailable(test.NoDBTestCase):
@@ -697,18 +767,19 @@ class TestShow(test.NoDBTestCase):
@mock.patch('nova.image.glance._is_image_available')
def test_show_success(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = True
- trans_from_mock.return_value = mock.sentinel.trans_from
+ trans_from_mock.return_value = {'mock': mock.sentinel.trans_from}
client = mock.MagicMock()
- client.call.return_value = mock.sentinel.images_0
+ client.call.return_value = {}
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
info = service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
- is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
- trans_from_mock.assert_called_once_with(mock.sentinel.images_0)
- self.assertEqual(mock.sentinel.trans_from, info)
+ is_avail_mock.assert_called_once_with(ctx, {})
+ trans_from_mock.assert_called_once_with({}, include_locations=False)
+ self.assertIn('mock', info)
+ self.assertEqual(mock.sentinel.trans_from, info['mock'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
@@ -753,7 +824,7 @@ def test_show_queued_image_without_some_attrs(self, is_avail_mock):
client = mock.MagicMock()
# fake image cls without disk_format, container_format, name attributes
- class fake_image_cls(object):
+ class fake_image_cls(dict):
id = 'b31aa5dd-f07a-4748-8f15-398346887584'
deleted = False
protected = False
@@ -781,6 +852,48 @@ class fake_image_cls(object):
self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys()))
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_include_locations_success(self, avail_mock, trans_from_mock):
+ locations = [mock.sentinel.loc1]
+ avail_mock.return_value = True
+ trans_from_mock.return_value = {'locations': locations}
+
+ client = mock.Mock()
+ client.call.return_value = mock.sentinel.image
+ service = glance.GlanceImageService(client)
+ ctx = mock.sentinel.ctx
+ image_id = mock.sentinel.image_id
+ info = service.show(ctx, image_id, include_locations=True)
+
+ client.call.assert_called_once_with(ctx, 2, 'get', image_id)
+ avail_mock.assert_called_once_with(ctx, mock.sentinel.image)
+ trans_from_mock.assert_called_once_with(mock.sentinel.image,
+ include_locations=True)
+ self.assertIn('locations', info)
+ self.assertEqual(locations, info['locations'])
+
+ @mock.patch('nova.image.glance._translate_from_glance')
+ @mock.patch('nova.image.glance._is_image_available')
+ def test_include_direct_uri_success(self, avail_mock, trans_from_mock):
+ locations = [mock.sentinel.loc1]
+ avail_mock.return_value = True
+ trans_from_mock.return_value = {'locations': locations,
+ 'direct_uri': mock.sentinel.duri}
+
+ client = mock.Mock()
+ client.call.return_value = mock.sentinel.image
+ service = glance.GlanceImageService(client)
+ ctx = mock.sentinel.ctx
+ image_id = mock.sentinel.image_id
+ info = service.show(ctx, image_id, include_locations=True)
+
+ client.call.assert_called_once_with(ctx, 2, 'get', image_id)
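+ # The direct_uri should be folded into the returned locations list
+ # as an extra entry with empty metadata.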
+ expected = locations
+ expected.append({'url': mock.sentinel.duri, 'metadata': {}})
+ self.assertIn('locations', info)
+ self.assertEqual(expected, info['locations'])
+
class TestDetail(test.NoDBTestCase):
@@ -826,20 +939,22 @@ def test_detail_success_unavailable(self, is_avail_mock, trans_from_mock,
self.assertFalse(trans_from_mock.called)
self.assertEqual([], images)
- @mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
- def test_detail_params_passed(self, is_avail_mock, _trans_from_mock,
- ext_query_mock):
- params = dict(limit=10)
- ext_query_mock.return_value = params
+ def test_detail_params_passed(self, is_avail_mock, _trans_from_mock):
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
- service.detail(ctx, **params)
+ service.detail(ctx, page_size=5, limit=10)
- client.call.assert_called_once_with(ctx, 1, 'list', limit=10)
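+ # In addition to the caller's pagination parameters, detail()
+ # injects a default is_public filter of 'none'.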
+ expected_filters = {
+ 'is_public': 'none'
+ }
+ client.call.assert_called_once_with(ctx, 1, 'list',
+ filters=expected_filters,
+ page_size=5,
+ limit=10)
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._extract_query_params')
@@ -1015,191 +1130,6 @@ def test_delete_client_failure(self):
mock.sentinel.image_id)
-class TestGlanceClientWrapper(test.NoDBTestCase):
-
- def setUp(self):
- super(TestGlanceClientWrapper, self).setUp()
- # host1 has no scheme, which is http by default
- self.flags(api_servers=['host1:9292', 'https://host2:9293',
- 'http://host3:9294'], group='glance')
-
- # Make the test run fast
- def _fake_sleep(secs):
- pass
- self.stubs.Set(time, 'sleep', _fake_sleep)
-
- def test_headers_passed_glanceclient(self):
- auth_token = 'auth_token'
- ctxt = context.RequestContext('fake', 'fake', auth_token=auth_token)
- fake_host = 'host4'
- fake_port = 9295
- fake_use_ssl = False
-
- def _get_fake_glanceclient(version, endpoint, **params):
- fake_client = glance_stubs.StubGlanceClient(version,
- endpoint, **params)
- self.assertIsNotNone(fake_client.auth_token)
- self.assertIsNotNone(fake_client.identity_headers)
- self.assertEqual(fake_client.identity_header['X-Auth_Token'],
- auth_token)
- self.assertEqual(fake_client.identity_header['X-User-Id'], 'fake')
- self.assertIsNone(fake_client.identity_header['X-Roles'])
- self.assertIsNone(fake_client.identity_header['X-Tenant-Id'])
- self.assertIsNone(fake_client.identity_header['X-Service-Catalog'])
- self.assertEqual(fake_client.
- identity_header['X-Identity-Status'],
- 'Confirmed')
-
- self.stubs.Set(glanceclient.Client, '__init__',
- _get_fake_glanceclient)
-
- glance._create_glance_client(ctxt, fake_host, fake_port, fake_use_ssl)
-
- def test_static_client_without_retries(self):
- self.flags(num_retries=0, group='glance')
-
- ctxt = context.RequestContext('fake', 'fake')
- fake_host = 'host4'
- fake_port = 9295
- fake_use_ssl = False
-
- info = {'num_calls': 0}
-
- def _fake_create_glance_client(context, host, port, use_ssl, version):
- self.assertEqual(host, fake_host)
- self.assertEqual(port, fake_port)
- self.assertEqual(use_ssl, fake_use_ssl)
- return _create_failing_glance_client(info)
-
- self.stubs.Set(glance, '_create_glance_client',
- _fake_create_glance_client)
-
- client = glance.GlanceClientWrapper(context=ctxt,
- host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
- self.assertRaises(exception.GlanceConnectionFailed,
- client.call, ctxt, 1, 'get', 'meow')
- self.assertEqual(info['num_calls'], 1)
-
- def test_default_client_without_retries(self):
- self.flags(num_retries=0, group='glance')
-
- ctxt = context.RequestContext('fake', 'fake')
-
- info = {'num_calls': 0,
- 'host': 'host1',
- 'port': 9292,
- 'use_ssl': False}
-
- # Leave the list in a known-order
- def _fake_shuffle(servers):
- pass
-
- def _fake_create_glance_client(context, host, port, use_ssl, version):
- self.assertEqual(host, info['host'])
- self.assertEqual(port, info['port'])
- self.assertEqual(use_ssl, info['use_ssl'])
- return _create_failing_glance_client(info)
-
- self.stubs.Set(random, 'shuffle', _fake_shuffle)
- self.stubs.Set(glance, '_create_glance_client',
- _fake_create_glance_client)
-
- client = glance.GlanceClientWrapper()
- client2 = glance.GlanceClientWrapper()
- self.assertRaises(exception.GlanceConnectionFailed,
- client.call, ctxt, 1, 'get', 'meow')
- self.assertEqual(info['num_calls'], 1)
-
- info = {'num_calls': 0,
- 'host': 'host2',
- 'port': 9293,
- 'use_ssl': True}
-
- def _fake_shuffle2(servers):
- # fake shuffle in a known manner
- servers.append(servers.pop(0))
-
- self.stubs.Set(random, 'shuffle', _fake_shuffle2)
-
- self.assertRaises(exception.GlanceConnectionFailed,
- client2.call, ctxt, 1, 'get', 'meow')
- self.assertEqual(info['num_calls'], 1)
-
- def test_static_client_with_retries(self):
- self.flags(num_retries=1, group='glance')
-
- ctxt = context.RequestContext('fake', 'fake')
- fake_host = 'host4'
- fake_port = 9295
- fake_use_ssl = False
-
- info = {'num_calls': 0}
-
- def _fake_create_glance_client(context, host, port, use_ssl, version):
- self.assertEqual(host, fake_host)
- self.assertEqual(port, fake_port)
- self.assertEqual(use_ssl, fake_use_ssl)
- return _create_failing_glance_client(info)
-
- self.stubs.Set(glance, '_create_glance_client',
- _fake_create_glance_client)
-
- client = glance.GlanceClientWrapper(context=ctxt,
- host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
- client.call(ctxt, 1, 'get', 'meow')
- self.assertEqual(info['num_calls'], 2)
-
- def test_default_client_with_retries(self):
- self.flags(num_retries=1, group='glance')
-
- ctxt = context.RequestContext('fake', 'fake')
-
- info = {'num_calls': 0,
- 'host0': 'host1',
- 'port0': 9292,
- 'use_ssl0': False,
- 'host1': 'host2',
- 'port1': 9293,
- 'use_ssl1': True}
-
- # Leave the list in a known-order
- def _fake_shuffle(servers):
- pass
-
- def _fake_create_glance_client(context, host, port, use_ssl, version):
- attempt = info['num_calls']
- self.assertEqual(host, info['host%s' % attempt])
- self.assertEqual(port, info['port%s' % attempt])
- self.assertEqual(use_ssl, info['use_ssl%s' % attempt])
- return _create_failing_glance_client(info)
-
- self.stubs.Set(random, 'shuffle', _fake_shuffle)
- self.stubs.Set(glance, '_create_glance_client',
- _fake_create_glance_client)
-
- client = glance.GlanceClientWrapper()
- client2 = glance.GlanceClientWrapper()
- client.call(ctxt, 1, 'get', 'meow')
- self.assertEqual(info['num_calls'], 2)
-
- def _fake_shuffle2(servers):
- # fake shuffle in a known manner
- servers.append(servers.pop(0))
-
- self.stubs.Set(random, 'shuffle', _fake_shuffle2)
-
- info = {'num_calls': 0,
- 'host0': 'host2',
- 'port0': 9293,
- 'use_ssl0': True,
- 'host1': 'host3',
- 'port1': 9294,
- 'use_ssl1': False}
-
- client2.call(ctxt, 1, 'get', 'meow')
- self.assertEqual(info['num_calls'], 2)
-
-
class TestGlanceUrl(test.NoDBTestCase):
def test_generate_glance_http_url(self):
@@ -1222,7 +1152,7 @@ def test_generate_glance_https_url(self):
self.assertEqual(generated_url, https_url)
-class TestGlanceApiServers(test.TestCase):
+class TestGlanceApiServers(test.NoDBTestCase):
def test_get_ipv4_api_servers(self):
self.flags(api_servers=['10.0.1.1:9292',
@@ -1257,18 +1187,14 @@ def test_get_ipv6_api_servers(self):
class TestUpdateGlanceImage(test.NoDBTestCase):
- def test_start(self):
+ @mock.patch('nova.image.glance.GlanceImageService')
+ def test_start(self, mock_glance_image_service):
consumer = glance.UpdateGlanceImage(
'context', 'id', 'metadata', 'stream')
- image_service = self.mox.CreateMock(glance.GlanceImageService)
-
- self.mox.StubOutWithMock(glance, 'get_remote_image_service')
-
- glance.get_remote_image_service(
- 'context', 'id').AndReturn((image_service, 'image_id'))
- image_service.update(
- 'context', 'image_id', 'metadata', 'stream', purge_props=False)
- self.mox.ReplayAll()
+ with mock.patch.object(glance, 'get_remote_image_service') as a_mock:
+ a_mock.return_value = (mock_glance_image_service, 'image_id')
- consumer.start()
+ consumer.start()
+ mock_glance_image_service.update.assert_called_with(
+ 'context', 'image_id', 'metadata', 'stream', purge_props=False)
diff --git a/nova/tests/image/test_s3.py b/nova/tests/image/test_s3.py
index d5fb984a30..7472e0fa79 100644
--- a/nova/tests/image/test_s3.py
+++ b/nova/tests/image/test_s3.py
@@ -123,7 +123,7 @@ def test_show_translates_correctly(self):
self.image_service.show(self.context, '1')
def test_show_translates_image_state_correctly(self):
- def my_fake_show(self, context, image_id):
+ def my_fake_show(self, context, image_id, **kwargs):
fake_state_map = {
'155d900f-4e14-4e4c-a73d-069cbf4541e6': 'downloading',
'a2459075-d96c-40d5-893e-577ff92e721c': 'failed_decrypt',
diff --git a/nova/tests/image/test_transfer_modules.py b/nova/tests/image/test_transfer_modules.py
new file mode 100644
index 0000000000..51920c36aa
--- /dev/null
+++ b/nova/tests/image/test_transfer_modules.py
@@ -0,0 +1,101 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urlparse
+
+import mock
+
+from nova import exception
+from nova.image.download import file as tm_file
+from nova import test
+
+
+class TestFileTransferModule(test.NoDBTestCase):
+
+ @mock.patch('nova.virt.libvirt.utils.copy_image')
+ def test_filesystem_success(self, copy_mock):
+ self.flags(allowed_direct_url_schemes=['file'], group='glance')
+ self.flags(group='image_file_url', filesystems=['gluster'])
+
+ mountpoint = '/gluster'
+ url = 'file:///gluster/my/image/path'
+ url_parts = urlparse.urlparse(url)
+ fs_id = 'someid'
+ loc_meta = {
+ 'id': fs_id,
+ 'mountpoint': mountpoint
+ }
+ dst_file = mock.MagicMock()
+
+ tm = tm_file.FileTransfer()
+
+ # NOTE(Jbresnah) The following options must be added after the module
+ # has added the specific groups.
+ self.flags(group='image_file_url:gluster', id=fs_id)
+ self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
+
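+ # The file scheme is stripped from the url and the local path is
+ # handed straight to copy_image.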
+ tm.download(mock.sentinel.ctx, url_parts, dst_file, loc_meta)
+ copy_mock.assert_called_once_with('/gluster/my/image/path', dst_file)
+
+ @mock.patch('nova.virt.libvirt.utils.copy_image')
+ def test_filesystem_mismatched_mountpoint(self, copy_mock):
+ self.flags(allowed_direct_url_schemes=['file'], group='glance')
+ self.flags(group='image_file_url', filesystems=['gluster'])
+
+ mountpoint = '/gluster'
+ # The url should include the mountpoint ('/gluster') before
+ # my/image/path; since it does not, the mountpoint check must fail.
+ url = 'file:///my/image/path'
+ url_parts = urlparse.urlparse(url)
+ fs_id = 'someid'
+ loc_meta = {
+ 'id': fs_id,
+ 'mountpoint': mountpoint
+ }
+ dst_file = mock.MagicMock()
+
+ tm = tm_file.FileTransfer()
+
+ self.flags(group='image_file_url:gluster', id=fs_id)
+ self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
+
+ self.assertRaises(exception.ImageDownloadModuleMetaDataError,
+ tm.download, mock.sentinel.ctx, url_parts,
+ dst_file, loc_meta)
+ self.assertFalse(copy_mock.called)
+
+ @mock.patch('nova.virt.libvirt.utils.copy_image')
+ def test_filesystem_mismatched_filesystem(self, copy_mock):
+ self.flags(allowed_direct_url_schemes=['file'], group='glance')
+ self.flags(group='image_file_url', filesystems=['gluster'])
+
+ mountpoint = '/gluster'
+ # The loc_meta id below ('funky') does not match the configured
+ # filesystem id, so no matching download module is found.
+ url = 'file:///my/image/path'
+ url_parts = urlparse.urlparse(url)
+ fs_id = 'someid'
+ loc_meta = {
+ 'id': 'funky',
+ 'mountpoint': mountpoint
+ }
+ dst_file = mock.MagicMock()
+
+ tm = tm_file.FileTransfer()
+
+ self.flags(group='image_file_url:gluster', id=fs_id)
+ self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
+
+ self.assertRaises(exception.ImageDownloadModuleError,
+ tm.download, mock.sentinel.ctx, url_parts,
+ dst_file, loc_meta)
+ self.assertFalse(copy_mock.called)
diff --git a/nova/tests/image_fixtures.py b/nova/tests/image_fixtures.py
new file mode 100644
index 0000000000..9ab09b989a
--- /dev/null
+++ b/nova/tests/image_fixtures.py
@@ -0,0 +1,79 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+# nova.image.glance._translate_from_glance() returns datetime
+# objects, not strings.
+NOW_DATE = datetime.datetime(2010, 10, 11, 10, 30, 22)
+
+
+def get_image_fixtures():
+ """Returns a set of image fixture dicts for use in unit tests.
+
+ Returns a set of dicts representing images/snapshots of varying statuses
+ that would be returned from a call to
+ `glanceclient.client.Client.images.list`. The IDs of the images returned
+ start at 123 and go to 131, with the following brief summary of image
+ attributes:
+
+ | ID   Type          Status          Notes
+ | ---------------------------------------------------------------
+ | 123  Public image  active
+ | 124  Snapshot      queued
+ | 125  Snapshot      saving
+ | 126  Snapshot      active
+ | 127  Snapshot      killed
+ | 128  Snapshot      deleted
+ | 129  Snapshot      pending_delete
+ | 130  Public image  active          Has no name
+ | 131  Public image  active          Has no name; owned by
+ |                                    'authorized_fake'
+
+ """
+
+ image_id = 123
+
+ fixtures = []
+
+ def add_fixture(**kwargs):
+ kwargs.update(created_at=NOW_DATE,
+ updated_at=NOW_DATE)
+ fixtures.append(kwargs)
+
+ # Public image
+ add_fixture(id=str(image_id), name='public image', is_public=True,
+ status='active', properties={'key1': 'value1'},
+ min_ram="128", min_disk="10", size='25165824')
+ image_id += 1
+
+ # Snapshot for User 1
+ uuid = 'aa640691-d1a7-4a67-9d3c-d35ee6b3cc74'
+ snapshot_properties = {'instance_uuid': uuid, 'user_id': 'fake'}
+ for status in ('queued', 'saving', 'active', 'killed',
+ 'deleted', 'pending_delete'):
+ deleted = (status == 'deleted')
+ deleted_at = NOW_DATE if deleted else None
+
+ add_fixture(id=str(image_id), name='%s snapshot' % status,
+ is_public=False, status=status,
+ properties=snapshot_properties, size='25165824',
+ deleted=deleted, deleted_at=deleted_at)
+ image_id += 1
+
+ # Image without a name
+ add_fixture(id=str(image_id), is_public=True, status='active',
+ properties={})
+ # Image for permission tests
+ image_id += 1
+ add_fixture(id=str(image_id), is_public=True, status='active',
+ properties={}, owner='authorized_fake')
+
+ return fixtures
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
index da80e5bd3b..0e80d98baf 100644
--- a/nova/tests/integrated/api/client.py
+++ b/nova/tests/integrated/api/client.py
@@ -17,7 +17,7 @@
import six.moves.urllib.parse as urlparse
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.tests.image import fake
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index c408950225..6830422bff 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -416,6 +416,14 @@
"namespace": "http://docs.openstack.org/compute/ext/extended_hypervisors/api/v1.1",
"updated": "%(isotime)s"
},
+ {
+ "alias": "os-hypervisor-status",
+ "description": "%(text)s",
+ "links": [],
+ "name": "HypervisorStatus",
+ "namespace": "http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1",
+ "updated": "%(isotime)s"
+ },
{
"alias": "os-server-external-events",
"description": "%(text)s",
@@ -480,6 +488,14 @@
"namespace": "http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1",
"updated": "%(isotime)s"
},
+ {
+ "alias": "os-extended-networks",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_networks/api/v2",
+ "updated": "%(isotime)s"
+ },
{
"alias": "os-extended-quotas",
"description": "%(text)s",
@@ -544,6 +560,14 @@
"namespace": "http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1",
"updated": "%(isotime)s"
},
+ {
+ "alias": "os-server-list-multi-status",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ServerListMultiStatus",
+ "namespace": "http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2",
+ "updated": "%(isotime)s"
+ },
{
"alias": "os-server-password",
"description": "%(text)s",
@@ -671,6 +695,14 @@
"name": "ServerGroups",
"namespace": "http://docs.openstack.org/compute/ext/servergroups/api/v2",
"updated": "%(isotime)s"
+ },
+ {
+ "alias": "os-extended-evacuate-find-host",
+ "description": "%(text)s",
+ "links": [],
+ "name": "ExtendedEvacuateFindHost",
+ "namespace": "http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2",
+ "updated": "%(isotime)s"
}
]
}
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index 08c9b9f3c8..9cacb12676 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -150,6 +150,9 @@
%(text)s
+
+ %(text)s
+
%(text)s
@@ -171,6 +174,9 @@
%(text)s
+
+ %(text)s
+
%(text)s
@@ -195,6 +201,9 @@
%(text)s
+
+ %(text)s
+
%(text)s
@@ -251,4 +260,7 @@
%(text)s
+
+ %(text)s
+
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..5e2c2e6ef0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s",
+ "onSharedStorage": "%(onSharedStorage)s"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl
new file mode 100644
index 0000000000..a86c9e5c8a
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-req.xml.tpl
@@ -0,0 +1,5 @@
+
+
+
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl
new file mode 100644
index 0000000000..0da07da5b8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "adminPass": "%(password)s"
+}
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl
new file mode 100644
index 0000000000..b3b95fdde4
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-evacuate-find-host-resp.xml.tpl
@@ -0,0 +1,2 @@
+
+%(password)s
\ No newline at end of file
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+
+
+
+ Apache1
+
+
+
+ ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+ dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+ IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+ c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+ QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+ ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+ dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+ c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+
+
+
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-evacuate-find-host/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+
+
+
+
+
+
diff --git a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl
index 6b287a838c..a1e5f2080b 100644
--- a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.json.tpl
@@ -8,7 +8,7 @@
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": %(hypervisor_id)s,
"local_gb": 1028,
"local_gb_used": 0,
diff --git a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl
index 5b9f66416e..ed2a8b0829 100644
--- a/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-extended-hypervisors/hypervisors-show-with-ip-resp.xml.tpl
@@ -1,4 +1,4 @@
-
+
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl
new file mode 100644
index 0000000000..18515bd6c4
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.json.tpl
@@ -0,0 +1,12 @@
+{
+ "network": {
+ "label": "new net 111",
+ "cidr": "10.20.105.0/24",
+ "mtu": 9000,
+ "dhcp_server": "10.20.105.2",
+ "enable_dhcp": false,
+ "share_address": true,
+ "allowed_start": "10.20.105.10",
+ "allowed_end": "10.20.105.200"
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl
new file mode 100644
index 0000000000..3cc79bd837
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-networks/network-create-req.xml.tpl
@@ -0,0 +1,10 @@
+
+
+ 10.20.105.0/24
+ 9000
+ 10.20.105.2
+ False
+ True
+ 10.20.105.10
+ 10.20.105.200
+
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl
new file mode 100644
index 0000000000..5cf155b13f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.json.tpl
@@ -0,0 +1,36 @@
+{
+ "network": {
+ "bridge": null,
+ "vpn_public_port": null,
+ "dhcp_start": "%(ip)s",
+ "bridge_interface": null,
+ "updated_at": null,
+ "id": "%(id)s",
+ "cidr_v6": null,
+ "deleted_at": null,
+ "gateway": "%(ip)s",
+ "rxtx_base": null,
+ "label": "new net 111",
+ "priority": null,
+ "project_id": null,
+ "vpn_private_address": null,
+ "deleted": null,
+ "vlan": null,
+ "broadcast": "%(ip)s",
+ "netmask": "%(ip)s",
+ "injected": null,
+ "cidr": "10.20.105.0/24",
+ "vpn_public_address": null,
+ "multi_host": null,
+ "dns2": null,
+ "created_at": null,
+ "host": null,
+ "gateway_v6": null,
+ "netmask_v6": null,
+ "dns1": null,
+ "mtu": 9000,
+ "dhcp_server": "10.20.105.2",
+ "enable_dhcp": false,
+ "share_address": true
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl
new file mode 100644
index 0000000000..3a757c5f2f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-networks/network-create-resp.xml.tpl
@@ -0,0 +1,34 @@
+
+ None
+ None
+ %(ip)s
+ None
+ None
+ %(id)s
+ None
+ None
+ %(ip)s
+ None
+
+ None
+ None
+ None
+ False
+ None
+ %(ip)s
+ %(ip)s
+ None
+ 10.20.105.0/24
+ None
+ None
+ None
+ None
+ None
+ None
+ None
+ None
+ 9000
+ 10.20.105.2
+ False
+ True
+
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl
new file mode 100644
index 0000000000..ac75fe7fb1
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.json.tpl
@@ -0,0 +1,37 @@
+{
+ "network":
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(strtime)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000,
+ "mtu": null,
+ "dhcp_server": "%(ip)s",
+ "enable_dhcp": true,
+ "share_address": false
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl
new file mode 100644
index 0000000000..3139ca88a8
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-networks/network-show-resp.xml.tpl
@@ -0,0 +1,35 @@
+
+
+ br100
+ 1000
+ %(ip)s
+ eth0
+ %(xmltime)s
+ %(id)s
+ None
+ None
+ %(ip)s
+ None
+
+ None
+ 1234
+ %(ip)s
+ False
+ 100
+ %(ip)s
+ %(ip)s
+ False
+ 10.0.0.0/29
+ %(ip)s
+ False
+ None
+ %(xmltime)s
+ nsokolov-desktop
+ None
+ None
+ None
+ None
+ %(ip)s
+ True
+ False
+
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl
new file mode 100644
index 0000000000..ccdd586a0f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.json.tpl
@@ -0,0 +1,72 @@
+{
+ "networks": [
+ {
+ "bridge": "br100",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.0/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": "nsokolov-desktop",
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_0",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": "1234",
+ "rxtx_base": null,
+ "updated_at": "%(strtime)s",
+ "vlan": 100,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": "%(ip)s",
+ "vpn_public_port": 1000,
+ "mtu": null,
+ "dhcp_server": "%(ip)s",
+ "enable_dhcp": true,
+ "share_address": false
+ },
+ {
+ "bridge": "br101",
+ "bridge_interface": "eth0",
+ "broadcast": "%(ip)s",
+ "cidr": "10.0.0.10/29",
+ "cidr_v6": null,
+ "created_at": "%(strtime)s",
+ "deleted": false,
+ "deleted_at": null,
+ "dhcp_start": "%(ip)s",
+ "dns1": null,
+ "dns2": null,
+ "gateway": "%(ip)s",
+ "gateway_v6": null,
+ "host": null,
+ "id": "%(id)s",
+ "injected": false,
+ "label": "mynet_1",
+ "multi_host": false,
+ "netmask": "%(ip)s",
+ "netmask_v6": null,
+ "priority": null,
+ "project_id": null,
+ "rxtx_base": null,
+ "updated_at": null,
+ "vlan": 101,
+ "vpn_private_address": "%(ip)s",
+ "vpn_public_address": null,
+ "vpn_public_port": 1001,
+ "mtu": null,
+ "dhcp_server": "%(ip)s",
+ "enable_dhcp": true,
+ "share_address": false
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl
new file mode 100644
index 0000000000..0b7f456402
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-extended-networks/networks-list-resp.xml.tpl
@@ -0,0 +1,71 @@
+
+
+
+ br100
+ 1000
+ %(ip)s
+ eth0
+ %(xmltime)s
+ %(id)s
+ None
+ None
+ %(ip)s
+ None
+
+ None
+ 1234
+ %(ip)s
+ False
+ 100
+ %(ip)s
+ %(ip)s
+ False
+ 10.0.0.0/29
+ %(ip)s
+ False
+ None
+ %(xmltime)s
+ nsokolov-desktop
+ None
+ None
+ None
+ None
+ %(ip)s
+ True
+ False
+
+
+ br101
+ 1001
+ %(ip)s
+ eth0
+ None
+ %(id)s
+ None
+ None
+ %(ip)s
+ None
+
+ None
+ None
+ %(ip)s
+ False
+ 101
+ %(ip)s
+ %(ip)s
+ False
+ 10.0.0.10/29
+ None
+ False
+ None
+ %(xmltime)s
+ None
+ None
+ None
+ None
+ None
+ %(ip)s
+ True
+ False
+
+
diff --git a/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl
new file mode 100644
index 0000000000..14464ccf4d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.json.tpl
@@ -0,0 +1,27 @@
+{
+ "hypervisor": {
+ "cpu_info": "?",
+ "current_workload": 0,
+ "disk_available_least": 0,
+ "free_disk_gb": 1028,
+ "free_ram_mb": 7680,
+ "hypervisor_hostname": "fake-mini",
+ "hypervisor_type": "fake",
+ "hypervisor_version": 1000,
+ "id": %(hypervisor_id)s,
+ "local_gb": 1028,
+ "local_gb_used": 0,
+ "memory_mb": 8192,
+ "memory_mb_used": 512,
+ "running_vms": 0,
+ "state": "up",
+ "status": "enabled",
+ "service": {
+ "host": "%(host_name)s",
+ "id": 2,
+ "disabled_reason": null
+ },
+ "vcpus": 1,
+ "vcpus_used": 0
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl
new file mode 100644
index 0000000000..6cfd860af5
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-hypervisor-status/hypervisors-show-with-status-resp.xml.tpl
@@ -0,0 +1,4 @@
+
+
+
+
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
index db29146071..9ccda9c7e6 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
@@ -9,7 +9,7 @@
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
index e8d8a3f40a..1169ce1e01 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-detail-resp.xml.tpl
@@ -1,6 +1,6 @@
-  <hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_version="1" disk_available_least="0" free_ram_mb="7680" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_hostname="fake-mini" memory_mb="8192" current_workload="0" vcpus="1" local_gb="1028" id="1" memory_mb_used="512">
+  <hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_version="1000" disk_available_least="0" free_ram_mb="7680" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_hostname="fake-mini" memory_mb="8192" current_workload="0" vcpus="1" local_gb="1028" id="1" memory_mb_used="512">
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
index c9638423d2..356316d61f 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
@@ -7,7 +7,7 @@
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": %(hypervisor_id)s,
"local_gb": 1028,
"local_gb_used": 0,
diff --git a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
index d7af1246c9..090f720398 100644
--- a/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-hypervisors/hypervisors-show-resp.xml.tpl
@@ -1,4 +1,4 @@
-<hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_version="1" disk_available_least="0" free_ram_mb="7680" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_hostname="fake-mini" memory_mb="8192" current_workload="0" vcpus="1" local_gb="1028" id="%(hypervisor_id)s" memory_mb_used="512">
+<hypervisor vcpus_used="0" hypervisor_type="fake" local_gb_used="0" hypervisor_version="1000" disk_available_least="0" free_ram_mb="7680" cpu_info="?" running_vms="0" free_disk_gb="1028" hypervisor_hostname="fake-mini" memory_mb="8192" current_workload="0" vcpus="1" local_gb="1028" id="%(hypervisor_id)s" memory_mb_used="512">
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl
new file mode 100644
index 0000000000..d3916d1aa6
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server" : {
+ "name" : "new-server-test",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality" : [
+ {
+ "path" : "/etc/banner.txt",
+ "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ]
+ }
+}
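
Note: the long "contents" value is plain base64, which the personality API requires for injected file contents. A stdlib round-trip shows the encoding the template assumes:

    import base64

    # Illustrative; the sample's string decodes the same way.
    encoded = base64.b64encode(b'banner text')
    assert base64.b64decode(encoded) == b'banner text'
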
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl
new file mode 100644
index 0000000000..f926149842
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-req.xml.tpl
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test">
+    <metadata>
+        <meta key="My Server Name">Apache1</meta>
+    </metadata>
+    <personality>
+        <file path="/etc/banner.txt">
+            ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+            dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+            IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+            c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+            QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+            ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+            dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+            c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+            b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+        </file>
+    </personality>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl
new file mode 100644
index 0000000000..d5f030c873
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl
new file mode 100644
index 0000000000..3bb13e69bd
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/server-post-resp.xml.tpl
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s">
+    <metadata/>
+    <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/>
+    <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/>
+</server>
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..8b97dc28d7
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(host)s/v2/openstack/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(host)s/openstack/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl b/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl
new file mode 100644
index 0000000000..03bee03a6e
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-server-list-multi-status/servers-list-resp.xml.tpl
@@ -0,0 +1,7 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1">
+    <server name="new-server-test" id="%(id)s">
+        <atom:link href="%(host)s/v2/openstack/servers/%(id)s" rel="self"/>
+        <atom:link href="%(host)s/openstack/servers/%(id)s" rel="bookmark"/>
+    </server>
+</servers>
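
Note: the extension exercised by these templates filters servers on a repeated ?status= query parameter; repeated keys simply collect into a list of acceptable states, e.g.:

    from six.moves.urllib.parse import parse_qs

    # 'status=active&status=error' is one key with two allowed values.
    params = parse_qs('status=active&status=error')
    assert params['status'] == ['active', 'error']
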
diff --git a/nova/tests/integrated/api_samples_test_base.py b/nova/tests/integrated/api_samples_test_base.py
index 6a0f372460..8932adbee2 100644
--- a/nova/tests/integrated/api_samples_test_base.py
+++ b/nova/tests/integrated/api_samples_test_base.py
@@ -19,7 +19,7 @@
from lxml import etree
import six
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import test
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index 62c36a0889..dc04dacc77 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -69,7 +69,6 @@ def setUp(self):
f = self._get_flags()
self.flags(**f)
self.flags(verbose=True)
- self.flags(periodic_enable=False)
self.useFixture(test.ReplaceModule('crypto', fake_crypto))
nova.tests.image.fake.stub_out_image_service(self.stubs)
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 29aa19af93..13818e7e3d 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -17,7 +17,6 @@
import copy
import datetime
import inspect
-import json
import os
import re
import urllib
@@ -236,6 +235,23 @@ class ServersSampleHideAddressesXMLTest(ServersSampleHideAddressesJsonTest):
ctype = 'xml'
+class ServersSampleMultiStatusJsonTest(ServersSampleBase):
+ extension_name = '.'.join(('nova.api.openstack.compute.contrib',
+ 'server_list_multi_status',
+ 'Server_list_multi_status'))
+
+ def test_servers_list(self):
+ uuid = self._post_server()
+ response = self._do_get('servers?status=active&status=error')
+ subs = self._get_regexes()
+ subs['id'] = uuid
+ self._verify_response('servers-list-resp', subs, response, 200)
+
+
+class ServersSampleMultiStatusXMLTest(ServersSampleMultiStatusJsonTest):
+ ctype = 'xml'
+
+
class ServersMetadataJsonTest(ServersSampleBase):
def _create_and_set(self, subs):
uuid = self._post_server()
@@ -989,6 +1005,7 @@ def test_floating_ips_delete(self):
self.test_floating_ips_create()
response = self._do_delete('os-floating-ips/%d' % 1)
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
class ExtendedFloatingIpsJsonTest(FloatingIpsJsonTest):
@@ -1345,6 +1362,7 @@ def test_cloud_pipe_update(self):
'cloud-pipe-update-req',
subs)
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
class CloudPipeUpdateXmlTest(CloudPipeUpdateJsonTest):
@@ -1362,7 +1380,7 @@ def _get_flags(self):
def setUp(self):
super(AgentsJsonTest, self).setUp()
- fake_agents_list = [{'url': 'xxxxxxxxxxxx',
+ fake_agents_list = [{'url': 'http://example.com/path/to/resource',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
@@ -1403,7 +1421,7 @@ def fake_agent_build_destroy(context, agent_update_id):
def test_agent_create(self):
# Creates a new agent build.
- project = {'url': 'xxxxxxxxxxxx',
+ project = {'url': 'http://example.com/path/to/resource',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
@@ -1419,7 +1437,7 @@ def test_agent_create(self):
def test_agent_list(self):
# Return a list of all agent builds.
response = self._do_get('os-agents')
- project = {'url': 'xxxxxxxxxxxx',
+ project = {'url': 'http://example.com/path/to/resource',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
@@ -1433,7 +1451,7 @@ def test_agent_update(self):
# Update an existing agent build.
agent_id = 1
subs = {'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}
response = self._do_put('os-agents/%s' % agent_id,
'agent-update-put-req', subs)
@@ -1534,6 +1552,7 @@ def test_fixed_ip_reserve(self):
'fixedip-post-req',
project)
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
def test_get_fixed_ip(self):
# Return data about the given fixed ip.
@@ -1771,7 +1790,7 @@ def test_service_detail(self):
'status': 'disabled',
'state': 'up'}
subs.update(self._get_regexes())
- return self._verify_response('services-get-resp',
+ self._verify_response('services-get-resp',
subs, response, 200)
def test_service_disable_log_reason(self):
@@ -2036,7 +2055,8 @@ def fake_get_compute(context, host):
report_count=1,
updated_at='foo',
hypervisor_type='bar',
- hypervisor_version='1',
+ hypervisor_version=
+ utils.convert_version_to_int('1.0'),
disabled=False)
return {'compute_node': [service]}
self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
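
Note: this hunk is why every hypervisor sample's hypervisor_version flips from 1 to 1000: versions are now packed as integers with a factor of 1000 per dotted component. A simplified sketch of the conversion used above:

    def convert_version_to_int(version):
        # '1.0' -> 1 * 1000 + 0 = 1000 (simplified from nova.utils)
        result = 0
        for part in version.split('.'):
            result = result * 1000 + int(part)
        return result

    assert convert_version_to_int('1.0') == 1000
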
@@ -2110,7 +2130,7 @@ class ConsoleAuthTokensSampleJsonTests(ServersSampleBase):
"Console_auth_tokens")
def _get_console_url(self, data):
- return json.loads(data)["console"]["url"]
+ return jsonutils.loads(data)["console"]["url"]
def _get_console_token(self, uuid):
response = self._do_post('servers/%s/action' % uuid,
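
Note: jsonutils is the oslo JSON wrapper this tree standardizes on; for a plain loads() it behaves like the stdlib json module, so the swap is mechanical:

    from nova.openstack.common import jsonutils

    data = jsonutils.loads('{"console": {"url": "/vnc_auto.html"}}')
    assert data["console"]["url"] == "/vnc_auto.html"
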
@@ -2582,10 +2602,11 @@ def test_create_network(self):
def test_delete_network(self):
response = self._do_post('os-tenant-networks', "networks-post-req", {})
- net = json.loads(response.read())
+ net = jsonutils.loads(response.read())
response = self._do_delete('os-tenant-networks/%s' %
net["network"]["id"])
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
class OsNetworksXmlTests(OsNetworksJsonTests):
@@ -2597,6 +2618,7 @@ def test_delete_network(self):
network_id = net.find('id').text
response = self._do_delete('os-tenant-networks/%s' % network_id)
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
class NetworksJsonTests(ApiSampleTestBaseV2):
@@ -2629,6 +2651,7 @@ def test_network_disassociate(self):
response = self._do_post('os-networks/%s/action' % uuid,
'networks-disassociate-req', {})
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
def test_network_show(self):
uuid = test_networks.FAKE_NETWORKS[0]['uuid']
@@ -2646,12 +2669,62 @@ def test_network_add(self):
response = self._do_post("os-networks/add",
'network-add-req', {})
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
+
+ def test_network_delete(self):
+ response = self._do_delete('os-networks/always_delete')
+ self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
class NetworksXmlTests(NetworksJsonTests):
ctype = 'xml'
+class ExtendedNetworksJsonTests(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib."
+ "os_networks.Os_networks")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "extended_networks.Extended_networks")
+
+ def setUp(self):
+ super(ExtendedNetworksJsonTests, self).setUp()
+ fake_network_api = test_networks.FakeNetworkAPI()
+ self.stubs.Set(network_api.API, "get_all",
+ fake_network_api.get_all)
+ self.stubs.Set(network_api.API, "get",
+ fake_network_api.get)
+ self.stubs.Set(network_api.API, "associate",
+ fake_network_api.associate)
+ self.stubs.Set(network_api.API, "delete",
+ fake_network_api.delete)
+ self.stubs.Set(network_api.API, "create",
+ fake_network_api.create)
+ self.stubs.Set(network_api.API, "add_network_to_project",
+ fake_network_api.add_network_to_project)
+
+ def test_network_list(self):
+ response = self._do_get('os-networks')
+ subs = self._get_regexes()
+ self._verify_response('networks-list-resp', subs, response, 200)
+
+ def test_network_show(self):
+ uuid = test_networks.FAKE_NETWORKS[0]['uuid']
+ response = self._do_get('os-networks/%s' % uuid)
+ subs = self._get_regexes()
+ self._verify_response('network-show-resp', subs, response, 200)
+
+ def test_network_create(self):
+ response = self._do_post("os-networks",
+ 'network-create-req', {})
+ subs = self._get_regexes()
+ self._verify_response('network-create-resp', subs, response, 200)
+
+
+class ExtendedNetworksXmlTests(ExtendedNetworksJsonTests):
+ ctype = 'xml'
+
+
class NetworksAssociateJsonTests(ApiSampleTestBaseV2):
extension_name = ("nova.api.openstack.compute.contrib"
".networks_associate.Networks_associate")
@@ -2681,24 +2754,28 @@ def test_disassociate(self):
'network-disassociate-req',
{})
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
def test_disassociate_host(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-host-req',
{})
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
def test_disassociate_project(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-project-req',
{})
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
def test_associate_host(self):
response = self._do_post('os-networks/1/action',
'network-associate-host-req',
{"host": "testHost"})
self.assertEqual(response.status, 202)
+ self.assertEqual(response.read(), "")
class NetworksAssociateXmlTests(NetworksAssociateJsonTests):
@@ -3004,8 +3081,7 @@ def test_list_floatingippools(self):
pool_list = ["pool1", "pool2"]
def fake_get_floating_ip_pools(self, context):
- return [{'name': pool_list[0]},
- {'name': pool_list[1]}]
+ return pool_list
self.stubs.Set(network_api.API, "get_floating_ip_pools",
fake_get_floating_ip_pools)
@@ -3238,6 +3314,53 @@ class EvacuateXmlTest(EvacuateJsonTest):
ctype = 'xml'
+class EvacuateFindHostSampleJsonTest(ServersSampleBase):
+ extends_name = ("nova.api.openstack.compute.contrib"
+ ".evacuate.Evacuate")
+
+ extension_name = ("nova.api.openstack.compute.contrib"
+ ".extended_evacuate_find_host.Extended_evacuate_find_host")
+
+ @mock.patch('nova.compute.manager.ComputeManager._check_instance_exists')
+ @mock.patch('nova.compute.api.HostAPI.service_get_by_compute_host')
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock, service_get_mock,
+ check_instance_mock):
+ self.uuid = self._post_server()
+
+ req_subs = {
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+
+ check_instance_mock.return_value = False
+
+ def fake_service_get_by_compute_host(self, context, host):
+ return {
+ 'host_name': host,
+ 'service': 'compute',
+ 'zone': 'nova'
+ }
+ service_get_mock.side_effect = fake_service_get_by_compute_host
+ with mock.patch.object(service_group_api.API, 'service_is_up',
+ return_value=False):
+ response = self._do_post('servers/%s/action' % self.uuid,
+ 'server-evacuate-find-host-req', req_subs)
+ subs = self._get_regexes()
+ self._verify_response('server-evacuate-find-host-resp', subs,
+ response, 200)
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=False, preserve_ephemeral=mock.ANY,
+ host=None)
+
+
+class EvacuateFindHostSampleXmlTests(EvacuateFindHostSampleJsonTest):
+ ctype = "xml"
+
+
class FloatingIpDNSJsonTest(ApiSampleTestBaseV2):
extension_name = ("nova.api.openstack.compute.contrib.floating_ip_dns."
"Floating_ip_dns")
@@ -3536,6 +3659,12 @@ class HypervisorsSampleJsonTests(ApiSampleTestBaseV2):
extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
"Hypervisors")
+ def setUp(self):
+ super(HypervisorsSampleJsonTests, self).setUp()
+ mock.patch("nova.servicegroup.API.service_is_up",
+ return_value=True).start()
+ self.addCleanup(mock.patch.stopall)
+
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors')
self._verify_response('hypervisors-list-resp', {}, response, 200)
@@ -3598,9 +3727,31 @@ def test_hypervisors_show_with_ip(self):
class ExtendedHypervisorsXmlTest(ExtendedHypervisorsJsonTest):
+ ctype = "xml"
+
+
+class HypervisorStatusJsonTest(ApiSampleTestBaseV2):
+ extends_name = ("nova.api.openstack.compute.contrib."
+ "hypervisors.Hypervisors")
+ extension_name = ("nova.api.openstack.compute.contrib."
+ "hypervisor_status.Hypervisor_status")
+
+ def test_hypervisors_show_with_status(self):
+ hypervisor_id = 1
+ subs = {
+ 'hypervisor_id': hypervisor_id
+ }
+ response = self._do_get('os-hypervisors/%s' % hypervisor_id)
+ subs.update(self._get_regexes())
+ self._verify_response('hypervisors-show-with-status-resp',
+ subs, response, 200)
+
+
+class HypervisorStatusXmlTest(HypervisorStatusJsonTest):
ctype = 'xml'
+@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
class HypervisorsCellsSampleJsonTests(ApiSampleTestBaseV2):
extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
"Hypervisors")
@@ -3609,9 +3760,11 @@ def setUp(self):
self.flags(enable=True, cell_type='api', group='cells')
super(HypervisorsCellsSampleJsonTests, self).setUp()
- def test_hypervisor_uptime(self):
- fake_hypervisor = {'service': {'host': 'fake-mini'}, 'id': 1,
- 'hypervisor_hostname': 'fake-mini'}
+ def test_hypervisor_uptime(self, mocks):
+ fake_hypervisor = {'service': {'host': 'fake-mini',
+ 'disabled': False,
+ 'disabled_reason': None},
+ 'id': 1, 'hypervisor_hostname': 'fake-mini'}
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
@@ -3894,7 +4047,7 @@ class VolumeAttachmentsSampleBase(ServersSampleBase):
def _stub_db_bdms_get_all_by_instance(self, server_id):
def fake_bdms_get_all_by_instance(context, instance_uuid,
- use_slave=False):
+ use_subordinate=False):
bdms = [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 06e8c0401f..6142e045e3 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -282,7 +282,7 @@ def _wait_for_deletion(self, server_id):
LOG.debug("Found_server=%s" % found_server)
# TODO(justinsb): Mock doesn't yet do accurate state changes
- #if found_server['status'] != 'deleting':
+ # if found_server['status'] != 'deleting':
# break
time.sleep(.1)
@@ -512,8 +512,8 @@ def test_create_server_with_injected_files(self):
class ServersTestV3(client.TestOpenStackClientV3Mixin, ServersTest):
_force_delete_parameter = 'force_delete'
_api_version = 'v3'
- _image_ref_parameter = 'image_ref'
- _flavor_ref_parameter = 'flavor_ref'
+ _image_ref_parameter = 'imageRef'
+ _flavor_ref_parameter = 'flavorRef'
_return_resv_id_parameter = 'os-multiple-create:return_reservation_id'
_min_count_parameter = 'os-multiple-create:min_count'
_access_ipv4_parameter = None
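
Note: the long run of v3 sample edits that follows applies one pattern: v3 payload keys are realigned to the v2 wire names. The renames visible in these hunks, collected for reference (not an exhaustive API mapping):

    V3_TO_V2_KEYS = {
        'image_ref': 'imageRef',
        'flavor_ref': 'flavorRef',
        'admin_password': 'adminPass',
        'host_id': 'hostId',
        'change_password': 'changePassword',
        'interface_attachment': 'interfaceAttachment',
        'os-extended-server-attributes:host': 'OS-EXT-SRV-ATTR:host',
        'os-extended-status:vm_state': 'OS-EXT-STS:vm_state',
    }
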
diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl
index e000296910..ed9c311145 100644
--- a/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/all_extensions/server-get-resp.json.tpl
@@ -22,13 +22,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
@@ -48,15 +48,15 @@
"My Server Name": "Apache1"
},
"name": "new-server-test",
- "os-config-drive:config_drive": "",
+ "config_drive": "",
"os-extended-availability-zone:availability_zone": "nova",
- "os-extended-server-attributes:host": "%(compute_host)s",
- "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s",
- "os-extended-server-attributes:instance_name": "instance-00000001",
- "os-extended-status:locked_by": null,
- "os-extended-status:power_state": 1,
- "os-extended-status:task_state": null,
- "os-extended-status:vm_state": "active",
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-STS:locked_by": null,
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
"os-extended-volumes:volumes_attached": [],
"os-pci:pci_devices": [{"id": 1}],
"os-server-usage:launched_at": "%(strtime)s",
diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/all_extensions/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl
index 740c3909b9..7f18b0677e 100644
--- a/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/all_extensions/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl
index 652714cf0f..ec50d2c8e8 100644
--- a/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/all_extensions/servers-details-resp.json.tpl
@@ -23,13 +23,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
@@ -49,15 +49,15 @@
"My Server Name": "Apache1"
},
"name": "new-server-test",
- "os-config-drive:config_drive": "",
+ "config_drive": "",
"os-extended-availability-zone:availability_zone": "nova",
- "os-extended-server-attributes:host": "%(compute_host)s",
- "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s",
- "os-extended-server-attributes:instance_name": "instance-00000001",
- "os-extended-status:locked_by": null,
- "os-extended-status:power_state": 1,
- "os-extended-status:task_state": null,
- "os-extended-status:vm_state": "active",
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-STS:locked_by": null,
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
"os-extended-volumes:volumes_attached": [],
"os-pci:pci_devices": [{"id": 1}],
"os-server-usage:launched_at": "%(strtime)s",
diff --git a/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/consoles/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/consoles/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl
index 61dc8279f8..bb115f6085 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild-resp.json.tpl
@@ -10,7 +10,7 @@
}
]
},
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"created": "%(isotime)s",
"flavor": {
"id": "1",
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(image_id)s",
"links": [
{
- "href": "%(glance_host)s/images/%(image_id)s",
+ "href": "%(host)s/images/%(image_id)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl
index 544edbf3fb..f1f7ed03c2 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-action-rebuild.json.tpl
@@ -2,7 +2,7 @@
"rebuild" : {
"os-access-ips:access_ip_v4": "%(access_ip_v4)s",
"os-access-ips:access_ip_v6": "%(access_ip_v6)s",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
"name" : "new-server-test",
"metadata" : {
"meta_var" : "meta_val"
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl
index 24b097f18f..9454f2be6d 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-get-resp.json.tpl
@@ -22,13 +22,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl
index d1f9852611..d99d2562aa 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-req.json.tpl
@@ -3,8 +3,8 @@
"os-access-ips:access_ip_v4": "%(access_ip_v4)s",
"os-access-ips:access_ip_v6": "%(access_ip_v6)s",
"name" : "new-server-test",
- "image_ref" : "%(host)s/openstack/images/%(image_id)s",
- "flavor_ref" : "%(host)s/openstack/flavors/1",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl
index 835bf9a813..495b3188fa 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-post-resp.json.tpl
@@ -2,7 +2,7 @@
"server": {
"os-access-ips:access_ip_v4": "%(access_ip_v4)s",
"os-access-ips:access_ip_v6": "%(access_ip_v6)s",
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl
index 925a162c30..7e4d0dd7aa 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-access-ips/server-put-resp.json.tpl
@@ -22,13 +22,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl
index 7a14faf6f0..447fa00500 100644
--- a/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-access-ips/servers-details-resp.json.tpl
@@ -23,13 +23,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-admin-actions/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl
index f58ef6e484..da615718fe 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-admin-password/admin-password-change-password.json.tpl
@@ -1,5 +1,5 @@
{
- "change_password" : {
- "admin_password" : "%(password)s"
+ "changePassword" : {
+ "adminPass" : "%(password)s"
}
}
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-admin-password/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl
index f6c760cc67..24ddede90b 100644
--- a/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-agents/agent-post-resp.json.tpl
@@ -5,7 +5,7 @@
"hypervisor": "hypervisor",
"md5hash": "add6bb58e139be103324d04d82d8f545",
"os": "os",
- "url": "xxxxxxxxxxxx",
+ "url": "http://example.com/path/to/resource",
"version": "8.0"
}
-}
\ No newline at end of file
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl
index 866994e4c9..2964c0f894 100644
--- a/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-agents/agent-update-put-resp.json.tpl
@@ -2,7 +2,7 @@
"agent": {
"agent_id": 1,
"md5hash": "add6bb58e139be103324d04d82d8f545",
- "url": "xxx://xxxx/xxx/xxx",
+ "url": "http://example.com/path/to/resource",
"version": "7.0"
}
-}
\ No newline at end of file
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl
index 73ba45c240..92e14e1dc5 100644
--- a/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-agents/agents-get-resp.json.tpl
@@ -6,8 +6,8 @@
"hypervisor": "hypervisor",
"md5hash": "add6bb58e139be103324d04d82d8f545",
"os": "os",
- "url": "xxxxxxxxxxxx",
+ "url": "http://example.com/path/to/resource",
"version": "8.0"
}
]
-}
\ No newline at end of file
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
index e0fcbe86a0..11dcf64373 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-req.json.tpl
@@ -1,5 +1,5 @@
{
- "interface_attachment": {
+ "interfaceAttachment": {
"port_id": "ce531f90-199f-48c0-816c-13e38010b442"
}
}
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
index 93b68d9c69..9dff234366 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-create-resp.json.tpl
@@ -1,5 +1,5 @@
{
- "interface_attachment": {
+ "interfaceAttachment": {
"fixed_ips": [
{
"ip_address": "192.168.1.3",
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
index 9d977378b7..192f9a6487 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-list-resp.json.tpl
@@ -1,5 +1,5 @@
{
- "interface_attachments": [
+ "interfaceAttachments": [
{
"fixed_ips": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
index 93b68d9c69..9dff234366 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/attach-interfaces-show-resp.json.tpl
@@ -1,5 +1,5 @@
{
- "interface_attachment": {
+ "interfaceAttachment": {
"fixed_ips": [
{
"ip_address": "192.168.1.3",
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-attach-interfaces/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl
index 39b6986b9f..f0fa5a5b42 100644
--- a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(host)s/openstack/images/%(image_id)s",
- "flavor_ref" : "%(host)s/openstack/flavors/1",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
"os-availability-zone:availability_zone": "nova",
"metadata" : {
"My Server Name" : "Apache1"
diff --git a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-availability-zone/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
index 13f51f5875..e07ccc46cd 100644
--- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-config-drive/server-config-drive-get-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "os-config-drive:config_drive": "%(cdrive)s",
+ "config_drive": "%(cdrive)s",
"addresses": {
"private": [
{
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-config-drive/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
index cc7fe80d46..b4bcf7c3fe 100644
--- a/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-config-drive/servers-config-drive-details-resp.json.tpl
@@ -1,7 +1,7 @@
{
"servers": [
{
- "os-config-drive:config_drive": "%(cdrive)s",
+ "config_drive": "%(cdrive)s",
"addresses": {
"private": [
{
@@ -22,13 +22,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-console-auth-tokens/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-console-output/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-console-output/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl
index e6c046ceb4..27557a3e9f 100644
--- a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-create-backup/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-deferred-delete/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..7ba9398ba6
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "evacuate": {
+ "admin_password": "%(adminPass)s",
+ "on_shared_storage": "%(onSharedStorage)s"
+ }
+}
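
Note: this request template deliberately omits "host"; with the find-host extension the field is optional and the scheduler picks the target, matching the host=None assertion in the v2 test earlier in this patch. A sketch of building such a body under that assumption:

    def evacuate_body(admin_password, on_shared_storage, host=None):
        # Omitting host defers target selection to the scheduler.
        body = {'evacuate': {'admin_password': admin_password,
                             'on_shared_storage': on_shared_storage}}
        if host is not None:
            body['evacuate']['host'] = host
        return body
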
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl
new file mode 100644
index 0000000000..e6d6ad9ed1
--- /dev/null
+++ b/nova/tests/integrated/v3/api_samples/os-evacuate/server-evacuate-find-host-resp.json.tpl
@@ -0,0 +1,3 @@
+{
+ "admin_password": "%(password)s"
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-evacuate/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl
index e9852475e0..a835dbf54f 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-get-resp.json.tpl
@@ -22,13 +22,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl
index 1d69092b09..2d2b4a8c1b 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-availability-zone/servers-detail-resp.json.tpl
@@ -23,13 +23,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl
index acb0ed6c3d..3244c1736e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-get-resp.json.tpl
@@ -1,8 +1,8 @@
{
"server": {
- "os-extended-server-attributes:host": "%(compute_host)s",
- "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s",
- "os-extended-server-attributes:instance_name": "%(instance_name)s",
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
"updated": "%(isotime)s",
"created": "%(isotime)s",
"addresses": {
@@ -24,13 +24,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl
index 115bba4df4..18474a64b5 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-server-attributes/servers-detail-resp.json.tpl
@@ -1,9 +1,9 @@
{
"servers": [
{
- "os-extended-server-attributes:host": "%(compute_host)s",
- "os-extended-server-attributes:hypervisor_hostname": "%(hypervisor_hostname)s",
- "os-extended-server-attributes:instance_name": "%(instance_name)s",
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
"updated": "%(isotime)s",
"created": "%(isotime)s",
"addresses": {
@@ -25,13 +25,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl
index a416cc6fc5..fd870e24fd 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-status/server-get-resp.json.tpl
@@ -1,9 +1,9 @@
{
"server": {
- "os-extended-status:task_state": null,
- "os-extended-status:vm_state": "active",
- "os-extended-status:power_state": 1,
- "os-extended-status:locked_by": null,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:locked_by": null,
"updated": "%(isotime)s",
"created": "%(isotime)s",
"addresses": {
@@ -25,13 +25,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-status/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl
index 06eb488262..5655465683 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-status/servers-detail-resp.json.tpl
@@ -1,10 +1,10 @@
{
"servers": [
{
- "os-extended-status:task_state": null,
- "os-extended-status:vm_state": "active",
- "os-extended-status:power_state": 1,
- "os-extended-status:locked_by": null,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:locked_by": null,
"updated": "%(isotime)s",
"created": "%(isotime)s",
"addresses": {
@@ -26,13 +26,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl
index b02b77b365..3b38100e51 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-get-resp.json.tpl
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-volumes/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
index 3d6b230170..e2561549a4 100644
--- a/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-extended-volumes/servers-detail-resp.json.tpl
@@ -22,13 +22,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
index ab38bd53f5..3a69fcd321 100644
--- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-get-resp.json.tpl
@@ -12,13 +12,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
index 185905c922..353d29f480 100644
--- a/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hide-server-addresses/servers-details-resp.json.tpl
@@ -12,13 +12,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
index 0678922114..2777eb4887 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-detail-resp.json.tpl
@@ -3,13 +3,15 @@
{
"cpu_info": "?",
"current_workload": 0,
+ "state": "up",
+ "status": "enabled",
"disk_available_least": 0,
"host_ip": "%(ip)s",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": %(hypervisor_id)s,
"local_gb": 1028,
"local_gb_used": 0,
@@ -18,7 +20,8 @@
"running_vms": 0,
"service": {
"host": "%(host_name)s",
- "id": 2
+ "id": 2,
+ "disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
index 8d94021274..710cdfcf9c 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-list-resp.json.tpl
@@ -2,7 +2,9 @@
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
+ "state": "up",
+ "status": "enabled",
"id": 1
}
]
-}
\ No newline at end of file
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
index 8d94021274..375627499d 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-search-resp.json.tpl
@@ -2,7 +2,9 @@
"hypervisors": [
{
"hypervisor_hostname": "fake-mini",
- "id": 1
+ "id": 1,
+ "state": "up",
+ "status": "enabled"
}
]
-}
\ No newline at end of file
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
index 934ef0c02d..710b05b930 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-servers-resp.json.tpl
@@ -2,6 +2,8 @@
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": 1,
+ "state": "up",
+ "status": "enabled",
"servers": []
}
-}
\ No newline at end of file
+}
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
index 6b287a838c..f125da01af 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-show-resp.json.tpl
@@ -3,12 +3,14 @@
"cpu_info": "?",
"current_workload": 0,
"disk_available_least": 0,
+ "state": "up",
+ "status": "enabled",
"host_ip": "%(ip)s",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": %(hypervisor_id)s,
"local_gb": 1028,
"local_gb_used": 0,
@@ -17,7 +19,8 @@
"running_vms": 0,
"service": {
"host": "%(host_name)s",
- "id": 2
+ "id": 2,
+ "disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
diff --git a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
index 8a36c65f23..e2f6d2e47e 100644
--- a/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-hypervisors/hypervisors-uptime-resp.json.tpl
@@ -2,6 +2,8 @@
"hypervisor": {
"hypervisor_hostname": "fake-mini",
"id": %(hypervisor_id)s,
+ "state": "up",
+ "status": "enabled",
"uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14"
}
}
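
The `state` and `status` fields introduced across these hypervisor samples pair servicegroup liveness with the service's administrative disabled flag. The mapping sketched below is an assumption inferred from the stubs later in this patch (`service_is_up` returning True, `"disabled": False` alongside `"status": "enabled"`), not a quote of Nova's view builder:

```python
# Assumed derivation of the new fields, inferred from the stubs in
# test_pci.py further down; Nova's actual view-builder code may differ.
def hypervisor_state_view(service_is_up, disabled):
    return {
        'state': 'up' if service_is_up else 'down',
        'status': 'disabled' if disabled else 'enabled',
    }


print(hypervisor_state_view(True, False))
# {'state': 'up', 'status': 'enabled'}
```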
diff --git a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl
index e6c046ceb4..27557a3e9f 100644
--- a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-lock-server/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-migrate-server/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-multinic/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-multinic/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl
index e6c046ceb4..27557a3e9f 100644
--- a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-pause-server/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl
index 9eb9e8c9c0..f2bf2bc02c 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-detail-resp.json.tpl
@@ -2,6 +2,8 @@
"hypervisors": [
{
"cpu_info": "?",
+ "state": "up",
+ "status": "enabled",
"current_workload": 0,
"disk_available_least": 0,
"host_ip": "%(ip)s",
@@ -9,7 +11,7 @@
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
@@ -30,7 +32,8 @@
"running_vms": 0,
"service": {
"host": "043b3cacf6f34c90a7245151fc8ebcda",
- "id": 2
+ "id": 2,
+ "disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl
index 8c626fd570..3c0fc0abcd 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-pci/hypervisors-pci-show-resp.json.tpl
@@ -2,13 +2,15 @@
"hypervisor": {
"cpu_info": "?",
"current_workload": 0,
+ "state": "up",
+ "status": "enabled",
"disk_available_least": 0,
"host_ip": "%(ip)s",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
@@ -29,7 +31,8 @@
"running_vms": 0,
"service": {
"host": "043b3cacf6f34c90a7245151fc8ebcda",
- "id": 2
+ "id": 2,
+ "disabled_reason": null
},
"vcpus": 1,
"vcpus_used": 0
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl
index 920f9c8792..84ad950359 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-pci/server-get-resp.json.tpl
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl
index e6c046ceb4..27557a3e9f 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-pci/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-pci/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl
index 265045ff7b..3b2a344d0c 100644
--- a/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-pci/servers-detail-resp.json.tpl
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-remote-consoles/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl b/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl
index 7eae80526d..a7fb13e958 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-rescue.json.tpl
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl b/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
index e56a1a5d77..e3c28e9de2 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-rescue/server-get-resp-unrescue.json.tpl
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-rescue/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-rescue/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl
index c98a0a5853..a381df7444 100644
--- a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-req.json.tpl
@@ -1,10 +1,10 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/openstack/images/%(image_id)s",
- "flavor_ref" : "%(host)s/openstack/flavors/1",
- "os-scheduler-hints:scheduler_hints": {
- "same_host": "%(uuid)s"
- }
+ "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1"
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "%(uuid)s"
}
}
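
Besides the `imageRef`/`flavorRef` renames, this hunk moves the scheduler hints out of the `server` object and up to the top level of the request body, under the v2-style `OS-SCH-HNT:` prefix. For context, the `.tpl` samples are ordinary Python %-format templates; a sketch with made-up substitution values:

```python
# Sketch of how the sample tests consume a .tpl file: the %(name)s
# fields are plain %-formatting placeholders. The values in subs are
# made up for illustration.
template = """{
    "server": {
        "name": "new-server-test",
        "flavorRef": "%(host)s/openstack/flavors/1"
    },
    "OS-SCH-HNT:scheduler_hints": {
        "same_host": "%(uuid)s"
    }
}"""

subs = {'host': 'http://openstack.example.com',
        'uuid': 'a8c37966-ab08-547f-9eee-ffd23fc46c16'}
print(template % subs)
```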
diff --git a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-scheduler-hints/scheduler-hints-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl
index fc3bd9ea1d..8c11a95a24 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/server-get-resp.json.tpl
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl
index 5a2262df91..de72904cc5 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/openstack/images/%(image_id)s",
- "flavor_ref" : "%(host)s/openstack/flavors/1",
+ "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl
index 7f9843c505..08ca8539d9 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl
index 3306b40a9c..37e4621c73 100644
--- a/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-security-groups/servers-detail-resp.json.tpl
@@ -22,13 +22,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-server-diagnostics/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-server-external-events/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl
index 6732556224..7cc853f8fd 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-server-usage/server-get-resp.json.tpl
@@ -22,13 +22,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-server-usage/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl
index 1adbde9767..458276dc29 100644
--- a/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-server-usage/servers-detail-resp.json.tpl
@@ -32,7 +32,7 @@
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
@@ -50,7 +50,7 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"metadata": {
"My Server Name": "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl
index b1013defdf..6f9336d3c0 100644
--- a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-shelve/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/openstack/images/%(image_id)s",
- "flavor_ref" : "%(host)s/openstack/flavors/1",
+ "imageRef" : "%(glance_host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-shelve/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-suspend-server/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl
index 9e6d436840..37f0a75d0a 100644
--- a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(host)s/openstack/images/%(image_id)s",
- "flavor_ref" : "%(host)s/openstack/flavors/1",
+ "imageRef" : "%(host)s/openstack/images/%(image_id)s",
+ "flavorRef" : "%(host)s/openstack/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
},
diff --git a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl
index eb3f76ebe6..adfaaa381e 100644
--- a/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/os-user-data/userdata-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/server-ips/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/server-ips/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/server-metadata/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/server-metadata/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl
index 68a5938ef2..8c8c124b0a 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral-resp.json.tpl
@@ -10,7 +10,7 @@
}
]
},
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"created": "%(isotime)s",
"flavor": {
"id": "1",
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl
index 7b042642b0..8f38088c19 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-preserve-ephemeral.json.tpl
@@ -1,8 +1,8 @@
{
"rebuild" : {
- "image_ref" : "%(glance_host)s/images/%(uuid)s",
+ "imageRef" : "%(glance_host)s/images/%(uuid)s",
"name" : "%(name)s",
- "admin_password" : "%(pass)s",
+ "adminPass" : "%(pass)s",
"metadata" : {
"meta_var" : "meta_val"
},
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl
index 68a5938ef2..8c8c124b0a 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild-resp.json.tpl
@@ -10,7 +10,7 @@
}
]
},
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"created": "%(isotime)s",
"flavor": {
"id": "1",
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(uuid)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl
index f1f21a3401..6385f10593 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/servers/server-action-rebuild.json.tpl
@@ -1,8 +1,8 @@
{
"rebuild" : {
- "image_ref" : "%(glance_host)s/images/%(uuid)s",
+ "imageRef" : "%(glance_host)s/images/%(uuid)s",
"name" : "%(name)s",
- "admin_password" : "%(pass)s",
+ "adminPass" : "%(pass)s",
"metadata" : {
"meta_var" : "meta_val"
},
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl
index 368e6bc076..468a88da24 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/servers/server-action-resize.json.tpl
@@ -1,5 +1,5 @@
{
"resize" : {
- "flavor_ref" : "%(id)s"
+ "flavorRef" : "%(id)s"
}
}
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl
index 37cca80f1b..b37fa3d128 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/servers/server-get-resp.json.tpl
@@ -20,13 +20,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl
index d9a7537dfb..ab0a3bb797 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/servers/server-post-req.json.tpl
@@ -1,8 +1,8 @@
{
"server" : {
"name" : "new-server-test",
- "image_ref" : "%(glance_host)s/images/%(image_id)s",
- "flavor_ref" : "%(host)s/flavors/1",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
"metadata" : {
"My Server Name" : "Apache1"
}
diff --git a/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl
index 7af0df5ec0..71654b4b8a 100644
--- a/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/servers/server-post-resp.json.tpl
@@ -1,6 +1,6 @@
{
"server": {
- "admin_password": "%(password)s",
+ "adminPass": "%(password)s",
"id": "%(id)s",
"links": [
{
diff --git a/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl b/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl
index c72018e107..a3d63d064f 100644
--- a/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl
+++ b/nova/tests/integrated/v3/api_samples/servers/servers-details-resp.json.tpl
@@ -21,13 +21,13 @@
}
]
},
- "host_id": "%(hostid)s",
+ "hostId": "%(hostid)s",
"id": "%(id)s",
"image": {
"id": "%(uuid)s",
"links": [
{
- "href": "%(glance_host)s/images/%(uuid)s",
+ "href": "%(host)s/images/%(uuid)s",
"rel": "bookmark"
}
]
diff --git a/nova/tests/integrated/v3/test_admin_password.py b/nova/tests/integrated/v3/test_admin_password.py
index fb4a97e7a9..7b2a858552 100644
--- a/nova/tests/integrated/v3/test_admin_password.py
+++ b/nova/tests/integrated/v3/test_admin_password.py
@@ -25,5 +25,5 @@ def test_server_password(self):
response = self._do_post('servers/%s/action' % uuid,
'admin-password-change-password',
subs)
- self.assertEqual(response.status, 204)
+ self.assertEqual(response.status, 202)
self.assertEqual(response.read(), "")
diff --git a/nova/tests/integrated/v3/test_agents.py b/nova/tests/integrated/v3/test_agents.py
index 99a53bcf51..bdb3b4e220 100644
--- a/nova/tests/integrated/v3/test_agents.py
+++ b/nova/tests/integrated/v3/test_agents.py
@@ -24,7 +24,7 @@ class AgentsJsonTest(api_sample_base.ApiSampleTestBaseV3):
def setUp(self):
super(AgentsJsonTest, self).setUp()
- fake_agents_list = [{'url': 'xxxxxxxxxxxx',
+ fake_agents_list = [{'url': 'http://example.com/path/to/resource',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
@@ -65,7 +65,7 @@ def fake_agent_build_destroy(context, agent_update_id):
def test_agent_create(self):
# Creates a new agent build.
- project = {'url': 'xxxxxxxxxxxx',
+ project = {'url': 'http://example.com/path/to/resource',
'hypervisor': 'hypervisor',
'architecture': 'x86',
'os': 'os',
@@ -85,7 +85,7 @@ def test_agent_update(self):
# Update an existing agent build.
agent_id = 1
subs = {'version': '7.0',
- 'url': 'xxx://xxxx/xxx/xxx',
+ 'url': 'http://example.com/path/to/resource',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}
response = self._do_put('os-agents/%s' % agent_id,
'agent-update-put-req', subs)
diff --git a/nova/tests/integrated/v3/test_console_auth_tokens.py b/nova/tests/integrated/v3/test_console_auth_tokens.py
index a7cc228a57..100bc84da9 100644
--- a/nova/tests/integrated/v3/test_console_auth_tokens.py
+++ b/nova/tests/integrated/v3/test_console_auth_tokens.py
@@ -12,9 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import re
+from nova.openstack.common import jsonutils
from nova.tests.integrated.v3 import test_servers
@@ -23,7 +23,7 @@ class ConsoleAuthTokensSampleJsonTests(test_servers.ServersSampleBase):
extra_extensions_to_load = ["os-remote-consoles"]
def _get_console_url(self, data):
- return json.loads(data)["console"]["url"]
+ return jsonutils.loads(data)["console"]["url"]
def _get_console_token(self, uuid):
response = self._do_post('servers/%s/action' % uuid,
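
The hunk above replaces the stdlib `json` module with oslo's `jsonutils`, whose `loads()` is call-compatible. A runnable sketch; the stdlib fallback exists only so it runs outside a Nova checkout:

```python
# jsonutils.loads() is call-compatible with json.loads(); the
# ImportError fallback is only so this sketch runs anywhere.
try:
    from nova.openstack.common import jsonutils as json_impl
except ImportError:
    import json as json_impl

data = '{"console": {"type": "novnc", "url": "http://example.com/vnc"}}'
print(json_impl.loads(data)["console"]["url"])
# http://example.com/vnc
```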
diff --git a/nova/tests/integrated/v3/test_evacuate.py b/nova/tests/integrated/v3/test_evacuate.py
index e7a5931697..b4666b0263 100644
--- a/nova/tests/integrated/v3/test_evacuate.py
+++ b/nova/tests/integrated/v3/test_evacuate.py
@@ -13,8 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
+
from nova.compute import api as compute_api
-from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import manager as compute_manager
from nova.servicegroup import api as service_group_api
from nova.tests.integrated.v3 import test_servers
@@ -22,15 +24,9 @@
class EvacuateJsonTest(test_servers.ServersSampleBase):
extension_name = "os-evacuate"
- def test_server_evacuate(self):
- uuid = self._post_server()
-
- # Note (wingwj): The host can't be the same one.
- req_subs = {
- 'host': 'testHost',
- "adminPass": "MySecretPass",
- "onSharedStorage": 'False'
- }
+ def _test_evacuate(self, req_subs, server_req, server_resp,
+ expected_resp_code):
+ self.uuid = self._post_server()
def fake_service_is_up(self, service):
"""Simulate validation of instance host is down."""
@@ -44,24 +40,52 @@ def fake_service_get_by_compute_host(self, context, host):
'zone': 'nova'
}
- def fake_rebuild_instance(_self, ctxt, instance, new_pass,
- injected_files, image_ref, orig_image_ref,
- orig_sys_metadata, bdms, recreate=False,
- on_shared_storage=False, host=None,
- preserve_ephemeral=False, kwargs=None):
- """Simulate that given parameters are correct."""
- self.assertEqual(uuid, instance["uuid"])
- self.assertEqual(new_pass, "MySecretPass")
- self.assertEqual(host, "testHost")
+ def fake_check_instance_exists(self, context, instance):
+ """Simulate validation of instance does not exist."""
+ return False
self.stubs.Set(service_group_api.API, 'service_is_up',
fake_service_is_up)
self.stubs.Set(compute_api.HostAPI, 'service_get_by_compute_host',
fake_service_get_by_compute_host)
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'rebuild_instance',
- fake_rebuild_instance)
+ self.stubs.Set(compute_manager.ComputeManager,
+ '_check_instance_exists',
+ fake_check_instance_exists)
- response = self._do_post('servers/%s/action' % uuid,
- 'server-evacuate-req', req_subs)
+ response = self._do_post('servers/%s/action' % self.uuid,
+ server_req, req_subs)
subs = self._get_regexes()
- self._verify_response('server-evacuate-resp', subs, response, 202)
+ self._verify_response(server_resp, subs, response, expected_resp_code)
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock):
+ # Note (wingwj): The target host can't be the same as the source.
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-req',
+ 'server-evacuate-resp', 202)
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=False, preserve_ephemeral=mock.ANY,
+ host='testHost')
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate_find_host(self, rebuild_mock):
+ req_subs = {
+ "adminPass": "MySecretPass",
+ "onSharedStorage": 'False'
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
+ 'server-evacuate-find-host-resp', 202)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=False, preserve_ephemeral=mock.ANY,
+ host=None)
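
The rewritten evacuate tests lean on two mock features: `@mock.patch` swaps the conductor method for a MagicMock for the test's duration, and `mock.ANY` matches any value in `assert_called_once_with`, so only the arguments the test cares about (`new_pass`, `on_shared_storage`, `host`) are pinned. A standalone sketch of the same pattern, using a stand-in class rather than Nova's ComputeTaskManager:

```python
# Standalone sketch of the patch-and-assert pattern; Conductor is a
# stand-in class, not Nova's ComputeTaskManager.
import mock  # on Python 3, unittest.mock works identically


class Conductor(object):
    def rebuild_instance(self, ctxt, new_pass=None, host=None):
        raise RuntimeError('the test should have mocked this out')


with mock.patch.object(Conductor, 'rebuild_instance') as rebuild_mock:
    Conductor().rebuild_instance('ctxt', new_pass='MySecretPass', host=None)
    # mock.ANY pins only the arguments under test.
    rebuild_mock.assert_called_once_with(
        mock.ANY, new_pass='MySecretPass', host=None)
```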
diff --git a/nova/tests/integrated/v3/test_extended_volumes.py b/nova/tests/integrated/v3/test_extended_volumes.py
index 60b35d7e78..88c5da483f 100644
--- a/nova/tests/integrated/v3/test_extended_volumes.py
+++ b/nova/tests/integrated/v3/test_extended_volumes.py
@@ -31,7 +31,7 @@ class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase):
def _stub_compute_api_get_instance_bdms(self, server_id):
def fake_bdms_get_all_by_instance(context, instance_uuid,
- use_slave=False):
+ use_slave=False):
bdms = [
fake_block_device.FakeDbBlockDeviceDict(
{'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803',
diff --git a/nova/tests/integrated/v3/test_migrate_server.py b/nova/tests/integrated/v3/test_migrate_server.py
index a75a918dbf..a6530ef10e 100644
--- a/nova/tests/integrated/v3/test_migrate_server.py
+++ b/nova/tests/integrated/v3/test_migrate_server.py
@@ -18,6 +18,7 @@
from nova.conductor import manager as conductor_manager
from nova import db
from nova.tests.integrated.v3 import test_servers
+from nova import utils
class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase):
@@ -58,7 +59,8 @@ def fake_get_compute(context, host):
report_count=1,
updated_at='foo',
hypervisor_type='bar',
- hypervisor_version='1',
+ hypervisor_version=utils.convert_version_to_int(
+ '1.0'),
disabled=False)
return {'compute_node': [service]}
self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
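
The stub above now reports the hypervisor version through `utils.convert_version_to_int('1.0')` rather than the string '1', which is what makes the hypervisor samples elsewhere in this patch show 1000. A sketch of the packing this implies, believed to match the helper (three decimal digits per dotted component):

```python
# Believed behaviour of nova.utils.convert_version_to_int: pack each
# dotted component into three decimal digits, so '1.0' -> 1000.
def convert_version_to_int(version):
    result = 0
    for part in str(version).split('.'):
        result = result * 1000 + int(part)
    return result


assert convert_version_to_int('1.0') == 1000
assert convert_version_to_int('2.1.3') == 2001003
```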
diff --git a/nova/tests/integrated/v3/test_pci.py b/nova/tests/integrated/v3/test_pci.py
index e6ba2b0484..91aba3f9c7 100644
--- a/nova/tests/integrated/v3/test_pci.py
+++ b/nova/tests/integrated/v3/test_pci.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
+
from nova import db
from nova.openstack.common import jsonutils
from nova.tests.integrated.v3 import api_sample_base
@@ -83,19 +85,23 @@ def setUp(self):
"current_workload": 0,
"disk_available_least": 0,
"host_ip": "1.1.1.1",
+ "state": "up",
+ "status": "enabled",
"free_disk_gb": 1028,
"free_ram_mb": 7680,
"hypervisor_hostname": "fake-mini",
"hypervisor_type": "fake",
- "hypervisor_version": 1,
+ "hypervisor_version": 1000,
"id": 1,
"local_gb": 1028,
"local_gb_used": 0,
"memory_mb": 8192,
"memory_mb_used": 512,
"running_vms": 0,
- "service": {"host": '043b3cacf6f34c90a724'
- '5151fc8ebcda'},
+ "service": {"host": '043b3cacf6f34c90a'
+ '7245151fc8ebcda',
+ "disabled": False,
+ "disabled_reason": None},
"vcpus": 1,
"vcpus_used": 0,
"service_id": 2,
@@ -110,13 +116,12 @@ def setUp(self):
' "0x1"]]',
"key1": "value1"}}]}
- def test_pci_show(self):
- def fake_compute_node_get(context, id):
- self.fake_compute_node['pci_stats'] = jsonutils.dumps(
- self.fake_compute_node['pci_stats'])
- return self.fake_compute_node
-
- self.stubs.Set(db, 'compute_node_get', fake_compute_node_get)
+ @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
+ @mock.patch("nova.db.compute_node_get")
+ def test_pci_show(self, mock_db, mock_service):
+ self.fake_compute_node['pci_stats'] = jsonutils.dumps(
+ self.fake_compute_node['pci_stats'])
+ mock_db.return_value = self.fake_compute_node
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs = {
@@ -126,13 +131,13 @@ def fake_compute_node_get(context, id):
self._verify_response('hypervisors-pci-show-resp',
subs, response, 200)
- def test_pci_detail(self):
- def fake_compute_node_get_all(context):
- self.fake_compute_node['pci_stats'] = jsonutils.dumps(
- self.fake_compute_node['pci_stats'])
- return [self.fake_compute_node]
+ @mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
+ @mock.patch("nova.db.compute_node_get_all")
+ def test_pci_detail(self, mock_db, mock_service):
+ self.fake_compute_node['pci_stats'] = jsonutils.dumps(
+ self.fake_compute_node['pci_stats'])
- self.stubs.Set(db, 'compute_node_get_all', fake_compute_node_get_all)
+ mock_db.return_value = [self.fake_compute_node]
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
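
The PCI tests above migrate from `self.stubs.Set`, which installs a hand-written fake function, to stacked `@mock.patch` decorators with canned `return_value`s. A self-contained sketch of the decorator form; stacked patches apply bottom-up, so the decorator closest to the function supplies the first test argument:

```python
# Self-contained sketch of the @mock.patch style adopted above;
# FakeDB stands in for nova.db.
import mock


class FakeDB(object):
    @staticmethod
    def compute_node_get(context, node_id):
        raise RuntimeError('unit tests must not hit the real DB')


@mock.patch.object(FakeDB, 'compute_node_get')
def test_show(mock_db):
    mock_db.return_value = {'id': 1, 'hypervisor_hostname': 'fake-mini'}
    row = FakeDB.compute_node_get(None, 1)
    assert row['hypervisor_hostname'] == 'fake-mini'
    mock_db.assert_called_once_with(None, 1)


test_show()
```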
diff --git a/nova/tests/keymgr/test_key.py b/nova/tests/keymgr/test_key.py
index a086c6527c..14766fd201 100644
--- a/nova/tests/keymgr/test_key.py
+++ b/nova/tests/keymgr/test_key.py
@@ -57,11 +57,11 @@ def test_get_encoded(self):
def test___eq__(self):
self.assertTrue(self.key == self.key)
- self.assertFalse(self.key == None)
+ self.assertFalse(self.key is None)
self.assertFalse(None == self.key)
def test___ne__(self):
self.assertFalse(self.key != self.key)
- self.assertTrue(self.key != None)
+ self.assertTrue(self.key is not None)
self.assertTrue(None != self.key)
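
The test_key.py hunk swaps `== None` comparisons for identity checks. The distinction matters precisely because this test class overrides `__eq__`: equality dispatches to user code, while `is` compares against the unique None object:

```python
# Equality can be overridden; identity cannot.
class AlwaysEqual(object):
    def __eq__(self, other):
        return True


obj = AlwaysEqual()
print(obj == None)   # True  -- routed through __eq__
print(obj is None)   # False -- identity against the None singleton
```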
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 36979e9679..ba9b7d8778 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -37,6 +37,7 @@
from nova.tests import fake_instance
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_flavor
+from nova.tests.objects import test_virtual_interface
from nova import utils
FAKE_UUID = 'a47ae74e-ab08-547f-9eee-ffd23fc46c16'
@@ -73,6 +74,68 @@ def setUp(self):
self.context = context.RequestContext('fake-user',
'fake-project')
+ @mock.patch('nova.objects.NetworkList.get_all')
+ def test_get_all(self, mock_get_all):
+ mock_get_all.return_value = mock.sentinel.get_all
+ self.assertEqual(mock.sentinel.get_all,
+ self.network_api.get_all(self.context))
+ mock_get_all.assert_called_once_with(self.context,
+ project_only=True)
+
+ @mock.patch('nova.objects.NetworkList.get_all')
+ def test_get_all_no_networks(self, mock_get_all):
+ mock_get_all.side_effect = exception.NoNetworksFound
+ self.assertEqual([], self.network_api.get_all(self.context))
+ mock_get_all.assert_called_once_with(self.context,
+ project_only=True)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ def test_get(self, mock_get):
+ mock_get.return_value = mock.sentinel.get_by_uuid
+ with mock.patch.object(self.context, 'elevated') as elevated:
+ elevated.return_value = mock.sentinel.elevated_context
+ self.assertEqual(mock.sentinel.get_by_uuid,
+ self.network_api.get(self.context, 'fake-uuid'))
+ mock_get.assert_called_once_with(mock.sentinel.elevated_context,
+ 'fake-uuid')
+
+ @mock.patch('nova.objects.Network.get_by_id')
+ @mock.patch('nova.db.virtual_interface_get_by_instance')
+ def test_get_vifs_by_instance(self, mock_get_by_instance,
+ mock_get_by_id):
+ mock_get_by_instance.return_value = [
+ dict(test_virtual_interface.fake_vif,
+ network_id=123)]
+ mock_get_by_id.return_value = objects.Network()
+ mock_get_by_id.return_value.uuid = mock.sentinel.network_uuid
+ instance = objects.Instance(uuid=mock.sentinel.inst_uuid)
+ vifs = self.network_api.get_vifs_by_instance(self.context,
+ instance)
+ self.assertEqual(1, len(vifs))
+ self.assertEqual(123, vifs[0].network_id)
+ self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid)
+ mock_get_by_instance.assert_called_once_with(
+ self.context, str(mock.sentinel.inst_uuid), use_slave=False)
+ mock_get_by_id.assert_called_once_with(self.context, 123,
+ project_only='allow_none')
+
+ @mock.patch('nova.objects.Network.get_by_id')
+ @mock.patch('nova.db.virtual_interface_get_by_address')
+ def test_get_vif_by_mac_address(self, mock_get_by_address,
+ mock_get_by_id):
+ mock_get_by_address.return_value = dict(
+ test_virtual_interface.fake_vif, network_id=123)
+ mock_get_by_id.return_value = objects.Network(
+ uuid=mock.sentinel.network_uuid)
+ vif = self.network_api.get_vif_by_mac_address(self.context,
+ mock.sentinel.mac)
+ self.assertEqual(123, vif.network_id)
+ self.assertEqual(str(mock.sentinel.network_uuid), vif.net_uuid)
+ mock_get_by_address.assert_called_once_with(self.context,
+ mock.sentinel.mac)
+ mock_get_by_id.assert_called_once_with(self.context, 123,
+ project_only='allow_none')
+
def test_allocate_for_instance_handles_macs_passed(self):
# If a macs argument is supplied to the 'nova-network' API, it is just
# ignored. This test checks that the call down to the rpcapi layer
@@ -91,8 +154,10 @@ def test_allocate_for_instance_handles_macs_passed(self):
flavor = flavors.get_default_flavor()
flavor['rxtx_factor'] = 0
sys_meta = flavors.save_flavor_info({}, flavor)
- instance = dict(id='id', uuid='uuid', project_id='project_id',
+ instance = dict(id=1, uuid='uuid', project_id='project_id',
host='host', system_metadata=utils.dict_to_metadata(sys_meta))
+ instance = fake_instance.fake_instance_obj(
+ self.context, expected_attrs=['system_metadata'], **instance)
self.network_api.allocate_for_instance(
self.context, instance, 'vpn', 'requested_networks', macs=macs)
@@ -107,8 +172,10 @@ def fake_associate(*args, **kwargs):
self.stubs.Set(floating_ips.FloatingIP, 'associate_floating_ip',
fake_associate)
- def fake_instance_get_by_uuid(context, instance_uuid):
- return {'uuid': instance_uuid}
+ def fake_instance_get_by_uuid(context, instance_uuid,
+ columns_to_join=None,
+ use_subordinate=None):
+ return fake_instance.fake_db_instance(uuid=instance_uuid)
self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
@@ -159,6 +226,53 @@ def test_get_floating_ip_invalid_id(self):
self.network_api.get_floating_ip,
self.context, '123zzz')
+ @mock.patch('nova.objects.FloatingIP.get_by_id')
+ def test_get_floating_ip(self, mock_get):
+ floating = mock.sentinel.floating
+ mock_get.return_value = floating
+ self.assertEqual(floating,
+ self.network_api.get_floating_ip(self.context, 123))
+ mock_get.assert_called_once_with(self.context, 123)
+
+ @mock.patch('nova.objects.FloatingIP.get_pool_names')
+ def test_get_floating_ip_pools(self, mock_get):
+ pools = ['foo', 'bar']
+ mock_get.return_value = pools
+ self.assertEqual(pools,
+ self.network_api.get_floating_ip_pools(
+ self.context))
+
+ @mock.patch('nova.objects.FloatingIP.get_by_address')
+ def test_get_floating_ip_by_address(self, mock_get):
+ floating = mock.sentinel.floating
+ mock_get.return_value = floating
+ self.assertEqual(floating,
+ self.network_api.get_floating_ip_by_address(
+ self.context, mock.sentinel.address))
+ mock_get.assert_called_once_with(self.context,
+ mock.sentinel.address)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_project')
+ def test_get_floating_ips_by_project(self, mock_get):
+ floatings = mock.sentinel.floating_ips
+ mock_get.return_value = floatings
+ self.assertEqual(floatings,
+ self.network_api.get_floating_ips_by_project(
+ self.context))
+ mock_get.assert_called_once_with(self.context,
+ self.context.project_id)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
+ def test_get_floating_ips_by_fixed_address(self, mock_get):
+ floatings = [objects.FloatingIP(id=1, address='1.2.3.4'),
+ objects.FloatingIP(id=2, address='5.6.7.8')]
+ mock_get.return_value = floatings
+ self.assertEqual(['1.2.3.4', '5.6.7.8'],
+ self.network_api.get_floating_ips_by_fixed_address(
+ self.context, mock.sentinel.fixed_address))
+ mock_get.assert_called_once_with(self.context,
+ mock.sentinel.fixed_address)
+
def _stub_migrate_instance_calls(self, method, multi_host, info):
fake_flavor = flavors.get_default_flavor()
fake_flavor['rxtx_factor'] = 1.21
@@ -274,21 +388,52 @@ def test_is_multi_host_network_has_project_id_multi(self):
def test_is_multi_host_network_has_project_id_non_multi(self):
self._test_is_multi_host_network_has_project_id(False)
- def test_network_disassociate_project(self):
- def fake_network_disassociate(ctx, network_id, disassociate_host,
- disassociate_project):
- self.assertEqual(network_id, 1)
- self.assertEqual(disassociate_host, False)
- self.assertEqual(disassociate_project, True)
-
- def fake_get(context, network_uuid):
- return {'id': 1}
-
- self.stubs.Set(self.network_api.db, 'network_disassociate',
- fake_network_disassociate)
- self.stubs.Set(self.network_api, 'get', fake_get)
-
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.disassociate')
+ def test_network_disassociate_project(self, mock_disassociate, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
self.network_api.associate(self.context, FAKE_UUID, project=None)
+ mock_disassociate.assert_called_once_with(self.context, net_obj.id,
+ host=False, project=True)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.disassociate')
+ def test_network_disassociate_host(self, mock_disassociate, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ self.network_api.associate(self.context, FAKE_UUID, host=None)
+ mock_disassociate.assert_called_once_with(self.context, net_obj.id,
+ host=True, project=False)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.associate')
+ def test_network_associate_project(self, mock_associate, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ project = mock.sentinel.project
+ self.network_api.associate(self.context, FAKE_UUID, project=project)
+ mock_associate.assert_called_once_with(self.context, project,
+ network_id=net_obj.id,
+ force=True)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.save')
+ def test_network_associate_host(self, mock_save, mock_get):
+ net_obj = objects.Network(context=self.context, id=1)
+ mock_get.return_value = net_obj
+ host = str(mock.sentinel.host)
+ self.network_api.associate(self.context, FAKE_UUID, host=host)
+ mock_save.assert_called_once_with()
+ self.assertEqual(host, net_obj.host)
+
+ @mock.patch('nova.objects.Network.get_by_uuid')
+ @mock.patch('nova.objects.Network.disassociate')
+ def test_network_disassociate(self, mock_disassociate, mock_get):
+ mock_get.return_value = objects.Network(context=self.context, id=123)
+ self.network_api.disassociate(self.context, FAKE_UUID)
+ mock_disassociate.assert_called_once_with(self.context, 123,
+ project=True, host=True)
def _test_refresh_cache(self, method, *args, **kwargs):
# This test verifies that no call to get_instance_nw_info() is made
@@ -339,6 +484,33 @@ def test_get_fixed_ip_by_address(self, fip_get):
'fake-addr')
self.assertIsInstance(fip, objects.FixedIP)
+ @mock.patch('nova.objects.FixedIP.get_by_id')
+ def test_get_fixed_ip(self, mock_get_by_id):
+ mock_get_by_id.return_value = mock.sentinel.fixed_ip
+ self.assertEqual(mock.sentinel.fixed_ip,
+ self.network_api.get_fixed_ip(self.context,
+ mock.sentinel.id))
+ mock_get_by_id.assert_called_once_with(self.context, mock.sentinel.id)
+
+ @mock.patch('nova.objects.FixedIP.get_by_floating_address')
+ def test_get_instance_by_floating_address(self, mock_get_by_floating):
+ mock_get_by_floating.return_value = objects.FixedIP(
+            instance_uuid=mock.sentinel.instance_uuid)
+ self.assertEqual(str(mock.sentinel.instance_uuid),
+ self.network_api.get_instance_id_by_floating_address(
+ self.context, mock.sentinel.floating))
+ mock_get_by_floating.assert_called_once_with(self.context,
+ mock.sentinel.floating)
+
+ @mock.patch('nova.objects.FixedIP.get_by_floating_address')
+ def test_get_instance_by_floating_address_none(self, mock_get_by_floating):
+ mock_get_by_floating.return_value = None
+        self.assertIsNone(
+            self.network_api.get_instance_id_by_floating_address(
+                self.context, mock.sentinel.floating))
+ mock_get_by_floating.assert_called_once_with(self.context,
+ mock.sentinel.floating)
+
@mock.patch('nova.network.api.API')
@mock.patch('nova.db.instance_info_cache_update')
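
The getter tests added to this file all follow one recipe: patch the objects-layer call, pass mock.sentinel values (each sentinel is a unique object), then assert both that the sentinel comes back unchanged and that the patched call saw exactly the expected arguments. A self-contained sketch of the pattern, with invented names:

    import mock  # the standalone mock library Nova used at the time

    class Backend(object):
        def get(self, context, id):
            raise NotImplementedError  # always stubbed out in the tests

    class API(object):
        def __init__(self):
            self.backend = Backend()

        def get_fixed_ip(self, context, id):
            return self.backend.get(context, id)

    api = API()
    with mock.patch.object(api.backend, 'get') as mock_get:
        mock_get.return_value = mock.sentinel.fixed_ip
        result = api.get_fixed_ip(mock.sentinel.ctxt, mock.sentinel.id)
        assert result is mock.sentinel.fixed_ip
        mock_get.assert_called_once_with(mock.sentinel.ctxt, mock.sentinel.id)
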
diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py
index 76be0dd755..07f23b2743 100644
--- a/nova/tests/network/test_linux_net.py
+++ b/nova/tests/network/test_linux_net.py
@@ -88,7 +88,11 @@
'vlan': None,
'host': None,
'project_id': 'fake_project',
- 'vpn_public_address': '192.168.0.2'},
+ 'vpn_public_address': '192.168.0.2',
+ 'mtu': None,
+ 'dhcp_server': '192.168.0.1',
+ 'enable_dhcp': True,
+ 'share_address': False},
{'id': 1,
'uuid': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
'label': 'test1',
@@ -110,7 +114,11 @@
'vlan': None,
'host': None,
'project_id': 'fake_project',
- 'vpn_public_address': '192.168.1.2'}]
+ 'vpn_public_address': '192.168.1.2',
+ 'mtu': None,
+ 'dhcp_server': '192.168.1.1',
+ 'enable_dhcp': True,
+ 'share_address': False}]
fixed_ips = [{'id': 0,
@@ -279,7 +287,7 @@ def setUp(self):
self.context = context.RequestContext('testuser', 'testproject',
is_admin=True)
- def get_vifs(_context, instance_uuid, use_slave):
+ def get_vifs(_context, instance_uuid, use_subordinate):
return [vif for vif in vifs if vif['instance_uuid'] ==
instance_uuid]
@@ -290,27 +298,44 @@ def get_instance(_context, instance_id):
self.stubs.Set(db, 'instance_get', get_instance)
self.stubs.Set(db, 'network_get_associated_fixed_ips', get_associated)
- def _test_add_snat_rule(self, expected):
+ def _test_add_snat_rule(self, expected, is_external):
+
def verify_add_rule(chain, rule):
self.assertEqual(chain, 'snat')
self.assertEqual(rule, expected)
+ self.called = True
self.stubs.Set(linux_net.iptables_manager.ipv4['nat'],
'add_rule', verify_add_rule)
- linux_net.add_snat_rule('10.0.0.0/24')
+ self.called = False
+ linux_net.add_snat_rule('10.0.0.0/24', is_external)
+ if expected:
+ self.assertTrue(self.called)
- def test_add_snat_rule(self):
+ def test_add_snat_rule_no_ext(self):
self.flags(routing_source_ip='10.10.10.1')
expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 '
'-j SNAT --to-source 10.10.10.1 -o eth0')
- self._test_add_snat_rule(expected)
+ self._test_add_snat_rule(expected, False)
+
+ def test_add_snat_rule_ext(self):
+ self.flags(routing_source_ip='10.10.10.1')
+ expected = ()
+ self._test_add_snat_rule(expected, True)
- def test_add_snat_rule_snat_range(self):
+ def test_add_snat_rule_snat_range_no_ext(self):
self.flags(routing_source_ip='10.10.10.1',
force_snat_range=['10.10.10.0/24'])
- expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 '
+ expected = ('-s 10.0.0.0/24 -d 0.0.0.0/0 '
'-j SNAT --to-source 10.10.10.1 -o eth0')
- self._test_add_snat_rule(expected)
+ self._test_add_snat_rule(expected, False)
+
+ def test_add_snat_rule_snat_range_ext(self):
+ self.flags(routing_source_ip='10.10.10.1',
+ force_snat_range=['10.10.10.0/24'])
+ expected = ('-s 10.0.0.0/24 -d 10.10.10.0/24 '
+ '-j SNAT --to-source 10.10.10.1')
+ self._test_add_snat_rule(expected, True)
def test_update_dhcp_for_nw00(self):
self.flags(use_single_default_gateway=True)
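
For context on the four SNAT tests above: the new is_external flag controls both whether any rule is emitted at all (expected = () for an external network without force_snat_range) and the rule's shape (the forced range becomes the -d match and the -o eth0 output-interface filter is dropped). A rough sketch of the selection logic the tests pin down, with a simplified signature; the real logic lives in nova.network.linux_net:

    def build_snat_rules(cidr, source_ip, force_snat_range, is_external):
        # Sketch only: mirrors the four expected strings asserted above.
        if is_external and not force_snat_range:
            return []  # external networks get no default SNAT rule
        rules = []
        for dest in (force_snat_range or ['0.0.0.0/0']):
            rule = ('-s %s -d %s -j SNAT --to-source %s'
                    % (cidr, dest if is_external else '0.0.0.0/0', source_ip))
            if not is_external:
                rule += ' -o eth0'  # constrain the output interface internally
            rules.append(rule)
        return rules
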
@@ -405,14 +430,22 @@ def test_get_dns_hosts_for_nw01(self):
self.assertEqual(actual_hosts, expected)
def test_get_dhcp_opts_for_nw00(self):
- expected_opts = 'NW-3,3\nNW-4,3'
+ self.flags(use_single_default_gateway=True)
+ expected_opts = 'NW-0,3,192.168.0.1\nNW-3,3\nNW-4,3'
+ actual_opts = self.driver.get_dhcp_opts(self.context, networks[0])
+
+ self.assertEqual(actual_opts, expected_opts)
+
+ def test_get_dhcp_opts_for_nw00_no_single_default_gateway(self):
+ self.flags(use_single_default_gateway=False)
+ expected_opts = '3,192.168.0.1'
actual_opts = self.driver.get_dhcp_opts(self.context, networks[0])
self.assertEqual(actual_opts, expected_opts)
def test_get_dhcp_opts_for_nw01(self):
- self.flags(host='fake_instance01')
- expected_opts = "NW-5,3"
+ self.flags(use_single_default_gateway=True, host='fake_instance01')
+ expected_opts = "NW-2,3,192.168.1.1\nNW-5,3"
actual_opts = self.driver.get_dhcp_opts(self.context, networks[1])
self.assertEqual(actual_opts, expected_opts)
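
These dhcp-opts expectations pair with the new --dhcp-optsfile argument asserted in the dnsmasq execute test further down: option 3 is dnsmasq's router option, so with use_single_default_gateway=True each host gets a tagged NW-<vif>,3[,<gateway>] line (omitting the gateway value suppresses the default route for that host), while otherwise one untagged 3,<gateway> line serves the whole network. A sketch of emitting that file, with dict shapes inferred from the expected strings:

    def format_dhcp_opts(network, vifs, use_single_default_gateway):
        # Sketch: build dnsmasq --dhcp-optsfile contents (option 3 = router).
        if not use_single_default_gateway:
            return '3,%s' % network['gateway']
        lines = []
        for vif in vifs:
            if vif.get('gateway'):
                lines.append('NW-%s,3,%s' % (vif['id'], vif['gateway']))
            else:
                lines.append('NW-%s,3' % vif['id'])  # no default route here
        return '\n'.join(lines)
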
@@ -571,12 +604,14 @@ def test_ensure(bridge, interface, network, gateway):
def _test_dnsmasq_execute(self, extra_expected=None):
network_ref = {'id': 'fake',
'label': 'fake',
+ 'gateway': '10.0.0.1',
'multi_host': False,
'cidr': '10.0.0.0/24',
'netmask': '255.255.255.0',
'dns1': '8.8.4.4',
'dhcp_start': '1.0.0.2',
- 'dhcp_server': '10.0.0.1'}
+ 'dhcp_server': '10.0.0.1',
+ 'share_address': False}
def fake_execute(*args, **kwargs):
executes.append(args)
@@ -607,6 +642,7 @@ def fake_add_dhcp_mangle_rule(*args, **kwargs):
'--bind-interfaces',
'--conf-file=%s' % CONF.dnsmasq_config_file,
'--pid-file=%s' % linux_net._dhcp_file(dev, 'pid'),
+ '--dhcp-optsfile=%s' % linux_net._dhcp_file(dev, 'opts'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
"--dhcp-range=set:%s,%s,static,%s,%ss" % (network_ref['label'],
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index e2f62e2617..ceece4345b 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -76,6 +76,7 @@
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
+ 'dhcp_server': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
@@ -98,6 +99,7 @@
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
+ 'dhcp_server': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
@@ -331,9 +333,63 @@ def test_validate_reserved(self):
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
- self.assertEqual(3, db.network_count_reserved_ips(context_admin,
+ self.assertEqual(4, db.network_count_reserved_ips(context_admin,
network['id']))
+ def test_validate_reserved_start_end(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ nets = self.network.create_networks(context_admin, 'fake',
+ '192.168.0.0/24', False, 1,
+ 256, dhcp_server='192.168.0.11',
+ allowed_start='192.168.0.10',
+ allowed_end='192.168.0.245')
+ self.assertEqual(1, len(nets))
+ network = nets[0]
+        # gateway defaults to allowed_start, the first allowed address
+ self.assertEqual('192.168.0.10', network['gateway'])
+ # vpn_server doesn't conflict with dhcp_start
+ self.assertEqual('192.168.0.12', network['vpn_private_address'])
+ # dhcp_start doesn't conflict with dhcp_server
+ self.assertEqual('192.168.0.13', network['dhcp_start'])
+ # NOTE(vish): 10 from the beginning, 10 from the end, and
+ # 1 for the gateway, 1 for the dhcp server,
+ # 1 for the vpn server
+ self.assertEqual(23, db.network_count_reserved_ips(context_admin,
+ network['id']))
+
+ def test_validate_reserved_start_out_of_range(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ self.assertRaises(exception.AddressOutOfRange,
+ self.network.create_networks,
+ context_admin, 'fake', '192.168.0.0/24', False,
+ 1, 256, allowed_start='192.168.1.10')
+
+ def test_validate_reserved_end_invalid(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ self.assertRaises(exception.InvalidAddress,
+ self.network.create_networks,
+ context_admin, 'fake', '192.168.0.0/24', False,
+ 1, 256, allowed_end='invalid')
+
+ def test_validate_cidr_invalid(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ self.assertRaises(exception.InvalidCidr,
+ self.network.create_networks,
+ context_admin, 'fake', 'invalid', False,
+ 1, 256)
+
+ def test_validate_non_int_size(self):
+ context_admin = context.RequestContext('testuser', 'testproject',
+ is_admin=True)
+ self.assertRaises(exception.InvalidIntValue,
+ self.network.create_networks,
+ context_admin, 'fake', '192.168.0.0/24', False,
+ 1, 'invalid')
+
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
@@ -421,7 +477,7 @@ def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve):
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
@@ -474,7 +530,7 @@ def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve):
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
@@ -571,7 +627,7 @@ def test_instance_dns(self, reserve):
inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
db.instance_get_by_uuid(self.context,
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(inst)
@@ -730,7 +786,7 @@ def test_allocate_fixed_ip_cleanup(self,
mock.call(instance.uuid, '')
])
- mock_fixedip_disassociate.assert_called_once()
+ mock_fixedip_disassociate.assert_called_once_with(self.context)
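
The one-line fix above is subtler than it looks: with the mock releases contemporary to this patch, assert_called_once (without _with) is not an assertion at all -- attribute access on a Mock auto-creates a child mock, so the call always "passes". Switching to assert_called_once_with makes it a real check and verifies the arguments too. A short demonstration, assuming that era's mock library (newer releases turn the misspelling into a real assertion or an AttributeError):

    import mock

    m = mock.Mock()
    m.assert_called_once()           # silently a no-op: auto-created attribute
    try:
        m.assert_called_once_with()  # real assertion; m was never called
    except AssertionError:
        print('assert_called_once_with correctly failed')
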
class FlatDHCPNetworkTestCase(test.TestCase):
@@ -815,7 +871,7 @@ def test_vpn_allocate_fixed_ip(self):
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -863,7 +919,7 @@ def test_allocate_fixed_ip(self):
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -1101,12 +1157,14 @@ def fake_allocate_address(*args, **kwargs):
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
- def test_deallocate_floating_ip(self):
+ @mock.patch('nova.quota.QUOTAS.reserve')
+ @mock.patch('nova.quota.QUOTAS.commit')
+ def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
- pass
+ return dict(test_floating_ip.fake_floating_ip)
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
@@ -1127,10 +1185,14 @@ def fake3(*args, **kwargs):
ctxt,
mox.IgnoreArg())
+ mock_reserve.return_value = 'reserve'
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
+ mock_commit.assert_called_once_with(ctxt, 'reserve',
+ project_id='testproject')
+
@mock.patch('nova.db.fixed_ip_get')
def test_associate_floating_ip(self, fixed_get):
ctxt = context.RequestContext('testuser', 'testproject',
@@ -1467,7 +1529,7 @@ def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(), use_subordinate=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -1774,7 +1836,8 @@ def dnsdomain_get(context, instance_domain):
self.assertTrue(res)
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
- extra_reserved=None):
+ extra_reserved=None, bottom_reserved=0,
+ top_reserved=0):
return None
def test_get_instance_nw_info_client_exceptions(self):
@@ -1783,7 +1846,7 @@ def test_get_instance_nw_info_client_exceptions(self):
'virtual_interface_get_by_instance')
manager.db.virtual_interface_get_by_instance(
self.context, FAKEUUID,
- use_slave=False).AndRaise(exception.InstanceNotFound(
+ use_subordinate=False).AndRaise(exception.InstanceNotFound(
instance_id=FAKEUUID))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
@@ -2281,6 +2344,7 @@ def _test_init_host_dynamic_fixed_range(self, net_manager):
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.2.1',
+ 'dhcp_server': '192.168.2.1',
'broadcast': '192.168.2.255',
'dns1': '192.168.2.1',
'dns2': '192.168.2.2',
@@ -2390,6 +2454,8 @@ class TestFloatingIPManager(floating_ips.FloatingIP,
class AllocateTestCase(test.TestCase):
def setUp(self):
super(AllocateTestCase, self).setUp()
+ dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
+ self.flags(instance_dns_manager=dns)
self.useFixture(test.SampleNetworks())
self.conductor = self.start_service(
'conductor', manager=CONF.conductor.manager)
@@ -2401,6 +2467,8 @@ def setUp(self):
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
+ self.user_context = context.RequestContext('testuser',
+ 'testproject')
def test_allocate_for_instance(self):
address = "10.10.10.10"
@@ -2419,8 +2487,8 @@ def test_allocate_for_instance(self):
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
- project_id = self.context.project_id
- nw_info = self.network.allocate_for_instance(self.context,
+ project_id = self.user_context.project_id
+ nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=None)
@@ -2430,6 +2498,32 @@ def test_allocate_for_instance(self):
self.network.deallocate_for_instance(self.context,
instance=inst)
+ def test_allocate_for_instance_illegal_network(self):
+ networks = db.network_get_all(self.context)
+ requested_networks = []
+ for network in networks:
+ # set all networks to other projects
+ db.network_update(self.context, network['id'],
+ {'host': self.network.host,
+ 'project_id': 'otherid'})
+ requested_networks.append((network['uuid'], None))
+ # set the first network to our project
+ db.network_update(self.context, networks[0]['id'],
+ {'project_id': self.user_context.project_id})
+
+ inst = objects.Instance()
+ inst.host = self.compute.host
+ inst.display_name = HOST
+ inst.instance_type_id = 1
+ inst.uuid = FAKEUUID
+ inst.create(self.context)
+ self.assertRaises(exception.NetworkNotFoundForProject,
+ self.network.allocate_for_instance, self.user_context,
+ instance_id=inst['id'], instance_uuid=inst['uuid'],
+ host=inst['host'], vpn=None, rxtx_factor=3,
+ project_id=self.context.project_id, macs=None,
+ requested_networks=requested_networks)
+
def test_allocate_for_instance_with_mac(self):
available_macs = set(['ca:fe:de:ad:be:ef'])
inst = db.instance_create(self.context, {'host': self.compute.host,
@@ -2440,7 +2534,7 @@ def test_allocate_for_instance_with_mac(self):
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
- nw_info = self.network.allocate_for_instance(self.context,
+ nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
@@ -2463,7 +2557,8 @@ def test_allocate_for_instance_not_enough_macs(self):
{'host': self.network.host})
project_id = self.context.project_id
self.assertRaises(exception.VirtualInterfaceCreateException,
- self.network.allocate_for_instance, self.context,
+ self.network.allocate_for_instance,
+ self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
@@ -2987,6 +3082,32 @@ def test_associate_floating_ip_failure_interface_not_found(self):
self._test_associate_floating_ip_failure('Cannot find device',
exception.NoFloatingIpInterface)
+ @mock.patch('nova.objects.FloatingIP.get_by_address')
+ def test_get_floating_ip_by_address(self, mock_get):
+ mock_get.return_value = mock.sentinel.floating
+ self.assertEqual(mock.sentinel.floating,
+ self.network.get_floating_ip_by_address(
+ self.context,
+ mock.sentinel.address))
+ mock_get.assert_called_once_with(self.context, mock.sentinel.address)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_project')
+ def test_get_floating_ips_by_project(self, mock_get):
+ mock_get.return_value = mock.sentinel.floatings
+ self.assertEqual(mock.sentinel.floatings,
+ self.network.get_floating_ips_by_project(
+ self.context))
+ mock_get.assert_called_once_with(self.context, self.context.project_id)
+
+ @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
+ def test_get_floating_ips_by_fixed_address(self, mock_get):
+ mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
+ objects.FloatingIP(address='5.6.7.8')]
+ self.assertEqual(['1.2.3.4', '5.6.7.8'],
+ self.network.get_floating_ips_by_fixed_address(
+ self.context, mock.sentinel.address))
+ mock_get.assert_called_once_with(self.context, mock.sentinel.address)
+
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS."""
diff --git a/nova/tests/network/test_network_info.py b/nova/tests/network/test_network_info.py
index 6ca75331eb..e974fa190b 100644
--- a/nova/tests/network/test_network_info.py
+++ b/nova/tests/network/test_network_info.py
@@ -506,7 +506,8 @@ def test_hydrate(self):
def _setup_injected_network_scenario(self, should_inject=True,
use_ipv4=True, use_ipv6=False,
gateway=True, dns=True,
- two_interfaces=False):
+ two_interfaces=False,
+ libvirt_virt_type=None):
"""Check that netutils properly decides whether to inject based on
whether the supplied subnet is static or dynamic.
"""
@@ -548,8 +549,8 @@ def _setup_injected_network_scenario(self, should_inject=True,
vifs.append(vif)
nwinfo = model.NetworkInfo(vifs)
- return netutils.get_injected_network_template(nwinfo,
- use_ipv6=use_ipv6)
+ return netutils.get_injected_network_template(
+ nwinfo, use_ipv6=use_ipv6, libvirt_virt_type=libvirt_virt_type)
def test_injection_dynamic(self):
expected = None
@@ -714,3 +715,70 @@ def test_injection_ipv6_two_interfaces(self):
template = self._setup_injected_network_scenario(use_ipv6=True,
two_interfaces=True)
self.assertEqual(expected, template)
+
+ def test_injection_ipv6_with_lxc(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ gateway 10.10.0.1
+ dns-nameservers 1.2.3.4 2.3.4.5
+ post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+ post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
+
+auto eth1
+iface eth1 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ gateway 10.10.0.1
+ dns-nameservers 1.2.3.4 2.3.4.5
+ post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+ post-up ip -6 route add default via 1234:567::1 dev ${IFACE}
+"""
+ template = self._setup_injected_network_scenario(
+ use_ipv6=True, two_interfaces=True, libvirt_virt_type='lxc')
+ self.assertEqual(expected, template)
+
+ def test_injection_ipv6_with_lxc_no_gateway(self):
+ expected = """\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ dns-nameservers 1.2.3.4 2.3.4.5
+ post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+
+auto eth1
+iface eth1 inet static
+ address 10.10.0.2
+ netmask 255.255.255.0
+ broadcast 10.10.0.255
+ dns-nameservers 1.2.3.4 2.3.4.5
+ post-up ip -6 addr add 1234:567::2/48 dev ${IFACE}
+"""
+ template = self._setup_injected_network_scenario(
+ use_ipv6=True, gateway=False, two_interfaces=True,
+ libvirt_virt_type='lxc')
+ self.assertEqual(expected, template)
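
The only delta between these two LXC templates and the plain IPv6 scenario is the post-up lines: LXC guests of this era could not rely on a native iface ... inet6 stanza, so the template falls back to explicit ip commands, and the default-route line disappears when no gateway is configured. A sketch of that branch, with parameter names assumed:

    def ipv6_post_up_lines(address, prefix, gateway, libvirt_virt_type):
        # Sketch of the LXC-only fallback the two templates assert on.
        if libvirt_virt_type != 'lxc':
            return []  # non-LXC guests use a native "iface ... inet6" stanza
        lines = ['post-up ip -6 addr add %s/%s dev ${IFACE}'
                 % (address, prefix)]
        if gateway:
            lines.append('post-up ip -6 route add default via %s dev ${IFACE}'
                         % gateway)
        return lines
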
diff --git a/nova/tests/network/test_neutronv2.py b/nova/tests/network/test_neutronv2.py
index ff8cf3c29a..a55d37ad21 100644
--- a/nova/tests/network/test_neutronv2.py
+++ b/nova/tests/network/test_neutronv2.py
@@ -33,15 +33,18 @@
from nova.network.neutronv2 import api as neutronapi
from nova.network.neutronv2 import constants
from nova.openstack.common import jsonutils
+from nova.openstack.common import policy as common_policy
+from nova import policy
from nova import test
+from nova.tests import fake_instance
from nova import utils
CONF = cfg.CONF
-#NOTE: Neutron client raises Exception which is discouraged by HACKING.
-# We set this variable here and use it for assertions below to avoid
-# the hacking checks until we can make neutron client throw a custom
-# exception class instead.
+# NOTE: Neutron client raises Exception which is discouraged by HACKING.
+# We set this variable here and use it for assertions below to avoid
+# the hacking checks until we can make neutron client throw a custom
+# exception class instead.
NEUTRON_CLIENT_EXCEPTION = Exception
@@ -227,8 +230,19 @@ def setUp(self):
'name': 'out-of-this-world',
'router:external': True,
'tenant_id': 'should-be-an-admin'}]
+ # A network request with a duplicate
+ self.nets6 = []
+ self.nets6.append(self.nets1[0])
+ self.nets6.append(self.nets1[0])
+ # A network request with a combo
+ self.nets7 = []
+ self.nets7.append(self.nets2[1])
+ self.nets7.append(self.nets1[0])
+ self.nets7.append(self.nets2[1])
+ self.nets7.append(self.nets1[0])
+
self.nets = [self.nets1, self.nets2, self.nets3,
- self.nets4, self.nets5]
+ self.nets4, self.nets5, self.nets6, self.nets7]
self.port_address = '10.0.1.2'
self.port_data1 = [{'network_id': 'my_netid1',
@@ -327,6 +341,13 @@ def setUp(self):
self.addCleanup(self.stubs.UnsetAll)
def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
+ # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
+ # converted to handling instance objects.
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
+ self.instance2 = fake_instance.fake_instance_obj(self.context,
+ **self.instance2)
+
api = neutronapi.API()
self.mox.StubOutWithMock(api, 'get_instance_nw_info')
has_portbinding = False
@@ -356,37 +377,56 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
if macs:
macs = set(macs)
req_net_ids = []
+ ordered_networks = []
+ port = {}
if 'requested_networks' in kwargs:
- for id, fixed_ip, port_id in kwargs['requested_networks']:
+ for n_id, fixed_ip, port_id in kwargs['requested_networks']:
if port_id:
- self.moxed_client.show_port(port_id).AndReturn(
- {'port': {'id': 'my_portid1',
- 'network_id': 'my_netid1',
- 'mac_address': 'my_mac1',
- 'device_id': kwargs.get('_device') and
- self.instance2['uuid'] or ''}})
-
- ports['my_netid1'] = self.port_data1[0]
- id = 'my_netid1'
- if macs is not None:
- macs.discard('my_mac1')
+ if port_id == 'my_portid3':
+ self.moxed_client.show_port(port_id).AndReturn(
+ {'port': {'id': 'my_portid3',
+ 'network_id': 'my_netid1',
+ 'mac_address': 'my_mac1',
+ 'device_id': kwargs.get('_device') and
+ self.instance2.uuid or
+ ''}})
+ ports['my_netid1'] = [self.port_data1[0],
+ self.port_data3[0]]
+ ports[port_id] = self.port_data3[0]
+ n_id = 'my_netid1'
+ if macs is not None:
+ macs.discard('my_mac1')
+ else:
+ self.moxed_client.show_port(port_id).AndReturn(
+ {'port': {'id': 'my_portid1',
+ 'network_id': 'my_netid1',
+ 'mac_address': 'my_mac1',
+ 'device_id': kwargs.get('_device') and
+ self.instance2.uuid or
+ ''}})
+ ports[port_id] = self.port_data1[0]
+ n_id = 'my_netid1'
+ if macs is not None:
+ macs.discard('my_mac1')
else:
- fixed_ips[id] = fixed_ip
- req_net_ids.append(id)
- expected_network_order = req_net_ids
+ fixed_ips[n_id] = fixed_ip
+ req_net_ids.append(n_id)
+ ordered_networks.append((n_id, fixed_ip, port_id))
else:
- expected_network_order = [n['id'] for n in nets]
+ for n in nets:
+ ordered_networks.append((n['id'], None, None))
if kwargs.get('_break') == 'pre_list_networks':
self.mox.ReplayAll()
return api
- search_ids = [net['id'] for net in nets if net['id'] in req_net_ids]
+        # Search all req_net_ids, mirroring the lookup in api.py
+ search_ids = req_net_ids
if search_ids:
mox_list_params = {'id': mox.SameElementsAs(search_ids)}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
else:
- mox_list_params = {'tenant_id': self.instance['project_id'],
+ mox_list_params = {'tenant_id': self.instance.project_id,
'shared': False}
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': nets})
@@ -394,18 +434,34 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
self.moxed_client.list_networks(
**mox_list_params).AndReturn({'networks': []})
+ if (('requested_networks' not in kwargs
+ or kwargs['requested_networks'] == [(None, None, None)])
+ and len(nets) > 1):
+ self.mox.ReplayAll()
+ return api
+
ports_in_requested_net_order = []
- for net_id in expected_network_order:
+ nets_in_requested_net_order = []
+ for net_id, fixed_ip, port_id in ordered_networks:
port_req_body = {
'port': {
- 'device_id': self.instance['uuid'],
+ 'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
},
}
+            # Look up this net_id among the available networks
+ network = None
+ for net in nets:
+ if net['id'] == net_id:
+ network = net
+ break
+            # If net_id did not pass validate_networks() and is not
+            # available here, skip it rather than continuing with a
+            # None network
+ else:
+ continue
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
- port = ports.get(net_id, None)
if not has_portbinding:
api._populate_neutron_extension_values(mox.IgnoreArg(),
self.instance, mox.IgnoreArg()).AndReturn(None)
@@ -416,8 +472,8 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
AndReturn(has_portbinding)
api._has_port_binding_extension(mox.IgnoreArg()).\
AndReturn(has_portbinding)
- if port:
- port_id = port['id']
+ if port_id:
+ port = ports[port_id]
self.moxed_client.update_port(port_id,
MyComparator(port_req_body)
).AndReturn(
@@ -431,7 +487,7 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
port_req_body['port']['network_id'] = net_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
- self.instance['project_id']
+ self.instance.project_id
if macs:
port_req_body['port']['mac_address'] = macs.pop()
if has_portbinding:
@@ -447,9 +503,11 @@ def _stub_allocate_for_instance(self, net_idx=1, **kwargs):
MyComparator(port_req_body)).AndReturn(res_port)
ports_in_requested_net_order.append(res_port['port']['id'])
+ nets_in_requested_net_order.append(network)
+
api.get_instance_nw_info(mox.IgnoreArg(),
self.instance,
- networks=nets,
+ networks=nets_in_requested_net_order,
port_ids=ports_in_requested_net_order
).AndReturn(self._returned_nw_info)
self.mox.ReplayAll()
@@ -467,9 +525,10 @@ def _verify_nw_info(self, nw_inf, index=0):
self.assertEqual('my_mac%s' % id_suffix, nw_inf[index]['address'])
self.assertEqual('10.0.%s.0/24' % id_suffix,
nw_inf[index]['network']['subnets'][0]['cidr'])
- self.assertTrue(model.IP(address='8.8.%s.1' % id_suffix,
- version=4, type='dns') in
- nw_inf[index]['network']['subnets'][0]['dns'])
+
+ ip_addr = model.IP(address='8.8.%s.1' % id_suffix,
+ version=4, type='dns')
+ self.assertIn(ip_addr, nw_inf[index]['network']['subnets'][0]['dns'])
def _get_instance_nw_info(self, number):
api = neutronapi.API()
@@ -781,7 +840,10 @@ def test_allocate_for_instance_1(self):
def test_allocate_for_instance_2(self):
# Allocate one port in two networks env.
- self._allocate_for_instance(2)
+ api = self._stub_allocate_for_instance(net_idx=2)
+ self.assertRaises(exception.NetworkAmbiguous,
+ api.allocate_for_instance,
+ self.context, self.instance)
def test_allocate_for_instance_accepts_macs_kwargs_None(self):
# The macs kwarg should be accepted as None.
@@ -857,6 +919,23 @@ def test_allocate_for_instance_mac_conflicting_requested_port(self):
self.instance, requested_networks=requested_networks,
macs=set(['unknown:mac']))
+ def test_allocate_for_instance_without_requested_networks(self):
+ api = self._stub_allocate_for_instance(net_idx=3)
+ self.assertRaises(exception.NetworkAmbiguous,
+ api.allocate_for_instance,
+ self.context, self.instance)
+
+ def test_allocate_for_instance_with_requested_non_available_network(self):
+        """Verify that a non-available network is ignored.
+
+        self.nets2 (net_idx=2) is composed of self.nets3[0] and self.nets3[1];
+        do not create a port on the non-available network self.nets3[2].
+ """
+ requested_networks = [
+ (net['id'], None, None)
+ for net in (self.nets3[0], self.nets3[2], self.nets3[1])]
+ self._allocate_for_instance(net_idx=2,
+ requested_networks=requested_networks)
+
def test_allocate_for_instance_with_requested_networks(self):
# specify only first and last network
requested_networks = [
@@ -872,15 +951,17 @@ def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_port(self):
- requested_networks = [(None, None, 'myportid1')]
+ requested_networks = [(None, None, 'my_portid1')]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_no_networks(self):
"""verify the exception thrown when there are no networks defined."""
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
api = neutronapi.API()
self.moxed_client.list_networks(
- tenant_id=self.instance['project_id'],
+ tenant_id=self.instance.project_id,
shared=False).AndReturn(
{'networks': model.NetworkInfo([])})
self.moxed_client.list_networks(shared=True).AndReturn(
@@ -896,22 +977,23 @@ def test_allocate_for_instance_ex1(self):
Mox to raise exception when creating a second port.
In this case, the code should delete the first created port.
"""
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
AndReturn(False)
+ requested_networks = [
+ (net['id'], None, None)
+ for net in (self.nets2[0], self.nets2[1])]
self.moxed_client.list_networks(
- tenant_id=self.instance['project_id'],
- shared=False).AndReturn(
- {'networks': self.nets2})
- self.moxed_client.list_networks(shared=True).AndReturn(
- {'networks': []})
+ id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
index = 0
for network in self.nets2:
binding_port_req_body = {
'port': {
- 'device_id': self.instance['uuid'],
+ 'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
},
}
@@ -919,7 +1001,7 @@ def test_allocate_for_instance_ex1(self):
'port': {
'network_id': network['id'],
'admin_state_up': True,
- 'tenant_id': self.instance['project_id'],
+ 'tenant_id': self.instance.project_id,
},
}
port_req_body['port'].update(binding_port_req_body['port'])
@@ -931,9 +1013,7 @@ def test_allocate_for_instance_ex1(self):
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn({'port': port})
else:
- NeutronOverQuota = exceptions.NeutronClientException(
- message="Quota exceeded for resources: ['port']",
- status_code=409)
+ NeutronOverQuota = exceptions.OverQuotaClient()
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(NeutronOverQuota)
index += 1
@@ -941,7 +1021,8 @@ def test_allocate_for_instance_ex1(self):
self.mox.ReplayAll()
self.assertRaises(exception.PortLimitExceeded,
api.allocate_for_instance,
- self.context, self.instance)
+ self.context, self.instance,
+ requested_networks=requested_networks)
def test_allocate_for_instance_ex2(self):
"""verify we have no port to delete
@@ -950,20 +1031,21 @@ def test_allocate_for_instance_ex2(self):
Mox to raise exception when creating the first port.
In this case, the code should not delete any ports.
"""
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_populate_neutron_extension_values')
self.mox.StubOutWithMock(api, '_has_port_binding_extension')
api._has_port_binding_extension(mox.IgnoreArg()).MultipleTimes().\
AndReturn(False)
+ requested_networks = [
+ (net['id'], None, None)
+ for net in (self.nets2[0], self.nets2[1])]
self.moxed_client.list_networks(
- tenant_id=self.instance['project_id'],
- shared=False).AndReturn(
- {'networks': self.nets2})
- self.moxed_client.list_networks(shared=True).AndReturn(
- {'networks': []})
+ id=['my_netid1', 'my_netid2']).AndReturn({'networks': self.nets2})
binding_port_req_body = {
'port': {
- 'device_id': self.instance['uuid'],
+ 'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
},
}
@@ -971,8 +1053,8 @@ def test_allocate_for_instance_ex2(self):
'port': {
'network_id': self.nets2[0]['id'],
'admin_state_up': True,
- 'device_id': self.instance['uuid'],
- 'tenant_id': self.instance['project_id'],
+ 'device_id': self.instance.uuid,
+ 'tenant_id': self.instance.project_id,
},
}
api._populate_neutron_extension_values(self.context,
@@ -982,16 +1064,19 @@ def test_allocate_for_instance_ex2(self):
Exception("fail to create port"))
self.mox.ReplayAll()
self.assertRaises(NEUTRON_CLIENT_EXCEPTION, api.allocate_for_instance,
- self.context, self.instance)
+ self.context, self.instance,
+ requested_networks=requested_networks)
def test_allocate_for_instance_no_port_or_network(self):
class BailOutEarly(Exception):
pass
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
api = neutronapi.API()
self.mox.StubOutWithMock(api, '_get_available_networks')
# Make sure we get an empty list and then bail out of the rest
# of the function
- api._get_available_networks(self.context, self.instance['project_id'],
+ api._get_available_networks(self.context, self.instance.project_id,
[]).AndRaise(BailOutEarly)
self.mox.ReplayAll()
self.assertRaises(BailOutEarly,
@@ -1019,13 +1104,17 @@ def test_allocate_for_instance_port_in_use(self):
self.instance, requested_networks=requested_networks)
def _deallocate_for_instance(self, number, requested_networks=None):
+ # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
+ # converted to handling instance objects.
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
api = neutronapi.API()
port_data = number == 1 and self.port_data1 or self.port_data2
ret_data = copy.deepcopy(port_data)
if requested_networks:
for net, fip, port in requested_networks:
ret_data.append({'network_id': net,
- 'device_id': self.instance['uuid'],
+ 'device_id': self.instance.uuid,
'device_owner': 'compute:nova',
'id': port,
'status': 'DOWN',
@@ -1033,7 +1122,7 @@ def _deallocate_for_instance(self, number, requested_networks=None):
'fixed_ips': [],
'mac_address': 'fake_mac', })
self.moxed_client.list_ports(
- device_id=self.instance['uuid']).AndReturn(
+ device_id=self.instance.uuid).AndReturn(
{'ports': ret_data})
if requested_networks:
for net, fip, port in requested_networks:
@@ -1043,7 +1132,7 @@ def _deallocate_for_instance(self, number, requested_networks=None):
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(self.context,
- self.instance['uuid'],
+ self.instance.uuid,
{'network_info': '[]'})
self.mox.ReplayAll()
@@ -1070,9 +1159,13 @@ def test_deallocate_for_instance_2(self):
self._deallocate_for_instance(2)
def test_deallocate_for_instance_port_not_found(self):
+ # TODO(mriedem): Remove this conversion when all neutronv2 APIs are
+ # converted to handling instance objects.
+ self.instance = fake_instance.fake_instance_obj(self.context,
+ **self.instance)
port_data = self.port_data1
self.moxed_client.list_ports(
- device_id=self.instance['uuid']).AndReturn(
+ device_id=self.instance.uuid).AndReturn(
{'ports': port_data})
NeutronNotFound = neutronv2.exceptions.NeutronClientException(
@@ -1211,9 +1304,10 @@ def test_validate_networks_ex_2(self):
except exception.NetworkNotFound as ex:
self.assertIn("my_netid2, my_netid3", str(ex))
- def test_validate_networks_duplicate(self):
+ def test_validate_networks_duplicate_disable(self):
"""Verify that the correct exception is thrown when duplicate
- network ids are passed to validate_networks.
+        network ids are passed to validate_networks while the nova config
+        flag allow_duplicate_networks is at its default value, False.
"""
requested_networks = [('my_netid1', None, None),
('my_netid1', None, None)]
@@ -1222,8 +1316,59 @@ def test_validate_networks_duplicate(self):
neutronv2.get_client(None)
api = neutronapi.API()
self.assertRaises(exception.NetworkDuplicated,
- api.validate_networks,
- self.context, requested_networks, 1)
+ api.validate_networks,
+ self.context, requested_networks, 1)
+
+ def test_validate_networks_duplicate_enable(self):
+        """Verify that no NetworkDuplicated exception is raised when
+        duplicate network ids are passed to validate_networks while the
+        nova config flag allow_duplicate_networks is set to True.
+ """
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ requested_networks = [('my_netid1', None, None),
+ ('my_netid1', None, None)]
+ ids = ['my_netid1', 'my_netid1']
+
+ self.moxed_client.list_networks(
+ id=mox.SameElementsAs(ids)).AndReturn(
+ {'networks': self.nets1})
+ self.moxed_client.list_ports(tenant_id='my_tenantid').AndReturn(
+ {'ports': []})
+ self.moxed_client.show_quota(
+ tenant_id='my_tenantid').AndReturn(
+ {'quota': {'port': 50}})
+ self.mox.ReplayAll()
+ api = neutronapi.API()
+ api.validate_networks(self.context, requested_networks, 1)
+
+ def test_allocate_for_instance_with_requested_networks_duplicates(self):
+ # specify a duplicate network to allocate to instance
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ requested_networks = [
+ (net['id'], None, None)
+ for net in (self.nets6[0], self.nets6[1])]
+ self._allocate_for_instance(net_idx=6,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_requested_networks_duplicates_port(self):
+ # specify first port and last port that are in same network
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ requested_networks = [
+ (None, None, port['id'])
+ for port in (self.port_data1[0], self.port_data3[0])]
+ self._allocate_for_instance(net_idx=6,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_requested_networks_duplicates_combo(self):
+ # specify a combo net_idx=7 : net2, port in net1, net2, port in net1
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ requested_networks = [
+ ('my_netid2', None, None),
+ (None, None, self.port_data1[0]['id']),
+ ('my_netid2', None, None),
+ (None, None, self.port_data3[0]['id'])]
+ self._allocate_for_instance(net_idx=7,
+ requested_networks=requested_networks)
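
All of the duplicate-network tests above hinge on a single knob: with allow_duplicate_networks=False the requested network ids must be unique, otherwise duplicates are allowed through to allocation. A toy sketch of the validation step being toggled (the exception name comes from the tests, everything else is invented):

    class NetworkDuplicated(Exception):
        pass

    def check_duplicates(net_ids, allow_duplicate_networks):
        # Sketch of the check the disable/enable test pairs exercise.
        if not allow_duplicate_networks and len(net_ids) != len(set(net_ids)):
            raise NetworkDuplicated('duplicate networks: %s' % net_ids)

    check_duplicates(['my_netid1', 'my_netid1'], True)     # passes
    # check_duplicates(['my_netid1', 'my_netid1'], False)  # raises
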
def test_validate_networks_not_specified(self):
requested_networks = []
@@ -1315,10 +1460,15 @@ def test_validate_networks_no_subnet_id(self):
api.validate_networks,
self.context, requested_networks, 1)
- def test_validate_networks_ports_in_same_network(self):
+ def test_validate_networks_ports_in_same_network_disable(self):
+        """Verify that a NetworkDuplicated exception is raised when ports
+        on the same network are passed to validate_networks while the nova
+        config flag allow_duplicate_networks is at its default value, False.
+ """
+ self.flags(allow_duplicate_networks=False, group='neutron')
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
- 'subnet_id': 'subnet_id'}
+ 'subnet_id': 'subnet_id'}
port_b = self.port_data1[0]
self.assertEqual(port_a['network_id'], port_b['network_id'])
for port in [port_a, port_b]:
@@ -1326,9 +1476,11 @@ def test_validate_networks_ports_in_same_network(self):
port['device_owner'] = None
requested_networks = [(None, None, port_a['id']),
- (None, None, port_b['id'])]
- self.moxed_client.show_port(port_a['id']).AndReturn({'port': port_a})
- self.moxed_client.show_port(port_b['id']).AndReturn({'port': port_b})
+ (None, None, port_b['id'])]
+ self.moxed_client.show_port(port_a['id']).AndReturn(
+ {'port': port_a})
+ self.moxed_client.show_port(port_b['id']).AndReturn(
+ {'port': port_b})
self.mox.ReplayAll()
@@ -1337,6 +1489,33 @@ def test_validate_networks_ports_in_same_network(self):
api.validate_networks,
self.context, requested_networks, 1)
+ def test_validate_networks_ports_in_same_network_enable(self):
+        """Verify that no NetworkDuplicated exception is raised when ports
+        on the same network are passed to validate_networks while the nova
+        config flag allow_duplicate_networks is set to True.
+ """
+ self.flags(allow_duplicate_networks=True, group='neutron')
+ port_a = self.port_data3[0]
+ port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
+ 'subnet_id': 'subnet_id'}
+ port_b = self.port_data1[0]
+ self.assertEqual(port_a['network_id'], port_b['network_id'])
+ for port in [port_a, port_b]:
+ port['device_id'] = None
+ port['device_owner'] = None
+
+ requested_networks = [(None, None, port_a['id']),
+ (None, None, port_b['id'])]
+ self.moxed_client.show_port(port_a['id']).AndReturn(
+ {'port': port_a})
+ self.moxed_client.show_port(port_b['id']).AndReturn(
+ {'port': port_b})
+
+ self.mox.ReplayAll()
+
+ api = neutronapi.API()
+ api.validate_networks(self.context, requested_networks, 1)
+
def test_validate_networks_ports_not_in_same_network(self):
port_a = self.port_data3[0]
port_a['fixed_ips'] = {'ip_address': '10.0.0.2',
@@ -1571,6 +1750,13 @@ def test_get_available_networks_with_externalnet_admin_ctx(self):
self._get_available_networks(self.nets5, pub_nets=[],
req_ids=req_ids, context=admin_ctx)
+ def test_get_available_networks_with_custom_policy(self):
+ rules = {'network:attach_external_network':
+ common_policy.parse_rule('')}
+ policy.set_rules(rules)
+ req_ids = [net['id'] for net in self.nets5]
+ self._get_available_networks(self.nets5, pub_nets=[], req_ids=req_ids)
+
def test_get_floating_ip_pools(self):
api = neutronapi.API()
search_opts = {'router:external': True}
@@ -1578,8 +1764,7 @@ def test_get_floating_ip_pools(self):
AndReturn({'networks': [self.fip_pool, self.fip_pool_nova]})
self.mox.ReplayAll()
pools = api.get_floating_ip_pools(self.context)
- expected = [{'name': self.fip_pool['name']},
- {'name': self.fip_pool_nova['name']}]
+ expected = [self.fip_pool['name'], self.fip_pool_nova['name']]
self.assertEqual(expected, pools)
def _get_expected_fip_model(self, fip_data, idx=0):
@@ -1855,7 +2040,8 @@ def test_associate_floating_ip(self):
api.associate_floating_ip(self.context, self.instance,
address, fixed_address)
- def test_reassociate_floating_ip(self):
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ def test_reassociate_floating_ip(self, mock_get):
api = neutronapi.API()
address = self.fip_associated['floating_ip_address']
new_fixed_address = self.port_address
@@ -1872,11 +2058,10 @@ def test_reassociate_floating_ip(self):
'fixed_ip_address': new_fixed_address}})
self.moxed_client.show_port(self.fip_associated['port_id']).\
AndReturn({'port': self.port_data2[1]})
- self.mox.StubOutWithMock(api.db, 'instance_get_by_uuid')
- api.db.instance_get_by_uuid(mox.IgnoreArg(),
- self.instance['uuid']).\
- AndReturn(self.instance)
- self._setup_mock_for_refresh_cache(api, [self.instance,
+
+ mock_get.return_value = fake_instance.fake_instance_obj(
+ self.context, **self.instance)
+ self._setup_mock_for_refresh_cache(api, [mock_get.return_value,
self.instance2])
self.mox.ReplayAll()
@@ -2039,6 +2224,13 @@ def test_nw_info_build_network_ovs(self):
self.assertNotIn('should_create_bridge', net)
self.assertEqual(iid, 'port-id')
+ def test_nw_info_build_network_dvs(self):
+ net, iid = self._test_nw_info_build_network(model.VIF_TYPE_DVS)
+ self.assertEqual('foo-net-id', net['bridge'])
+ self.assertNotIn('should_create_bridge', net)
+ self.assertNotIn('ovs_interfaceid', net)
+ self.assertIsNone(iid)
+
def test_nw_info_build_network_bridge(self):
net, iid = self._test_nw_info_build_network(model.VIF_TYPE_BRIDGE)
self.assertEqual(net['bridge'], 'brqnet-id')
@@ -2326,6 +2518,62 @@ def test_allocate_floating_ip_exceed_limit(self):
api.allocate_floating_ip,
self.context, pool_name)
+ def test_create_port_for_instance_no_more_ip(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ net = {'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': instance['project_id']}
+
+ with mock.patch.object(client.Client, 'create_port',
+ side_effect=exceptions.IpAddressGenerationFailureClient()) as (
+ create_port_mock):
+ zone = 'compute:%s' % instance['availability_zone']
+ port_req_body = {'port': {'device_id': instance['uuid'],
+ 'device_owner': zone}}
+ self.assertRaises(exception.NoMoreFixedIps,
+ self.api._create_port,
+ neutronv2.get_client(self.context),
+ instance, net['id'], port_req_body)
+ create_port_mock.assert_called_once_with(port_req_body)
+
+ @mock.patch.object(client.Client, 'create_port',
+ side_effect=exceptions.MacAddressInUseClient())
+ def test_create_port_for_instance_mac_address_in_use(self,
+ create_port_mock):
+ # Create fake data.
+ instance = fake_instance.fake_instance_obj(self.context)
+ net = {'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': instance['project_id']}
+ zone = 'compute:%s' % instance['availability_zone']
+ port_req_body = {'port': {'device_id': instance['uuid'],
+ 'device_owner': zone,
+ 'mac_address': 'XX:XX:XX:XX:XX:XX'}}
+ available_macs = set(['XX:XX:XX:XX:XX:XX'])
+ # Run the code.
+ self.assertRaises(exception.PortInUse,
+ self.api._create_port,
+ neutronv2.get_client(self.context),
+ instance, net['id'], port_req_body,
+ available_macs=available_macs)
+ # Assert the calls.
+ create_port_mock.assert_called_once_with(port_req_body)
+
+ def test_get_network_detail_not_found(self):
+ api = neutronapi.API()
+ expected_exc = exceptions.NetworkNotFoundClient()
+ network_uuid = '02cacbca-7d48-4a2c-8011-43eecf8a9786'
+ with mock.patch.object(client.Client, 'show_network',
+ side_effect=expected_exc) as (
+ fake_show_network):
+ self.assertRaises(exception.NetworkNotFound,
+ api.get,
+ self.context,
+ network_uuid)
+ fake_show_network.assert_called_once_with(network_uuid)
+
class TestNeutronv2ModuleMethods(test.TestCase):
@@ -2482,6 +2730,7 @@ def client_mock(*args, **kwargs):
self.flags(url_timeout=30, group='neutron')
if use_id:
self.flags(admin_tenant_id='admin_tenant_id', group='neutron')
+ self.flags(admin_user_id='admin_user_id', group='neutron')
if admin_context:
my_context = context.get_admin_context()
@@ -2492,7 +2741,6 @@ def client_mock(*args, **kwargs):
kwargs = {
'auth_url': CONF.neutron.admin_auth_url,
'password': CONF.neutron.admin_password,
- 'username': CONF.neutron.admin_username,
'endpoint_url': CONF.neutron.url,
'auth_strategy': None,
'timeout': CONF.neutron.url_timeout,
@@ -2501,12 +2749,14 @@ def client_mock(*args, **kwargs):
'token': None}
if use_id:
kwargs['tenant_id'] = CONF.neutron.admin_tenant_id
+ kwargs['user_id'] = CONF.neutron.admin_user_id
else:
kwargs['tenant_name'] = CONF.neutron.admin_tenant_name
+ kwargs['username'] = CONF.neutron.admin_username
client.Client.__init__(**kwargs).WithSideEffects(client_mock)
self.mox.ReplayAll()
- #clean global
+ # clean global
token_store = neutronv2.AdminTokenStore.get()
token_store.admin_auth_token = None
if admin_context:
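
The get-client changes above switch the neutron admin client between two credential styles: when admin_tenant_id (and now admin_user_id) are configured, the client is built from ids with no username at all; otherwise it falls back to tenant_name/username. A minimal sketch of the selection, with attribute names taken from the hunk:

    def admin_client_kwargs(conf):
        # Sketch: choose id-based or name-based neutron admin credentials.
        kwargs = {'auth_url': conf.admin_auth_url,
                  'password': conf.admin_password}
        if getattr(conf, 'admin_tenant_id', None):
            kwargs['tenant_id'] = conf.admin_tenant_id
            kwargs['user_id'] = conf.admin_user_id
        else:
            kwargs['tenant_name'] = conf.admin_tenant_name
            kwargs['username'] = conf.admin_username
        return kwargs
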
diff --git a/nova/tests/objects/test_aggregate.py b/nova/tests/objects/test_aggregate.py
index ca665474a4..a1d016bb2b 100644
--- a/nova/tests/objects/test_aggregate.py
+++ b/nova/tests/objects/test_aggregate.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
+
from nova import db
from nova import exception
from nova.objects import aggregate
@@ -163,6 +165,29 @@ def test_by_host(self):
self.assertEqual(1, len(aggs))
self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+ @mock.patch('nova.db.aggregate_get_by_metadata_key')
+ def test_get_by_metadata_key(self, get_by_metadata_key):
+ get_by_metadata_key.return_value = [fake_aggregate]
+ aggs = aggregate.AggregateList.get_by_metadata_key(
+ self.context, 'this')
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
+ @mock.patch('nova.db.aggregate_get_by_metadata_key')
+ def test_get_by_metadata_key_and_hosts_no_match(self, get_by_metadata_key):
+ get_by_metadata_key.return_value = [fake_aggregate]
+ aggs = aggregate.AggregateList.get_by_metadata_key(
+ self.context, 'this', hosts=['baz'])
+ self.assertEqual(0, len(aggs))
+
+ @mock.patch('nova.db.aggregate_get_by_metadata_key')
+ def test_get_by_metadata_key_and_hosts_match(self, get_by_metadata_key):
+ get_by_metadata_key.return_value = [fake_aggregate]
+ aggs = aggregate.AggregateList.get_by_metadata_key(
+ self.context, 'this', hosts=['foo', 'bar'])
+ self.assertEqual(1, len(aggs))
+ self.compare_obj(aggs[0], fake_aggregate, subs=SUBS)
+
class TestAggregateObject(test_objects._LocalTest,
_TestAggregateObject):
diff --git a/nova/tests/objects/test_compute_node.py b/nova/tests/objects/test_compute_node.py
index 6b426e608e..c02646f3e2 100644
--- a/nova/tests/objects/test_compute_node.py
+++ b/nova/tests/objects/test_compute_node.py
@@ -21,6 +21,7 @@
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.tests.objects import test_objects
+from nova.virt import hardware
NOW = timeutils.utcnow().replace(microsecond=0)
fake_stats = {'num_foo': '10'}
@@ -28,6 +29,10 @@
# host_ip is coerced from a string to an IPAddress
# but needs to be converted to a string for the database format
fake_host_ip = '127.0.0.1'
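+# A two-cell NUMA topology fixture; to_json() gives the string form that
+# is stored in the compute node's numa_topology column.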
+fake_numa_topology = hardware.VirtNUMAHostTopology(
+ cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 512),
+ hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 512)])
+fake_numa_topology_db_format = fake_numa_topology.to_json()
fake_compute_node = {
'created_at': NOW,
'updated_at': None,
@@ -53,6 +58,7 @@
'metrics': '',
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
+ 'numa_topology': fake_numa_topology_db_format,
}
@@ -89,7 +95,7 @@ def test_create(self):
compute = compute_node.ComputeNode()
compute.service_id = 456
compute.stats = fake_stats
- #NOTE (pmurray): host_ip is coerced to an IPAddress
+ # NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.create(self.context)
self.compare_obj(compute, fake_compute_node,
@@ -121,7 +127,7 @@ def test_save(self):
compute.id = 123
compute.vcpus_used = 3
compute.stats = fake_stats
- #NOTE (pmurray): host_ip is coerced to an IPAddress
+ # NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.save(self.context)
self.compare_obj(compute, fake_compute_node,
@@ -189,6 +195,11 @@ def test_get_by_service(self, service_get):
comparators={'stats': self.json_comparator,
'host_ip': self.str_comparator})
+ def test_compat_numa_topology(self):
+ compute = compute_node.ComputeNode()
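+        # numa_topology is new in this object version, so converting to
+        # an older target version must drop the field entirely.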
+ primitive = compute.obj_to_primitive(target_version='1.4')
+ self.assertNotIn('numa_topology', primitive)
+
class TestComputeNodeObject(test_objects._LocalTest,
_TestComputeNodeObject):
diff --git a/nova/tests/objects/test_ec2.py b/nova/tests/objects/test_ec2.py
index 519a13f96e..9b3dc38b18 100644
--- a/nova/tests/objects/test_ec2.py
+++ b/nova/tests/objects/test_ec2.py
@@ -109,3 +109,84 @@ class TestEC2VolumeMapping(test_objects._LocalTest, _TestEC2VolumeMapping):
class TestRemoteEC2VolumeMapping(test_objects._RemoteTest,
_TestEC2VolumeMapping):
pass
+
+
+class _TestEC2SnapshotMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+            test.assertEqual(value, obj[field])
+
+ def test_create(self):
+ smap = ec2_obj.EC2SnapshotMapping()
+ smap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 'ec2_snapshot_create') as create:
+ create.return_value = fake_map
+ smap.create(self.context)
+
+ self.assertEqual(self.context, smap._context)
+ smap._context = None
+ self._compare(self, fake_map, smap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 'ec2_snapshot_get_by_uuid') as get:
+ get.return_value = fake_map
+ smap = ec2_obj.EC2SnapshotMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, smap)
+
+ def test_get_by_ec2_id(self):
+ with mock.patch.object(db, 'ec2_snapshot_get_by_ec2_id') as get:
+ get.return_value = fake_map
+ smap = ec2_obj.EC2SnapshotMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, smap)
+
+
+class TestEC2SnapshotMapping(test_objects._LocalTest, _TestEC2SnapshotMapping):
+ pass
+
+
+class TestRemoteEC2SnapshotMapping(test_objects._RemoteTest,
+ _TestEC2SnapshotMapping):
+ pass
+
+
+class _TestS3ImageMapping(object):
+ @staticmethod
+ def _compare(test, db, obj):
+ for field, value in db.items():
+            test.assertEqual(value, obj[field])
+
+ def test_create(self):
+ s3imap = ec2_obj.S3ImageMapping()
+ s3imap.uuid = 'fake-uuid-2'
+
+ with mock.patch.object(db, 's3_image_create') as create:
+ create.return_value = fake_map
+ s3imap.create(self.context)
+
+ self.assertEqual(self.context, s3imap._context)
+ s3imap._context = None
+ self._compare(self, fake_map, s3imap)
+
+ def test_get_by_uuid(self):
+ with mock.patch.object(db, 's3_image_get_by_uuid') as get:
+ get.return_value = fake_map
+ s3imap = ec2_obj.S3ImageMapping.get_by_uuid(self.context,
+ 'fake-uuid-2')
+ self._compare(self, fake_map, s3imap)
+
+ def test_get_by_s3_id(self):
+ with mock.patch.object(db, 's3_image_get') as get:
+ get.return_value = fake_map
+ s3imap = ec2_obj.S3ImageMapping.get_by_id(self.context, 1)
+ self._compare(self, fake_map, s3imap)
+
+
+class TestS3ImageMapping(test_objects._LocalTest, _TestS3ImageMapping):
+ pass
+
+
+class TestRemoteS3ImageMapping(test_objects._RemoteTest, _TestS3ImageMapping):
+ pass
diff --git a/nova/tests/objects/test_fields.py b/nova/tests/objects/test_fields.py
index fd437a2207..cfc5b87d9f 100644
--- a/nova/tests/objects/test_fields.py
+++ b/nova/tests/objects/test_fields.py
@@ -222,6 +222,29 @@ def test_stringify(self):
'key': 'val'}))
+class TestListOfDictOfNullableStringsField(TestField):
+ def setUp(self):
+ super(TestListOfDictOfNullableStringsField, self).setUp()
+ self.field = fields.ListOfDictOfNullableStringsField()
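+        # Each entry below is an (input, expected) pair: non-string values
+        # are coerced to strings, while None values are preserved.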
+ self.coerce_good_values = [([{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}],
+ [{'f': 'b', 'f1': 'b1'}, {'f2': 'b2'}]),
+ ([{'f': 1}, {'f1': 'b1'}],
+ [{'f': '1'}, {'f1': 'b1'}]),
+ ([{'foo': None}], [{'foo': None}])]
+ self.coerce_bad_values = [[{1: 'a'}], ['ham', 1], ['eggs']]
+ self.to_primitive_values = [([{'f': 'b'}, {'f1': 'b1'}, {'f2': None}],
+ [{'f': 'b'}, {'f1': 'b1'}, {'f2': None}])]
+ self.from_primitive_values = [([{'f': 'b'}, {'f1': 'b1'},
+ {'f2': None}],
+ [{'f': 'b'}, {'f1': 'b1'},
+ {'f2': None}])]
+
+ def test_stringify(self):
+ self.assertEqual("[{f=None,f1='b1'},{f2='b2'}]",
+ self.field.stringify(
+ [{'f': None, 'f1': 'b1'}, {'f2': 'b2'}]))
+
+
class TestList(TestField):
def setUp(self):
super(TestList, self).setUp()
@@ -248,6 +271,34 @@ def test_stringify(self):
self.assertEqual("['abc']", self.field.stringify(['abc']))
+class TestSet(TestField):
+ def setUp(self):
+ super(TestSet, self).setUp()
+ self.field = fields.Field(fields.Set(FakeFieldType()))
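+        # FakeFieldType wraps coerced elements in '*' and primitives in
+        # '!', which makes the conversions below easy to verify.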
+ self.coerce_good_values = [(set(['foo', 'bar']),
+ set(['*foo*', '*bar*']))]
+ self.coerce_bad_values = [['foo'], {'foo': 'bar'}]
+ self.to_primitive_values = [(set(['foo']), tuple(['!foo!']))]
+ self.from_primitive_values = [(tuple(['!foo!']), set(['foo']))]
+
+ def test_stringify(self):
+ self.assertEqual('set([123])', self.field.stringify(set([123])))
+
+
+class TestSetOfIntegers(TestField):
+ def setUp(self):
+ super(TestSetOfIntegers, self).setUp()
+ self.field = fields.SetOfIntegersField()
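+        # String elements such as '1' should be coerced to real integers.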
+ self.coerce_good_values = [(set(['1', 2]),
+ set([1, 2]))]
+ self.coerce_bad_values = [set(['foo'])]
+ self.to_primitive_values = [(set([1]), tuple([1]))]
+ self.from_primitive_values = [(tuple([1]), set([1]))]
+
+ def test_stringify(self):
+ self.assertEqual('set([1,2])', self.field.stringify(set([1, 2])))
+
+
class TestObject(TestField):
def setUp(self):
super(TestObject, self).setUp()
@@ -273,7 +324,8 @@ class OtherTestableObject(obj_base.NovaObject):
self.coerce_bad_values = [OtherTestableObject(), 1, 'foo']
self.to_primitive_values = [(test_inst, test_inst.obj_to_primitive())]
self.from_primitive_values = [(test_inst.obj_to_primitive(),
- test_inst)]
+ test_inst),
+ (test_inst, test_inst)]
def test_stringify(self):
obj = self._test_cls(uuid='fake-uuid')
diff --git a/nova/tests/objects/test_fixed_ip.py b/nova/tests/objects/test_fixed_ip.py
index 04dd4d2f49..d34cd3883c 100644
--- a/nova/tests/objects/test_fixed_ip.py
+++ b/nova/tests/objects/test_fixed_ip.py
@@ -131,13 +131,21 @@ def test_get_by_address_with_extras_deleted_instance(self, instance_get,
self.assertFalse(instance_get.called)
@mock.patch('nova.db.fixed_ip_get_by_floating_address')
- def test_get_by_floating_ip(self, get):
+ def test_get_by_floating_address(self, get):
get.return_value = fake_fixed_ip
fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
'1.2.3.4')
get.assert_called_once_with(self.context, '1.2.3.4')
self._compare(fixedip, fake_fixed_ip)
+ @mock.patch('nova.db.fixed_ip_get_by_floating_address')
+ def test_get_by_floating_address_none(self, get):
+ get.return_value = None
+ fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
+ '1.2.3.4')
+ get.assert_called_once_with(self.context, '1.2.3.4')
+ self.assertIsNone(fixedip)
+
@mock.patch('nova.db.fixed_ip_get_by_network_host')
def test_get_by_network_and_host(self, get):
get.return_value = fake_fixed_ip
diff --git a/nova/tests/objects/test_floating_ip.py b/nova/tests/objects/test_floating_ip.py
index a48756ef03..0f3d6fd593 100644
--- a/nova/tests/objects/test_floating_ip.py
+++ b/nova/tests/objects/test_floating_ip.py
@@ -130,13 +130,32 @@ def test_save(self, update):
floatingip = floating_ip.FloatingIP(context=self.context,
id=123, address='1.2.3.4',
host='foo')
- self.assertRaises(exception.ObjectActionError, floatingip.save)
floatingip.obj_reset_changes(['address', 'id'])
floatingip.save()
self.assertEqual(set(), floatingip.obj_what_changed())
update.assert_called_with(self.context, '1.2.3.4',
{'host': 'foo'})
+ def test_save_errors(self):
+ floatingip = floating_ip.FloatingIP(context=self.context,
+ id=123, host='foo')
+ floatingip.obj_reset_changes()
+        floatingip.address = '1.2.3.4'
+ self.assertRaises(exception.ObjectActionError, floatingip.save)
+
+ floatingip.obj_reset_changes()
+ floatingip.fixed_ip_id = 1
+ self.assertRaises(exception.ObjectActionError, floatingip.save)
+
+    @mock.patch('nova.db.floating_ip_update')
+    def test_save_no_fixedip(self, update):
+        update.return_value = fake_floating_ip
+        floatingip = floating_ip.FloatingIP(context=self.context,
+                                            id=123, address='1.2.3.4')
+        floatingip.obj_reset_changes(['id', 'address'])
+        floatingip.fixed_ip = objects.FixedIP(context=self.context,
+                                              id=456)
+        floatingip.save()
+        # The computed fixed_ip relationship must not be passed through
+        # to the database update.
+        self.assertNotIn('fixed_ip', update.call_args[0][2])
+
@mock.patch('nova.db.floating_ip_get_all')
def test_get_all(self, get):
get.return_value = [fake_floating_ip]
diff --git a/nova/tests/objects/test_instance.py b/nova/tests/objects/test_instance.py
index c3bb88f1bd..8220e4d56a 100644
--- a/nova/tests/objects/test_instance.py
+++ b/nova/tests/objects/test_instance.py
@@ -21,7 +21,6 @@
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import flavors
-from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
@@ -105,7 +104,7 @@ def test_get_without_expected(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, 'uuid',
columns_to_join=[],
- use_slave=False
+ use_subordinate=False
).AndReturn(self.fake_instance)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, 'uuid',
@@ -124,7 +123,7 @@ def test_get_with_expected(self):
db.instance_get_by_uuid(
self.context, 'uuid',
columns_to_join=exp_cols,
- use_slave=False
+ use_subordinate=False
).AndReturn(self.fake_instance)
fake_faults = test_instance_fault.fake_faults
db.instance_fault_get_by_instance_uuids(
@@ -156,13 +155,13 @@ def test_load(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(self.fake_instance)
fake_inst2 = dict(self.fake_instance,
system_metadata=[{'key': 'foo', 'value': 'bar'}])
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['system_metadata'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst2)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -187,7 +186,7 @@ def test_get_remote(self):
db.instance_get_by_uuid(self.context, 'fake-uuid',
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_instance)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, 'fake-uuid')
@@ -205,13 +204,13 @@ def test_refresh(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(dict(self.fake_instance,
host='orig-host'))
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(dict(self.fake_instance,
host='new-host'))
self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache,
@@ -233,7 +232,7 @@ def test_refresh_does_not_recurse(self):
self.mox.StubOutWithMock(instance.Instance, 'get_by_uuid')
instance.Instance.get_by_uuid(self.context, uuid=inst.uuid,
expected_attrs=['metadata'],
- use_slave=False
+ use_subordinate=False
).AndReturn(inst_copy)
self.mox.ReplayAll()
self.assertRaises(exception.OrphanedObjectError, inst.refresh)
@@ -280,7 +279,7 @@ def _save_test_helper(self, cell_type, save_kwargs):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(old_ref)
db.instance_update_and_get_original(
self.context, fake_uuid, expected_updates,
@@ -360,7 +359,7 @@ def test_save_rename_sends_notification(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(old_ref)
db.instance_update_and_get_original(
self.context, fake_uuid, expected_updates, update_cells=False,
@@ -373,13 +372,29 @@ def test_save_rename_sends_notification(self):
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, old_ref['uuid'],
- use_slave=False)
+ use_subordinate=False)
self.assertEqual('hello', inst.display_name)
inst.display_name = 'goodbye'
inst.save()
self.assertEqual('goodbye', inst.display_name)
self.assertEqual(set([]), inst.obj_what_changed())
+ @mock.patch('nova.db.instance_update_and_get_original')
+ @mock.patch('nova.objects.Instance._from_db_object')
+ def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update):
+ # NOTE(danms): This tests that we don't update the pci_devices
+ # field from the contents of the database. This is not because we
+ # don't necessarily want to, but because the way pci_devices is
+ # currently implemented it causes versioning issues. When that is
+ # resolved, this test should go away.
+ mock_update.return_value = None, None
+ inst = instance.Instance(context=self.context, id=123)
+ inst.uuid = 'foo'
+ inst.pci_devices = pci_device.PciDeviceList()
+ inst.save()
+ self.assertNotIn('pci_devices',
+ mock_fdo.call_args_list[0][1]['expected_attrs'])
+
def test_get_deleted(self):
fake_inst = dict(self.fake_instance, id=123, deleted=123)
fake_uuid = fake_inst['uuid']
@@ -387,7 +402,7 @@ def test_get_deleted(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -401,7 +416,7 @@ def test_get_not_cleaned(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -415,7 +430,7 @@ def test_get_cleaned(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -439,7 +454,7 @@ def test_with_info_cache(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
db.instance_info_cache_update(self.context, fake_uuid,
{'network_info': nwinfo2_json})
@@ -456,7 +471,7 @@ def test_with_info_cache_none(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
@@ -482,7 +497,7 @@ def test_with_security_groups(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
db.security_group_update(self.context, 1, {'description': 'changed'}
).AndReturn(fake_inst['security_groups'][0])
@@ -507,7 +522,7 @@ def test_with_empty_security_groups(self):
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid)
@@ -519,7 +534,7 @@ def test_with_empty_pci_devices(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['pci_devices'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
@@ -565,7 +580,7 @@ def test_with_pci_devices(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['pci_devices'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = instance.Instance.get_by_uuid(self.context, fake_uuid,
@@ -583,7 +598,7 @@ def test_with_fault(self):
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=[],
- use_slave=False
+ use_subordinate=False
).AndReturn(self.fake_instance)
db.instance_fault_get_by_instance_uuids(
self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
@@ -837,6 +852,51 @@ def test_reset_changes(self):
self.assertEqual({'1985': 'present'}, inst._orig_metadata)
self.assertEqual({}, inst._orig_system_metadata)
+ def test_load_generic_calls_handler(self):
+ inst = instance.Instance(context=self.context,
+ uuid='fake-uuid')
+ with mock.patch.object(inst, '_load_generic') as mock_load:
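+            # Accessing an unset, lazy-loadable attribute should route
+            # through the generic load handler exactly once.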
+ def fake_load(name):
+ inst.system_metadata = {}
+
+ mock_load.side_effect = fake_load
+ inst.system_metadata
+ mock_load.assert_called_once_with('system_metadata')
+
+ def test_load_fault_calls_handler(self):
+ inst = instance.Instance(context=self.context,
+ uuid='fake-uuid')
+ with mock.patch.object(inst, '_load_fault') as mock_load:
+ def fake_load():
+ inst.fault = None
+
+ mock_load.side_effect = fake_load
+ inst.fault
+ mock_load.assert_called_once_with()
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ def test_load_generic(self, mock_get):
+ inst2 = instance.Instance(metadata={'foo': 'bar'})
+ mock_get.return_value = inst2
+ inst = instance.Instance(context=self.context,
+ uuid='fake-uuid')
+        self.assertEqual({'foo': 'bar'}, inst.metadata)
+ mock_get.assert_called_once_with(self.context,
+ uuid='fake-uuid',
+ expected_attrs=['metadata'])
+ self.assertNotIn('metadata', inst.obj_what_changed())
+
+ @mock.patch('nova.db.instance_fault_get_by_instance_uuids')
+ def test_load_fault(self, mock_get):
+ fake_fault = test_instance_fault.fake_faults['fake-uuid'][0]
+ mock_get.return_value = {'fake': [fake_fault]}
+ inst = instance.Instance(context=self.context, uuid='fake')
+ fault = inst.fault
+ mock_get.assert_called_once_with(self.context, ['fake'])
+ self.assertEqual(fake_fault['id'], fault.id)
+        self.assertNotIn('fault', inst.obj_what_changed())
+
class TestInstanceObject(test_objects._LocalTest,
_TestInstanceObject):
@@ -875,11 +935,11 @@ def test_get_all_by_filters(self):
db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
'asc', limit=None, marker=None,
columns_to_join=['metadata'],
- use_slave=False).AndReturn(fakes)
+ use_subordinate=False).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_filters(
self.context, {'foo': 'bar'}, 'uuid', 'asc',
- expected_attrs=['metadata'], use_slave=False)
+ expected_attrs=['metadata'], use_subordinate=False)
for i in range(0, len(fakes)):
self.assertIsInstance(inst_list.objects[i], instance.Instance)
@@ -896,12 +956,12 @@ def test_get_all_by_filters_works_for_cleaned(self):
{'deleted': True, 'cleaned': False},
'uuid', 'asc', limit=None, marker=None,
columns_to_join=['metadata'],
- use_slave=False).AndReturn(
+ use_subordinate=False).AndReturn(
[fakes[1]])
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_filters(
self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
- expected_attrs=['metadata'], use_slave=False)
+ expected_attrs=['metadata'], use_subordinate=False)
self.assertEqual(1, len(inst_list))
self.assertIsInstance(inst_list.objects[0], instance.Instance)
@@ -914,7 +974,7 @@ def test_get_by_host(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
db.instance_get_all_by_host(self.context, 'foo',
columns_to_join=None,
- use_slave=False).AndReturn(fakes)
+ use_subordinate=False).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = instance.InstanceList.get_by_host(self.context, 'foo')
for i in range(0, len(fakes)):
@@ -1002,7 +1062,7 @@ def test_with_fault(self):
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_all_by_host(self.context, 'host',
columns_to_join=[],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_insts)
db.instance_fault_get_by_instance_uuids(
self.context, [x['uuid'] for x in fake_insts]
@@ -1010,7 +1070,7 @@ def test_with_fault(self):
self.mox.ReplayAll()
instances = instance.InstanceList.get_by_host(self.context, 'host',
expected_attrs=['fault'],
- use_slave=False)
+ use_subordinate=False)
self.assertEqual(2, len(instances))
self.assertEqual(fake_faults['fake-uuid'][0],
dict(instances[0].fault.iteritems()))
@@ -1089,14 +1149,3 @@ def test_expected_cols(self):
self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar'])
self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar']))
self.assertIsNone(instance._expected_cols(None))
-
-
-class TestAddImageRef(test.TestCase):
- @mock.patch('nova.objects.BlockDeviceMappingList.root_metadata')
- def test_add_image_ref(self, mock_root_metadata):
- mock_root_metadata.return_value = {'image_id': 'fake_image'}
- fake_instance = fakes.stub_instance(id=1, uuid=fakes.FAKE_UUID,
- image_ref='')
- ctx = context.RequestContext('fake-user', 'fake-project')
- new_instance = instance.add_image_ref(ctx, fake_instance)
- self.assertEqual('fake_image', new_instance['image_ref'])
diff --git a/nova/tests/objects/test_instance_group.py b/nova/tests/objects/test_instance_group.py
index c2093ef047..3099059ff8 100644
--- a/nova/tests/objects/test_instance_group.py
+++ b/nova/tests/objects/test_instance_group.py
@@ -38,23 +38,19 @@ def _get_default_values(self):
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
- metadata=None, members=None):
+ members=None):
return db.instance_group_create(context, values, policies=policies,
- metadata=metadata, members=members)
+ members=members)
def test_get_by_uuid(self):
values = self._get_default_values()
- metadata = {'key11': 'value1',
- 'key12': 'value2'}
policies = ['policy1', 'policy2']
members = ['instance_id1', 'instance_id2']
db_result = self._create_instance_group(self.context, values,
- metadata=metadata,
policies=policies,
members=members)
obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
db_result.uuid)
- self.assertEqual(obj_result.metadetails, metadata)
self.assertEqual(obj_result.members, members)
self.assertEqual(obj_result.policies, policies)
@@ -105,18 +101,6 @@ def test_save_members(self):
result = db.instance_group_get(self.context, db_result['uuid'])
self.assertEqual(result['members'], members)
- def test_save_metadata(self):
- values = self._get_default_values()
- db_result = self._create_instance_group(self.context, values)
- obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
- db_result.uuid)
- metadata = {'foo': 'bar'}
- obj_result.metadetails = metadata
- obj_result.save()
- db.instance_group_metadata_get(self.context, db_result['uuid'])
- for key, value in metadata.iteritems():
- self.assertEqual(value, metadata[key])
-
def test_create(self):
group1 = instance_group.InstanceGroup()
group1.uuid = 'fake-uuid'
@@ -150,17 +134,6 @@ def test_create_with_members(self):
self.assertEqual(group1.id, group2.id)
self.assertEqual(group1.members, group2.members)
- def test_create_with_metadata(self):
- group1 = instance_group.InstanceGroup()
- metadata = {'foo': 'bar'}
- group1.metadetails = metadata
- group1.create(self.context)
- group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
- group1.uuid)
- self.assertEqual(group1.id, group2.id)
- for key, value in metadata.iteritems():
- self.assertEqual(value, group2.metadetails[key])
-
def test_recreate_fails(self):
group = instance_group.InstanceGroup()
group.create(self.context)
@@ -296,6 +269,14 @@ def test_get_hosts_with_some_none(self):
self.assertEqual(1, len(hosts))
self.assertIn('hostB', hosts)
+ def test_obj_make_compatible(self):
+ group = instance_group.InstanceGroup(uuid='fake-uuid',
+ name='fake-name')
+ group.create(self.context)
+ group_primitive = group.obj_to_primitive()
+ group.obj_make_compatible(group_primitive, '1.6')
+ self.assertEqual({}, group_primitive['metadetails'])
+
class TestInstanceGroupObject(test_objects._LocalTest,
_TestInstanceGroupObjects):
diff --git a/nova/tests/objects/test_migration.py b/nova/tests/objects/test_migration.py
index 23e1b857a0..ed4b4ea5af 100644
--- a/nova/tests/objects/test_migration.py
+++ b/nova/tests/objects/test_migration.py
@@ -116,7 +116,7 @@ def test_instance(self):
db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'],
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(fake_inst)
mig = migration.Migration._from_db_object(ctxt,
migration.Migration(),
@@ -133,11 +133,11 @@ def test_get_unconfirmed_by_dest_compute(self):
db, 'migration_get_unconfirmed_by_dest_compute')
db.migration_get_unconfirmed_by_dest_compute(
ctxt, 'window', 'foo',
- use_slave=False).AndReturn(db_migrations)
+ use_subordinate=False).AndReturn(db_migrations)
self.mox.ReplayAll()
migrations = (
migration.MigrationList.get_unconfirmed_by_dest_compute(
- ctxt, 'window', 'foo', use_slave=False))
+ ctxt, 'window', 'foo', use_subordinate=False))
self.assertEqual(2, len(migrations))
for index, db_migration in enumerate(db_migrations):
self.compare_obj(migrations[index], db_migration)
diff --git a/nova/tests/objects/test_objects.py b/nova/tests/objects/test_objects.py
index 4d593363ee..efe907e636 100644
--- a/nova/tests/objects/test_objects.py
+++ b/nova/tests/objects/test_objects.py
@@ -30,10 +30,20 @@
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import jsonutils
+from nova.openstack.common import log
from nova.openstack.common import timeutils
from nova import rpc
from nova import test
from nova.tests import fake_notifier
+from nova import utils
+
+
+LOG = log.getLogger(__name__)
+
+
+class MyOwnedObject(base.NovaPersistentObject, base.NovaObject):
+ VERSION = '1.0'
+ fields = {'baz': fields.Field(fields.Integer())}
class MyObj(base.NovaPersistentObject, base.NovaObject):
@@ -42,6 +52,7 @@ class MyObj(base.NovaPersistentObject, base.NovaObject):
'bar': fields.Field(fields.String()),
'missing': fields.Field(fields.String()),
'readonly': fields.Field(fields.Integer(), read_only=True),
+ 'rel_object': fields.ObjectField('MyOwnedObject', nullable=True)
}
@staticmethod
@@ -88,11 +99,12 @@ def modify_save_modify(self, context):
self.bar = 'meow'
self.save()
self.foo = 42
+ self.rel_object = MyOwnedObject(baz=42)
def obj_make_compatible(self, primitive, target_version):
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
- if target_version == '1.1':
+ if target_version == '1.1' and 'bar' in primitive:
primitive['bar'] = 'old%s' % primitive['bar']
@@ -302,7 +314,7 @@ def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
def json_comparator(self, expected, obj_val):
# json-ify an object field for comparison with its db str
- #equivalent
+ # equivalent
self.assertEqual(expected, jsonutils.dumps(obj_val))
def str_comparator(self, expected, obj_val):
@@ -578,9 +590,10 @@ def test_changed_4(self):
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify(self.context)
- self.assertEqual(obj.obj_what_changed(), set(['foo']))
+ self.assertEqual(obj.obj_what_changed(), set(['foo', 'rel_object']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
+ self.assertIsInstance(obj.rel_object, MyOwnedObject)
self.assertRemotes()
def test_changed_with_sub_object(self):
@@ -664,7 +677,8 @@ def test_get(self):
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
- myobj_fields = ['foo', 'bar', 'missing', 'readonly'] + base_fields
+ myobj_fields = ['foo', 'bar', 'missing',
+ 'readonly', 'rel_object'] + base_fields
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
@@ -712,7 +726,7 @@ def test_obj_repr(self):
obj = MyObj(foo=123)
self.assertEqual('MyObj(bar=>,created_at=>,deleted=>,'
'deleted_at=>,foo=123,missing=>,readonly=>,'
- 'updated_at=>)', repr(obj))
+ 'rel_object=>,updated_at=>)', repr(obj))
class TestObject(_LocalTest, _TestObject):
@@ -892,6 +906,23 @@ def test_object_serialization_iterables(self):
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
+ # dict case
+ thing = {'key': obj}
+ primitive = ser.serialize_entity(self.context, thing)
+ self.assertEqual(1, len(primitive))
+ for item in primitive.itervalues():
+ self.assertNotIsInstance(item, base.NovaObject)
+ thing2 = ser.deserialize_entity(self.context, primitive)
+ self.assertEqual(1, len(thing2))
+ for item in thing2.itervalues():
+ self.assertIsInstance(item, MyObj)
+
+ # object-action updates dict case
+ thing = {'foo': obj.obj_to_primitive()}
+ primitive = ser.serialize_entity(self.context, thing)
+ self.assertEqual(thing, primitive)
+ thing2 = ser.deserialize_entity(self.context, thing)
+ self.assertIsInstance(thing2['foo'], base.NovaObject)
# NOTE(danms): The hashes in this list should only be changed if
@@ -899,64 +930,83 @@ def test_object_serialization_iterables(self):
# objects
object_data = {
'Agent': '1.0-c4ff8a833aee8ae44ab8aed1a171273d',
- 'AgentList': '1.0-f8b860e1f2ce80e676ba1a37ddf86e4f',
+ 'AgentList': '1.0-31f07426a729311a42ff7f6246e76e25',
'Aggregate': '1.1-f5d477be06150529a9b2d27cc49030b5',
- 'AggregateList': '1.1-3e67b6a4840b19c797504cc6056b27ff',
+ 'AggregateList': '1.2-4b02a285b8612bfb86a96ff80052fb0a',
'BlockDeviceMapping': '1.1-9968ffe513e7672484b0f528b034cd0f',
- 'BlockDeviceMappingList': '1.2-d6d7df540ca149dda78b22b4b10bdef3',
- 'ComputeNode': '1.4-ed20e7a7c1a4612fe7d2836d5887c726',
- 'ComputeNodeList': '1.3-ff59187056eaa96f6fd3fb70693d818c',
+ 'BlockDeviceMappingList': '1.2-a6df0a8ef84d6bbaba51143499e9bed2',
+ 'ComputeNode': '1.5-57ce5a07c727ffab6c51723bb8dccbfe',
+ 'ComputeNodeList': '1.4-a993fa58c16f423c72496c7555e99987',
'DNSDomain': '1.0-5bdc288d7c3b723ce86ede998fd5c9ba',
- 'DNSDomainList': '1.0-6e3cc498d89dd7e90f9beb021644221c',
+ 'DNSDomainList': '1.0-cfb3e7e82be661501c31099523154db4',
'EC2InstanceMapping': '1.0-627baaf4b12c9067200979bdc4558a99',
+ 'EC2SnapshotMapping': '1.0-26cf315be1f8abab4289d4147671c836',
'EC2VolumeMapping': '1.0-2f8c3bf077c65a425294ec2b361c9143',
'FixedIP': '1.1-082fb26772ce2db783ce4934edca4652',
- 'FixedIPList': '1.1-8ea5cfca611598f1242fd4095e49e58b',
+ 'FixedIPList': '1.1-c12d1165c88fa721ab8abcf502fa1b29',
'Flavor': '1.1-096cfd023c35d07542cf732fb29b45e4',
- 'FlavorList': '1.1-d559595f55936a6d602721c3bdff6fff',
+ 'FlavorList': '1.1-a3d5551267cb8f62ff38ded125900721',
'FloatingIP': '1.1-27eb68b7c9c620dd5f0561b5a3be0e82',
- 'FloatingIPList': '1.2-1b77acb3523d16e3282624f51fee60d8',
+ 'FloatingIPList': '1.2-6c5b0b4d4a4c17575f4d91bae14e5237',
'Instance': '1.13-c9cfd71ddc9d6e7e7c72879f4d5982ee',
'InstanceAction': '1.1-6b1d0a6dbd522b5a83c20757ec659663',
'InstanceActionEvent': '1.1-f144eaa9fb22f248fc41ed8401a3a1be',
- 'InstanceActionEventList': '1.0-937f4ed414ff2354de416834b948fbd6',
- 'InstanceActionList': '1.0-d46ade45deeba63c55821e22c164bd1b',
+ 'InstanceActionEventList': '1.0-1d5cc958171d6ce07383c2ad6208318e',
+ 'InstanceActionList': '1.0-368410fdb8d69ae20c495308535d6266',
'InstanceExternalEvent': '1.0-f1134523654407a875fd59b80f759ee7',
'InstanceFault': '1.2-313438e37e9d358f3566c85f6ddb2d3e',
- 'InstanceFaultList': '1.1-bd578be60d045629ca7b3ce1a2493ae4',
- 'InstanceGroup': '1.6-c032430832b3cbaf92c99088e4b2fdc8',
- 'InstanceGroupList': '1.2-bebd07052779ae3b47311efe85428a8b',
+ 'InstanceFaultList': '1.1-aeb598ffd0cd6aa61fca7adf0f5e900d',
+ 'InstanceGroup': '1.7-b31ea31fdb452ab7810adbe789244f91',
+ 'InstanceGroupList': '1.2-a474822eebc3e090012e581adcc1fa09',
'InstanceInfoCache': '1.5-ef64b604498bfa505a8c93747a9d8b2f',
- 'InstanceList': '1.6-78800140a5f9818ab00f8c052437655f',
+ 'InstanceList': '1.7-71e48495e83df551cefe6691478c865c',
'KeyPair': '1.1-3410f51950d052d861c11946a6ae621a',
- 'KeyPairList': '1.0-854cfff138dac9d5925c89cf805d1a70',
+ 'KeyPairList': '1.0-71132a568cc5d078ba1748a9c02c87b8',
'Migration': '1.1-67c47726c2c71422058cd9d149d6d3ed',
- 'MigrationList': '1.1-6ca2ebb822ebfe1a660bace824b378c6',
- 'MyObj': '1.6-9039bc29de1c08943771407697c83076',
+ 'MigrationList': '1.1-8c5f678edc72a592d591a13b35e54353',
+ 'MyObj': '1.6-55bfc22259fd3df239e4a49fa3552c93',
+ 'MyOwnedObject': '1.0-0f3d6c028543d7f3715d121db5b8e298',
'Network': '1.2-2ea21ede5e45bb80e7b7ac7106915c4e',
- 'NetworkList': '1.2-16510568c6e64cb8b358cb2b11333196',
+ 'NetworkList': '1.2-aa4ad23f035b97a41732ea8b3445fc5e',
'PciDevice': '1.1-523c46f960d93f78db55f0280b09441e',
- 'PciDeviceList': '1.0-5da7b4748a5a2594bae2cd0bd211cca2',
- 'Quotas': '1.0-1933ffdc585c205445331fe842567eb3',
- 'QuotasNoOp': '1.0-187356d5a8b8e4a3505148ea4e96cfcb',
+ 'PciDeviceList': '1.0-43d6c4ea0dd77955e97b23d937a3f925',
+ 'Quotas': '1.1-7897deef00e6cd3095c8916f68d24418',
+ 'QuotasNoOp': '1.1-4b06fd721c586b907ddd6543a00d6c2f',
+ 'S3ImageMapping': '1.0-9225943a44a91ad0349b9fd8bd3f3ce2',
'SecurityGroup': '1.1-bba0e72865e0953793e796571692453b',
- 'SecurityGroupList': '1.0-9513387aabf08c2a7961ac4da4315ed4',
- 'SecurityGroupRule': '1.0-fdd020bdd7eb8bac744ad6f9a4ef8165',
- 'SecurityGroupRuleList': '1.0-af4deeea8699ee90fb217f77d711d781',
- 'Service': '1.2-5a3df338c669e1148251431370b440ef',
- 'ServiceList': '1.0-ae64b4922df28d7cd11c59cddddf926c',
- 'TestSubclassedObject': '1.6-1629421d83f474b7fadc41d3fc0e4998',
+ 'SecurityGroupList': '1.0-528e6448adfeeb78921ebeda499ab72f',
+ 'SecurityGroupRule': '1.1-a9175baf7664439af1a16c2010b55576',
+ 'SecurityGroupRuleList': '1.1-667fca3a9928f23d2d10e61962c55f3c',
+ 'Service': '1.3-5a3df338c669e1148251431370b440ef',
+ 'ServiceList': '1.1-818bc6a463721e42fbb4fbf6f68c4eeb',
+ 'TestSubclassedObject': '1.6-c63feb2f2533b7d075490c04a2cc10dd',
'VirtualInterface': '1.0-10fdac4c704102b6d57d6936d6d790d2',
- 'VirtualInterfaceList': '1.0-dc9e9d5bce522d28f96092c49119b3e0',
+ 'VirtualInterfaceList': '1.0-accbf02628a8063c1d885077a2bf49b6',
+}
+
+
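+# Expected versions of the child objects embedded in each parent object;
+# test_relationships below recomputes this tree and flags any drift.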
+object_relationships = {
+ 'BlockDeviceMapping': {'Instance': '1.13'},
+ 'FixedIP': {'Instance': '1.13', 'Network': '1.2',
+ 'VirtualInterface': '1.0'},
+ 'FloatingIP': {'FixedIP': '1.1'},
+ 'Instance': {'InstanceFault': '1.2',
+ 'InstanceInfoCache': '1.5',
+ 'PciDeviceList': '1.0',
+ 'SecurityGroupList': '1.0'},
+ 'MyObj': {'MyOwnedObject': '1.0'},
+ 'SecurityGroupRule': {'SecurityGroup': '1.1'},
+ 'Service': {'ComputeNode': '1.5'},
+ 'TestSubclassedObject': {'MyOwnedObject': '1.0'}
}
class TestObjectVersions(test.TestCase):
def setUp(self):
super(TestObjectVersions, self).setUp()
- self._fingerprints = {}
- def _get_fingerprint(self, obj_class):
+ def _get_fingerprint(self, obj_name):
+ obj_class = base.NovaObject._obj_classes[obj_name][0]
fields = obj_class.fields.items()
fields.sort()
methods = []
@@ -970,31 +1020,83 @@ def _get_fingerprint(self, obj_class):
# Of course, these are just the mechanical changes we can detect,
# but many other things may require a version bump (method behavior
# and return value changes, for example).
- relevant_data = (fields, methods)
- return '%s-%s' % (obj_class.VERSION,
- hashlib.md5(str(relevant_data)).hexdigest())
-
- def _test_versions_cls(self, obj_name):
- obj_class = base.NovaObject._obj_classes[obj_name][0]
- expected_fingerprint = object_data.get(obj_name, 'unknown')
- actual_fingerprint = self._get_fingerprint(obj_class)
-
- self._fingerprints[obj_name] = actual_fingerprint
-
- if os.getenv('GENERATE_HASHES'):
- return
-
- self.assertEqual(
- expected_fingerprint, actual_fingerprint,
- ('%s object has changed; please make sure the version '
- 'has been bumped, and then update this hash') % obj_name)
+ if hasattr(obj_class, 'child_versions'):
+ relevant_data = (fields, methods, obj_class.child_versions)
+ else:
+ relevant_data = (fields, methods)
+ fingerprint = '%s-%s' % (obj_class.VERSION,
+ hashlib.md5(str(relevant_data)).hexdigest())
+ return fingerprint
def test_versions(self):
+ fingerprints = {}
for obj_name in base.NovaObject._obj_classes:
- self._test_versions_cls(obj_name)
+ fingerprints[obj_name] = self._get_fingerprint(obj_name)
if os.getenv('GENERATE_HASHES'):
file('object_hashes.txt', 'w').write(
- pprint.pformat(self._fingerprints))
+ pprint.pformat(fingerprints))
raise test.TestingException(
'Generated hashes in object_hashes.txt')
+
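+        # Compare stored and computed fingerprints as sets so the failure
+        # message lists only the objects whose hashes actually changed.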
+ stored = set(object_data.items())
+ computed = set(fingerprints.items())
+ changed = stored - computed
+ expected = {}
+ actual = {}
+ for name, hash in changed:
+ expected[name] = object_data.get(name)
+ actual[name] = fingerprints.get(name)
+
+ self.assertEqual(expected, actual,
+ 'Some objects have changed; please make sure the '
+ 'versions have been bumped, and then update their '
+ 'hashes here.')
+
+ def _build_tree(self, tree, obj_class):
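+        # Recursively record, for every object class, the versions of the
+        # sub-objects reachable through its Object fields.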
+ obj_name = obj_class.obj_name()
+ if obj_name in tree:
+ return
+
+ for name, field in obj_class.fields.items():
+ if isinstance(field._type, fields.Object):
+ sub_obj_name = field._type._obj_name
+ sub_obj_class = base.NovaObject._obj_classes[sub_obj_name][0]
+ self._build_tree(tree, sub_obj_class)
+ tree.setdefault(obj_name, {})
+ tree[obj_name][sub_obj_name] = sub_obj_class.VERSION
+
+ def test_relationships(self):
+ tree = {}
+ for obj_name in base.NovaObject._obj_classes.keys():
+ self._build_tree(tree, base.NovaObject._obj_classes[obj_name][0])
+
+ stored = set([(x, str(y)) for x, y in object_relationships.items()])
+ computed = set([(x, str(y)) for x, y in tree.items()])
+ changed = stored - computed
+ expected = {}
+ actual = {}
+ for name, deps in changed:
+ expected[name] = object_relationships.get(name)
+ actual[name] = tree.get(name)
+ self.assertEqual(expected, actual,
+ 'Some objects have changed dependencies. '
+ 'Please make sure to bump the versions of '
+ 'parent objects and provide a rule in their '
+ 'obj_make_compatible() routines to backlevel '
+ 'the child object.')
+
+ def test_obj_make_compatible(self):
+ # Iterate all object classes and verify that we can run
+ # obj_make_compatible with every older version than current.
+ # This doesn't actually test the data conversions, but it at least
+ # makes sure the method doesn't blow up on something basic like
+ # expecting the wrong version format.
+ for obj_name in base.NovaObject._obj_classes:
+ obj_class = base.NovaObject._obj_classes[obj_name][0]
+ version = utils.convert_version_to_tuple(obj_class.VERSION)
+ for n in range(version[1]):
+ test_version = '%d.%d' % (version[0], n)
+ LOG.info('testing obj: %s version: %s' %
+ (obj_name, test_version))
+ obj_class().obj_to_primitive(target_version=test_version)
diff --git a/nova/tests/objects/test_quotas.py b/nova/tests/objects/test_quotas.py
index daae2b52a2..c2a9892a7d 100644
--- a/nova/tests/objects/test_quotas.py
+++ b/nova/tests/objects/test_quotas.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
+
from nova import context
from nova.objects import quotas as quotas_obj
from nova import quota
@@ -142,6 +144,20 @@ def test_rollback_none_reservations(self):
self.mox.ReplayAll()
quotas.rollback()
+ @mock.patch('nova.db.quota_create')
+ def test_create_limit(self, mock_create):
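+        # create_limit should pass straight through to db.quota_create,
+        # including the optional user_id.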
+ quotas_obj.Quotas.create_limit(self.context, 'fake-project',
+ 'foo', 10, user_id='user')
+ mock_create.assert_called_once_with(self.context, 'fake-project',
+ 'foo', 10, user_id='user')
+
+ @mock.patch('nova.db.quota_update')
+ def test_update_limit(self, mock_update):
+ quotas_obj.Quotas.update_limit(self.context, 'fake-project',
+ 'foo', 10, user_id='user')
+ mock_update.assert_called_once_with(self.context, 'fake-project',
+ 'foo', 10, user_id='user')
+
class TestQuotasObject(_TestQuotasObject, test_objects._LocalTest):
pass
diff --git a/nova/tests/objects/test_security_group_rule.py b/nova/tests/objects/test_security_group_rule.py
index 4cd7d95758..e2c5294403 100644
--- a/nova/tests/objects/test_security_group_rule.py
+++ b/nova/tests/objects/test_security_group_rule.py
@@ -15,8 +15,8 @@
import mock
from nova import db
-from nova.objects import security_group
-from nova.objects import security_group_rule
+from nova import exception
+from nova import objects
from nova.tests.objects import test_objects
from nova.tests.objects import test_security_group
@@ -37,7 +37,7 @@ class _TestSecurityGroupRuleObject(object):
def test_get_by_id(self):
with mock.patch.object(db, 'security_group_rule_get') as sgrg:
sgrg.return_value = fake_rule
- rule = security_group_rule.SecurityGroupRule.get_by_id(
+ rule = objects.SecurityGroupRule.get_by_id(
self.context, 1)
for field in fake_rule:
if field == 'cidr':
@@ -47,18 +47,43 @@ def test_get_by_id(self):
sgrg.assert_called_with(self.context, 1)
def test_get_by_security_group(self):
- secgroup = security_group.SecurityGroup()
+ secgroup = objects.SecurityGroup()
secgroup.id = 123
rule = dict(fake_rule)
rule['grantee_group'] = dict(test_security_group.fake_secgroup, id=123)
stupid_method = 'security_group_rule_get_by_security_group'
with mock.patch.object(db, stupid_method) as sgrgbsg:
sgrgbsg.return_value = [rule]
- rules = (security_group_rule.SecurityGroupRuleList.
+ rules = (objects.SecurityGroupRuleList.
get_by_security_group(self.context, secgroup))
self.assertEqual(1, len(rules))
self.assertEqual(123, rules[0].grantee_group.id)
+ @mock.patch.object(db, 'security_group_rule_create',
+ return_value=fake_rule)
+ def test_create(self, db_mock):
+ rule = objects.SecurityGroupRule()
+ rule.protocol = 'tcp'
+ secgroup = objects.SecurityGroup()
+ secgroup.id = 123
+ parentgroup = objects.SecurityGroup()
+ parentgroup.id = 223
+ rule.grantee_group = secgroup
+ rule.parent_group = parentgroup
+ rule.create(self.context)
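+        # The second positional argument to security_group_rule_create is
+        # the dict of values built from the object's fields.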
+ updates = db_mock.call_args[0][1]
+ self.assertEqual(fake_rule['id'], rule.id)
+ self.assertEqual(updates['group_id'], rule.grantee_group.id)
+ self.assertEqual(updates['parent_group_id'], rule.parent_group.id)
+
+ @mock.patch.object(db, 'security_group_rule_create',
+ return_value=fake_rule)
+ def test_set_id_failure(self, db_mock):
+ rule = objects.SecurityGroupRule()
+ rule.create(self.context)
+ self.assertRaises(exception.ReadOnlyFieldError, setattr,
+ rule, 'id', 124)
+
class TestSecurityGroupRuleObject(test_objects._LocalTest,
_TestSecurityGroupRuleObject):
diff --git a/nova/tests/objects/test_service.py b/nova/tests/objects/test_service.py
index 2d8ae52ce6..8951e9a0dc 100644
--- a/nova/tests/objects/test_service.py
+++ b/nova/tests/objects/test_service.py
@@ -16,6 +16,7 @@
from nova import db
from nova import exception
+from nova.objects import aggregate
from nova.objects import service
from nova.openstack.common import timeutils
from nova.tests.objects import test_compute_node
@@ -164,12 +165,17 @@ def test_get_all(self):
def test_get_all_with_az(self):
self.mox.StubOutWithMock(db, 'service_get_all')
- self.mox.StubOutWithMock(db, 'aggregate_host_get_by_metadata_key')
+ self.mox.StubOutWithMock(aggregate.AggregateList,
+ 'get_by_metadata_key')
db.service_get_all(self.context, disabled=None).AndReturn(
[dict(fake_service, topic='compute')])
- db.aggregate_host_get_by_metadata_key(
- self.context, key='availability_zone').AndReturn(
- {fake_service['host']: ['test-az']})
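+        # Build a real Aggregate carrying an availability zone so that
+        # set_zones resolves the service's AZ from aggregate metadata.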
+ agg = aggregate.Aggregate()
+ agg.name = 'foo'
+ agg.metadata = {'availability_zone': 'test-az'}
+ agg.create(self.context)
+ agg.hosts = [fake_service['host']]
+ aggregate.AggregateList.get_by_metadata_key(self.context,
+ 'availability_zone', hosts=set(agg.hosts)).AndReturn([agg])
self.mox.ReplayAll()
services = service.ServiceList.get_all(self.context, set_zones=True)
self.assertEqual(1, len(services))
diff --git a/nova/tests/pci/test_pci_devspec.py b/nova/tests/pci/test_pci_devspec.py
new file mode 100644
index 0000000000..79c4f4ebc2
--- /dev/null
+++ b/nova/tests/pci/test_pci_devspec.py
@@ -0,0 +1,179 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from nova import exception
+from nova.objects import pci_device
+from nova.pci import pci_devspec
+from nova import test
+
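+# A fake PCI device record exercised by the address-matching tests below.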
+dev = {"vendor_id": "8086",
+ "product_id": "5057",
+ "address": "1234:5678:8988.5",
+ "phys_function": "0000:0a:00.0"}
+
+
+class PciAddressTestCase(test.NoDBTestCase):
+ def test_wrong_address(self):
+ pci_info = ('{"vendor_id": "8086", "address": "*: *: *.6",' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+ pci = pci_devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(dev))
+
+ def test_address_too_big(self):
+ pci_info = ('{"address": "0000:0a:0b:00.5", ' +
+ '"physical_network": "hr_net"}')
+ self.assertRaises(exception.PciDeviceWrongAddressFormat,
+ pci_devspec.PciDeviceSpec, pci_info)
+
+ def test_address_invalid_character(self):
+ pci_info = '{"address": "0000:h4.12:6", "physical_network": "hr_net"}'
+ self.assertRaises(exception.PciDeviceWrongAddressFormat,
+ pci_devspec.PciDeviceSpec, pci_info)
+
+ def test_max_func(self):
+ pci_info = (('{"address": "0000:0a:00.%s", ' +
+ '"physical_network": "hr_net"}') %
+ (pci_devspec.MAX_FUNC + 1))
+ exc = self.assertRaises(exception.PciDeviceInvalidAddressField,
+ pci_devspec.PciDeviceSpec, pci_info)
+ msg = ('Invalid PCI Whitelist: '
+ 'The PCI address 0000:0a:00.%s has an invalid function.'
+ % (pci_devspec.MAX_FUNC + 1))
+ self.assertEqual(msg, unicode(exc))
+
+ def test_max_domain(self):
+ pci_info = ('{"address": "%x:0a:00.5", "physical_network":"hr_net"}'
+ % (pci_devspec.MAX_DOMAIN + 1))
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ pci_devspec.PciDeviceSpec, pci_info)
+ msg = ('Invalid PCI devices Whitelist config invalid domain %x'
+ % (pci_devspec.MAX_DOMAIN + 1))
+ self.assertEqual(msg, unicode(exc))
+
+ def test_max_bus(self):
+ pci_info = ('{"address": "0000:%x:00.5", "physical_network":"hr_net"}'
+ % (pci_devspec.MAX_BUS + 1))
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ pci_devspec.PciDeviceSpec, pci_info)
+ msg = ('Invalid PCI devices Whitelist config invalid bus %x'
+ % (pci_devspec.MAX_BUS + 1))
+ self.assertEqual(msg, unicode(exc))
+
+ def test_max_slot(self):
+ pci_info = ('{"address": "0000:0a:%x.5", "physical_network":"hr_net"}'
+ % (pci_devspec.MAX_SLOT + 1))
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ pci_devspec.PciDeviceSpec, pci_info)
+ msg = ('Invalid PCI devices Whitelist config invalid slot %x'
+ % (pci_devspec.MAX_SLOT + 1))
+ self.assertEqual(msg, unicode(exc))
+
+ def test_address_is_undefined(self):
+ pci_info = '{"vendor_id":"8086", "product_id":"5057"}'
+ pci = pci_devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(dev))
+
+ def test_partial_address(self):
+ pci_info = '{"address":":0a:00.", "physical_network":"hr_net"}'
+ pci = pci_devspec.PciDeviceSpec(pci_info)
+ dev = {"vendor_id": "1137",
+ "product_id": "0071",
+ "address": "0000:0a:00.5",
+ "phys_function": "0000:0a:00.0"}
+ self.assertTrue(pci.match(dev))
+
+    @mock.patch('nova.pci.pci_utils.is_physical_function', return_value=True)
+ def test_address_is_pf(self, mock_is_physical_function):
+ pci_info = '{"address":"0000:0a:00.0", "physical_network":"hr_net"}'
+ pci = pci_devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(dev))
+
+
+class PciDevSpecTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(PciDevSpecTestCase, self).setUp()
+
+ def test_spec_match(self):
+ pci_info = ('{"vendor_id": "8086","address": "*: *: *.5",' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+ pci = pci_devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(dev))
+
+ def test_invalid_vendor_id(self):
+ pci_info = ('{"vendor_id": "8087","address": "*: *: *.5", ' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+ pci = pci_devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(dev))
+
+ def test_vendor_id_out_of_range(self):
+ pci_info = ('{"vendor_id": "80860", "address": "*:*:*.5", ' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ pci_devspec.PciDeviceSpec, pci_info)
+ self.assertEqual("Invalid PCI devices Whitelist config "
+ "invalid vendor_id 80860", unicode(exc))
+
+ def test_invalid_product_id(self):
+ pci_info = ('{"vendor_id": "8086","address": "*: *: *.5", ' +
+ '"product_id": "5056", "physical_network": "hr_net"}')
+ pci = pci_devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(dev))
+
+ def test_product_id_out_of_range(self):
+ pci_info = ('{"vendor_id": "8086","address": "*:*:*.5", ' +
+ '"product_id": "50570", "physical_network": "hr_net"}')
+ exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ pci_devspec.PciDeviceSpec, pci_info)
+ self.assertEqual("Invalid PCI devices Whitelist config "
+ "invalid product_id 50570", unicode(exc))
+
+ def test_devname_and_address(self):
+ pci_info = ('{"devname": "eth0", "vendor_id":"8086", ' +
+ '"address":"*:*:*.5", "physical_network": "hr_net"}')
+ self.assertRaises(exception.PciDeviceInvalidDeviceName,
+ pci_devspec.PciDeviceSpec, pci_info)
+
+ @mock.patch('nova.pci.pci_utils.get_function_by_ifname',
+ return_value = ("0000:0a:00.0", True))
+ def test_by_name(self, mock_get_function_by_ifname):
+ pci_info = '{"devname": "eth0", "physical_network": "hr_net"}'
+ pci = pci_devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(dev))
+
+ @mock.patch('nova.pci.pci_utils.get_function_by_ifname',
+                return_value=(None, False))
+ def test_invalid_name(self, mock_get_function_by_ifname):
+ pci_info = '{"devname": "lo", "physical_network": "hr_net"}'
+ exc = self.assertRaises(exception.PciDeviceNotFoundById,
+ pci_devspec.PciDeviceSpec, pci_info)
+ self.assertEqual('PCI device lo not found', unicode(exc))
+
+ def test_pci_obj(self):
+ pci_info = ('{"vendor_id": "8086","address": "*:*:*.5", ' +
+ '"product_id": "5057", "physical_network": "hr_net"}')
+
+ pci = pci_devspec.PciDeviceSpec(pci_info)
+ pci_dev = {
+ 'compute_node_id': 1,
+ 'address': '0000:00:00.5',
+ 'product_id': '5057',
+ 'vendor_id': '8086',
+ 'status': 'available',
+ 'extra_k1': 'v1',
+ }
+
+ pci_obj = pci_device.PciDevice.create(pci_dev)
+ self.assertTrue(pci.match_pci_obj(pci_obj))
diff --git a/nova/tests/pci/test_pci_manager.py b/nova/tests/pci/test_pci_manager.py
index a1e24d0720..19caf2cb3b 100644
--- a/nova/tests/pci/test_pci_manager.py
+++ b/nova/tests/pci/test_pci_manager.py
@@ -21,7 +21,6 @@
from nova import db
from nova import exception
from nova import objects
-from nova.objects import pci_device as pci_device_obj
from nova.pci import pci_device
from nova.pci import pci_manager
from nova.pci import pci_request
@@ -75,7 +74,7 @@ class PciDevTrackerTestCase(test.TestCase):
def _create_fake_instance(self):
self.inst = objects.Instance()
self.inst.uuid = 'fake-inst-uuid'
- self.inst.pci_devices = pci_device_obj.PciDeviceList()
+ self.inst.pci_devices = objects.PciDeviceList()
self.inst.vm_state = vm_states.ACTIVE
self.inst.task_state = None
@@ -105,7 +104,8 @@ def setUp(self):
def test_pcidev_tracker_create(self):
self.assertEqual(len(self.tracker.pci_devs), 3)
- self.assertEqual(len(self.tracker.free_devs), 3)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
self.assertEqual(self.tracker.stale.keys(), [])
self.assertEqual(len(self.tracker.stats.pools), 2)
self.assertEqual(self.tracker.node_id, 1)
@@ -114,23 +114,6 @@ def test_pcidev_tracker_create_no_nodeid(self):
self.tracker = pci_manager.PciDevTracker()
self.assertEqual(len(self.tracker.pci_devs), 0)
- def test_get_free_devices_for_requests(self):
- devs = self.tracker.get_free_devices_for_requests(fake_pci_requests)
- self.assertEqual(len(devs), 2)
- self.assertEqual(set([dev['vendor_id'] for dev in devs]),
- set(['v1', 'v']))
-
- def test_get_free_devices_for_requests_empty(self):
- devs = self.tracker.get_free_devices_for_requests([])
- self.assertEqual(len(devs), 0)
-
- def test_get_free_devices_for_requests_meet_partial(self):
- requests = copy.deepcopy(fake_pci_requests)
- requests[1]['count'] = 2
- requests[1]['spec'][0]['vendor_id'] = 'v'
- devs = self.tracker.get_free_devices_for_requests(requests)
- self.assertEqual(len(devs), 0)
-
def test_set_hvdev_new_dev(self):
fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
@@ -173,8 +156,9 @@ def test_set_hvdev_changed_stal(self):
def test_update_pci_for_instance_active(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_instance(self.inst)
- self.assertEqual(len(self.tracker.free_devs), 1)
- self.assertEqual(self.tracker.free_devs[0]['vendor_id'], 'v')
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(free_devs[0]['vendor_id'], 'v')
def test_update_pci_for_instance_fail(self):
self.pci_requests = copy.deepcopy(fake_pci_requests)
@@ -186,10 +170,12 @@ def test_update_pci_for_instance_fail(self):
def test_update_pci_for_instance_deleted(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_instance(self.inst)
- self.assertEqual(len(self.tracker.free_devs), 1)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
self.inst.vm_state = vm_states.DELETED
self.tracker.update_pci_for_instance(self.inst)
- self.assertEqual(len(self.tracker.free_devs), 3)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
self.assertEqual(set([dev['vendor_id'] for
dev in self.tracker.pci_devs]),
set(['v', 'v1']))
@@ -197,15 +183,18 @@ def test_update_pci_for_instance_deleted(self):
def test_update_pci_for_instance_resize_source(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_instance(self.inst)
- self.assertEqual(len(self.tracker.free_devs), 1)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
self.inst.task_state = task_states.RESIZE_MIGRATED
self.tracker.update_pci_for_instance(self.inst)
- self.assertEqual(len(self.tracker.free_devs), 3)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
def test_update_pci_for_instance_resize_dest(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_migration(self.inst)
- self.assertEqual(len(self.tracker.free_devs), 1)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
self.assertEqual(len(self.tracker.claims['fake-inst-uuid']), 2)
self.assertNotIn('fake-inst-uuid', self.tracker.allocations)
self.inst.task_state = task_states.RESIZE_FINISH
@@ -216,14 +205,16 @@ def test_update_pci_for_instance_resize_dest(self):
def test_update_pci_for_migration_in(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_migration(self.inst)
- self.assertEqual(len(self.tracker.free_devs), 1)
- self.assertEqual(self.tracker.free_devs[0]['vendor_id'], 'v')
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(free_devs[0]['vendor_id'], 'v')
def test_update_pci_for_migration_out(self):
self.pci_requests = fake_pci_requests
self.tracker.update_pci_for_migration(self.inst)
self.tracker.update_pci_for_migration(self.inst, sign=-1)
- self.assertEqual(len(self.tracker.free_devs), 3)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
self.assertEqual(set([dev['vendor_id'] for
dev in self.tracker.pci_devs]),
set(['v', 'v1']))
@@ -277,13 +268,15 @@ def test_clean_usage(self):
self.tracker.update_pci_for_instance(self.inst)
self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}]
self.tracker.update_pci_for_instance(inst_2)
- self.assertEqual(len(self.tracker.free_devs), 1)
- self.assertEqual(self.tracker.free_devs[0]['vendor_id'], 'v')
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
+ self.assertEqual(free_devs[0]['vendor_id'], 'v')
self.tracker.clean_usage([self.inst], [migr], [orph])
- self.assertEqual(len(self.tracker.free_devs), 2)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 2)
self.assertEqual(
- set([dev['vendor_id'] for dev in self.tracker.free_devs]),
+ set([dev['vendor_id'] for dev in free_devs]),
set(['v', 'v1']))
def test_clean_usage_claims(self):
@@ -296,11 +289,13 @@ def test_clean_usage_claims(self):
self.tracker.update_pci_for_instance(self.inst)
self.pci_requests = [{'count': 1, 'spec': [{'vendor_id': 'v1'}]}]
self.tracker.update_pci_for_migration(inst_2)
- self.assertEqual(len(self.tracker.free_devs), 1)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 1)
self.tracker.clean_usage([self.inst], [migr], [orph])
- self.assertEqual(len(self.tracker.free_devs), 2)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 2)
self.assertEqual(
- set([dev['vendor_id'] for dev in self.tracker.free_devs]),
+ set([dev['vendor_id'] for dev in free_devs]),
set(['v', 'v1']))
def test_clean_usage_no_request_match_no_claims(self):
@@ -309,11 +304,13 @@ def test_clean_usage_no_request_match_no_claims(self):
# calls clean_usage.
self.pci_requests = None
self.tracker.update_pci_for_migration(instance=self.inst, sign=1)
- self.assertEqual(3, len(self.tracker.free_devs))
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(3, len(free_devs))
self.tracker.clean_usage([], [], [])
- self.assertEqual(3, len(self.tracker.free_devs))
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(3, len(free_devs))
self.assertEqual(
- set([dev['address'] for dev in self.tracker.free_devs]),
+ set([dev['address'] for dev in free_devs]),
set(['0000:00:00.1', '0000:00:00.2', '0000:00:00.3']))
@@ -334,7 +331,7 @@ def test_get_devs_object(self):
def _fake_obj_load_attr(foo, attrname):
if attrname == 'pci_devices':
self.load_attr_called = True
- foo.pci_devices = pci_device_obj.PciDeviceList()
+ foo.pci_devices = objects.PciDeviceList()
inst = fakes.stub_instance(id='1')
ctxt = context.get_admin_context()
diff --git a/nova/tests/pci/test_pci_stats.py b/nova/tests/pci/test_pci_stats.py
index 9104e2ea9e..9a81c58e11 100644
--- a/nova/tests/pci/test_pci_stats.py
+++ b/nova/tests/pci/test_pci_stats.py
@@ -14,7 +14,7 @@
# under the License.
from nova import exception
-from nova.objects import pci_device
+from nova import objects
from nova.openstack.common import jsonutils
from nova.pci import pci_stats as pci
from nova import test
@@ -51,9 +51,9 @@
class PciDeviceStatsTestCase(test.NoDBTestCase):
def _create_fake_devs(self):
- self.fake_dev_1 = pci_device.PciDevice.create(fake_pci_1)
- self.fake_dev_2 = pci_device.PciDevice.create(fake_pci_2)
- self.fake_dev_3 = pci_device.PciDevice.create(fake_pci_3)
+ self.fake_dev_1 = objects.PciDevice.create(fake_pci_1)
+ self.fake_dev_2 = objects.PciDevice.create(fake_pci_2)
+ self.fake_dev_3 = objects.PciDevice.create(fake_pci_3)
map(self.pci_stats.add_device,
[self.fake_dev_1, self.fake_dev_2, self.fake_dev_3])
@@ -71,15 +71,15 @@ def test_add_device(self):
set([1, 2]))
def test_remove_device(self):
- self.pci_stats.consume_device(self.fake_dev_2)
+ self.pci_stats.remove_device(self.fake_dev_2)
self.assertEqual(len(self.pci_stats.pools), 1)
self.assertEqual(self.pci_stats.pools[0]['count'], 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
def test_remove_device_exception(self):
- self.pci_stats.consume_device(self.fake_dev_2)
+ self.pci_stats.remove_device(self.fake_dev_2)
self.assertRaises(exception.PciDevicePoolEmpty,
- self.pci_stats.consume_device,
+ self.pci_stats.remove_device,
self.fake_dev_2)
def test_json_creat(self):
@@ -116,3 +116,18 @@ def test_apply_requests_failed(self):
self.assertRaises(exception.PciDeviceRequestFailed,
self.pci_stats.apply_requests,
pci_requests_multiple)
+
+ def test_consume_requests(self):
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['v1', 'v2']),
+ set([dev['vendor_id'] for dev in devs]))
+
+ def test_consume_requests_empty(self):
+ devs = self.pci_stats.consume_requests([])
+ self.assertEqual(0, len(devs))
+
+ def test_consume_requests_failed(self):
+ self.assertRaises(exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests_multiple)
diff --git a/nova/tests/pci/test_pci_whitelist.py b/nova/tests/pci/test_pci_whitelist.py
index 0b737eb7bb..ae923ef0f6 100644
--- a/nova/tests/pci/test_pci_whitelist.py
+++ b/nova/tests/pci/test_pci_whitelist.py
@@ -14,7 +14,7 @@
# under the License.
from nova import exception
-from nova.objects import pci_device
+from nova import objects
from nova.pci import pci_whitelist
from nova import test
@@ -59,7 +59,7 @@ def test_whitelist(self):
'product_id': '0001'}])
def test_whitelist_empty(self):
- dev = pci_device.PciDevice.create(dev_dict)
+ dev = objects.PciDevice.create(dev_dict)
parsed = pci_whitelist.PciHostDevicesWhiteList()
self.assertEqual(parsed.device_assignable(dev), False)
@@ -73,13 +73,13 @@ def test_whitelist_multiple(self):
{'vendor_id': '8087', 'product_id': '0002'}])
def test_device_assignable(self):
- dev = pci_device.PciDevice.create(dev_dict)
+ dev = objects.PciDevice.create(dev_dict)
white_list = '[{"product_id":"0001", "vendor_id":"8086"}]'
parsed = pci_whitelist.PciHostDevicesWhiteList([white_list])
self.assertEqual(parsed.device_assignable(dev), True)
def test_device_assignable_multiple(self):
- dev = pci_device.PciDevice.create(dev_dict)
+ dev = objects.PciDevice.create(dev_dict)
white_list_1 = '[{"product_id":"0001", "vendor_id":"8086"}]'
white_list_2 = '[{"product_id":"0002", "vendor_id":"8087"}]'
parsed = pci_whitelist.PciHostDevicesWhiteList(
@@ -93,5 +93,5 @@ def test_get_pci_devices_filter(self):
white_list_1 = '[{"product_id":"0001", "vendor_id":"8086"}]'
self.flags(pci_passthrough_whitelist=[white_list_1])
pci_filter = pci_whitelist.get_pci_devices_filter()
- dev = pci_device.PciDevice.create(dev_dict)
+ dev = objects.PciDevice.create(dev_dict)
self.assertEqual(pci_filter.device_assignable(dev), True)
diff --git a/nova/tests/policy_fixture.py b/nova/tests/policy_fixture.py
index 8f7e7206fd..3da8cc7d8a 100644
--- a/nova/tests/policy_fixture.py
+++ b/nova/tests/policy_fixture.py
@@ -12,12 +12,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
import os
import fixtures
from oslo.config import cfg
+from nova.openstack.common import jsonutils
from nova.openstack.common import policy as common_policy
import nova.policy
from nova.tests import fake_policy
@@ -56,7 +56,7 @@ def setUp(self):
allow users of the specified role only
"""
super(RoleBasedPolicyFixture, self).setUp()
- policy = json.load(open(CONF.policy_file))
+ policy = jsonutils.load(open(CONF.policy_file))
# Convert all actions to require specified role
for action, rule in policy.iteritems():
@@ -66,7 +66,7 @@ def setUp(self):
self.policy_file_name = os.path.join(self.policy_dir.path,
'policy.json')
with open(self.policy_file_name, 'w') as policy_file:
- json.dump(policy, policy_file)
+ jsonutils.dump(policy, policy_file)
CONF.set_override('policy_file', self.policy_file_name)
nova.policy.reset()
nova.policy.init()
diff --git a/nova/tests/scheduler/ironic_fakes.py b/nova/tests/scheduler/ironic_fakes.py
new file mode 100644
index 0000000000..266fab08d8
--- /dev/null
+++ b/nova/tests/scheduler/ironic_fakes.py
@@ -0,0 +1,75 @@
+# Copyright 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Fake nodes for Ironic host manager tests.
+"""
+
+from nova.openstack.common import jsonutils
+
+
+COMPUTE_NODES = [
+ dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host1', disabled=False),
+ hypervisor_hostname='node1uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=10, free_ram_mb=1024),
+ dict(id=2, local_gb=20, memory_mb=2048, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host2', disabled=True),
+ hypervisor_hostname='node2uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=20, free_ram_mb=2048),
+ dict(id=3, local_gb=30, memory_mb=3072, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host3', disabled=False),
+ hypervisor_hostname='node3uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=30, free_ram_mb=3072),
+ dict(id=4, local_gb=40, memory_mb=4096, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ service=dict(host='host4', disabled=False),
+ hypervisor_hostname='node4uuid', host_ip='127.0.0.1',
+ hypervisor_version=1, hypervisor_type='ironic',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=40, free_ram_mb=4096),
+ # Broken entry
+ dict(id=5, local_gb=50, memory_mb=5120, vcpus=1, service=None,
+ cpu_info='baremetal cpu',
+ stats=jsonutils.dumps(dict(ironic_driver=
+ "nova.virt.ironic.driver.IronicDriver",
+ cpu_arch='i386')),
+ supported_instances='[["i386", "baremetal", "baremetal"]]',
+ free_disk_gb=50, free_ram_mb=5120),
+]
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index a38138c31c..064eb4d50c 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -27,7 +27,7 @@
from nova import context
from nova import db
from nova import exception
-from nova.objects import instance_group as instance_group_obj
+from nova import objects
from nova.pci import pci_request
from nova.scheduler import driver
from nova.scheduler import filter_scheduler
@@ -371,18 +371,19 @@ def test_post_select_populate(self):
self.assertEqual({'vcpus': 5}, host_state.limits)
- def _create_server_group(self):
+ def _create_server_group(self, policy='anti-affinity'):
instance = fake_instance.fake_instance_obj(self.context,
params={'host': 'hostA'})
- group = instance_group_obj.InstanceGroup()
+ group = objects.InstanceGroup()
group.name = 'pele'
group.uuid = str(uuid.uuid4())
group.members = [instance.uuid]
- group.policies = ['anti-affinity']
+ group.policies = [policy]
return group
- def _test_group_details_in_filter_properties(self, group, func, hint):
+ def _group_details_in_filter_properties(self, group, func='get_by_uuid',
+ hint=None, policy=None):
sched = fakes.FakeFilterScheduler()
filter_properties = {
@@ -393,28 +394,67 @@ def _test_group_details_in_filter_properties(self, group, func, hint):
}
with contextlib.nested(
- mock.patch.object(instance_group_obj.InstanceGroup, func,
- return_value=group),
- mock.patch.object(instance_group_obj.InstanceGroup, 'get_hosts',
- return_value=['hostA']),
+ mock.patch.object(objects.InstanceGroup, func, return_value=group),
+ mock.patch.object(objects.InstanceGroup, 'get_hosts',
+ return_value=['hostA']),
) as (get_group, get_hosts):
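+            # Pretend the anti-affinity filter is configured; otherwise
+            # _setup_instance_group would raise NoValidHost (see
+            # test_group_filter_with_filter_not_configured below).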
+ sched._supports_anti_affinity = True
update_group_hosts = sched._setup_instance_group(self.context,
filter_properties)
self.assertTrue(update_group_hosts)
self.assertEqual(set(['hostA', 'hostB']),
filter_properties['group_hosts'])
- self.assertEqual(['anti-affinity'],
- filter_properties['group_policies'])
+ self.assertEqual([policy], filter_properties['group_policies'])
+
+ def test_group_details_in_filter_properties(self):
+ for policy in ['affinity', 'anti-affinity']:
+ group = self._create_server_group(policy)
+ self._group_details_in_filter_properties(group, func='get_by_uuid',
+ hint=group.uuid,
+ policy=policy)
+
+ def _group_filter_with_filter_not_configured(self, policy):
+ self.flags(scheduler_default_filters=['f1', 'f2'])
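+        # Neither group filter is enabled in scheduler_default_filters,
+        # so requesting an (anti-)affinity policy must fail.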
+ sched = fakes.FakeFilterScheduler()
+
+ instance = fake_instance.fake_instance_obj(self.context,
+ params={'host': 'hostA'})
+
+ group = objects.InstanceGroup()
+ group.uuid = str(uuid.uuid4())
+ group.members = [instance.uuid]
+ group.policies = [policy]
+
+ filter_properties = {
+ 'scheduler_hints': {
+ 'group': group.uuid,
+ },
+ }
+
+ with contextlib.nested(
+ mock.patch.object(objects.InstanceGroup, 'get_by_uuid',
+ return_value=group),
+ mock.patch.object(objects.InstanceGroup, 'get_hosts',
+ return_value=['hostA']),
+ ) as (get_group, get_hosts):
+ self.assertRaises(exception.NoValidHost,
+ sched._setup_instance_group, self.context,
+ filter_properties)
+
+ def test_group_filter_with_filter_not_configured(self):
+ policies = ['anti-affinity', 'affinity']
+ for policy in policies:
+ self._group_filter_with_filter_not_configured(policy)
def test_group_uuid_details_in_filter_properties(self):
group = self._create_server_group()
- self._test_group_details_in_filter_properties(group, 'get_by_uuid',
- group.uuid)
+ self._group_details_in_filter_properties(group, 'get_by_uuid',
+ group.uuid, 'anti-affinity')
def test_group_name_details_in_filter_properties(self):
group = self._create_server_group()
- self._test_group_details_in_filter_properties(group, 'get_by_name',
- group.name)
+ self._group_details_in_filter_properties(group, 'get_by_name',
+ group.name, 'anti-affinity')
def test_schedule_host_pool(self):
"""Make sure the scheduler_host_subset_size property works properly."""
@@ -566,6 +606,22 @@ def _fake_weigh_objects(_self, functions, hosts, options):
self.assertEqual(host, selected_hosts[0])
self.assertEqual(node, selected_nodes[0])
+ @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule')
+ def test_select_destinations_notifications(self, mock_schedule):
+ mock_schedule.return_value = [mock.Mock()]
+
+ with mock.patch.object(self.driver.notifier, 'info') as mock_info:
+ request_spec = {'num_instances': 1}
+
+ self.driver.select_destinations(self.context, request_spec, {})
+
+ expected = [
+ mock.call(self.context, 'scheduler.select_destinations.start',
+ dict(request_spec=request_spec)),
+ mock.call(self.context, 'scheduler.select_destinations.end',
+ dict(request_spec=request_spec))]
+ self.assertEqual(expected, mock_info.call_args_list)
+
def test_select_destinations_no_valid_host(self):
def _return_no_host(*args, **kwargs):
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index 634cf66927..2b1fdb09f2 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -17,7 +17,9 @@
import httplib
+import mock
from oslo.config import cfg
+import six
import stubout
from nova import context
@@ -243,6 +245,7 @@ class HostFiltersTestCase(test.NoDBTestCase):
def fake_oat_request(self, *args, **kwargs):
"""Stubs out the response from OAT service."""
self.oat_attested = True
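+        # Record the host list passed to the OAT service so tests (e.g.
+        # test_trusted_filter_combine_hosts) can verify which nodes were
+        # attested.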
+ self.oat_hosts = args[2]
return httplib.OK, self.oat_data
def setUp(self):
@@ -464,15 +467,15 @@ def test_type_filter(self):
service = {'disabled': False}
host = fakes.FakeHostState('fake_host', 'fake_node',
{'service': service})
- #True since empty
+ # True since empty
self.assertTrue(filt_cls.host_passes(host, filter_properties))
fakes.FakeInstance(context=self.context,
params={'host': 'fake_host', 'instance_type_id': 1})
- #True since same type
+ # True since same type
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- #False since different type
+ # False since different type
self.assertFalse(filt_cls.host_passes(host, filter2_properties))
- #False since node not homogeneous
+ # False since node not homogeneous
fakes.FakeInstance(context=self.context,
params={'host': 'fake_host', 'instance_type_id': 2})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
@@ -488,13 +491,13 @@ def test_aggregate_type_filter(self):
service = {'disabled': False}
host = fakes.FakeHostState('fake_host', 'fake_node',
{'service': service})
- #True since no aggregates
+ # True since no aggregates
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- #True since type matches aggregate, metadata
+ # True since type matches aggregate, metadata
self._create_aggregate_with_host(name='fake_aggregate',
hosts=['fake_host'], metadata={'instance_type': 'fake1'})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- #False since type matches aggregate, metadata
+ # False since type matches aggregate, metadata
self.assertFalse(filt_cls.host_passes(host, filter2_properties))
def test_ram_filter_fails_on_memory(self):
@@ -822,6 +825,28 @@ def _do_test_compute_filter_extra_specs(self, ecaps, especs, passes):
assertion = self.assertTrue if passes else self.assertFalse
assertion(filt_cls.host_passes(host, filter_properties))
+ def test_compute_filter_pass_cpu_info_as_text_type(self):
+ cpu_info = """ { "vendor": "Intel", "model": "core2duo",
+ "arch": "i686","features": ["lahf_lm", "rdtscp"], "topology":
+ {"cores": 1, "threads":1, "sockets": 1}} """
+
+ cpu_info = six.text_type(cpu_info)
+
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'cpu_info': cpu_info},
+ especs={'capabilities:cpu_info:vendor': 'Intel'},
+ passes=True)
+
+ def test_compute_filter_fail_cpu_info_as_text_type_not_valid(self):
+ cpu_info = "cpu_info"
+
+ cpu_info = six.text_type(cpu_info)
+
+ self._do_test_compute_filter_extra_specs(
+ ecaps={'cpu_info': cpu_info},
+ especs={'capabilities:cpu_info:vendor': 'Intel'},
+ passes=False)
+
def test_compute_filter_passes_extra_specs_simple(self):
self._do_test_compute_filter_extra_specs(
ecaps={'stats': {'opt1': 1, 'opt2': 2}},
@@ -1292,7 +1317,7 @@ def test_trusted_filter_default_passes(self):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_trusted_passes(self):
- self.oat_data = {"hosts": [{"host_name": "host1",
+ self.oat_data = {"hosts": [{"host_name": "node1",
"trust_lvl": "trusted",
"vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
@@ -1305,7 +1330,7 @@ def test_trusted_filter_trusted_and_trusted_passes(self):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_trusted_and_untrusted_fails(self):
- self.oat_data = {"hosts": [{"host_name": "host1",
+ self.oat_data = {"hosts": [{"host_name": "node1",
"trust_lvl": "untrusted",
"vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
@@ -1318,7 +1343,7 @@ def test_trusted_filter_trusted_and_untrusted_fails(self):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_trusted_fails(self):
- self.oat_data = {"hosts": [{"host_name": "host1",
+ self.oat_data = {"hosts": [{"host_name": "node",
"trust_lvl": "trusted",
"vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
@@ -1331,7 +1356,7 @@ def test_trusted_filter_untrusted_and_trusted_fails(self):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_untrusted_and_untrusted_passes(self):
- self.oat_data = {"hosts": [{"host_name": "host1",
+ self.oat_data = {"hosts": [{"host_name": "node1",
"trust_lvl": "untrusted",
"vtime": timeutils.isotime()}]}
self._stub_service_is_up(True)
@@ -1344,8 +1369,8 @@ def test_trusted_filter_untrusted_and_untrusted_passes(self):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_trusted_filter_update_cache(self):
- self.oat_data = {"hosts": [{"host_name":
- "host1", "trust_lvl": "untrusted",
+ self.oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
"vtime": timeutils.isotime()}]}
filt_cls = self.class_map['TrustedFilter']()
@@ -1372,7 +1397,7 @@ def test_trusted_filter_update_cache(self):
timeutils.clear_time_override()
def test_trusted_filter_update_cache_timezone(self):
- self.oat_data = {"hosts": [{"host_name": "host1",
+ self.oat_data = {"hosts": [{"host_name": "node1",
"trust_lvl": "untrusted",
"vtime": "2012-09-09T05:10:40-04:00"}]}
@@ -1401,6 +1426,52 @@ def test_trusted_filter_update_cache_timezone(self):
timeutils.clear_time_override()
+ @mock.patch('nova.db.compute_node_get_all')
+ def test_trusted_filter_combine_hosts(self, mockdb):
+ self.oat_data = {"hosts": [{"host_name": "node1",
+ "trust_lvl": "untrusted",
+ "vtime": "2012-09-09T05:10:40-04:00"}]}
+ fake_compute_nodes = [
+ {'hypervisor_hostname': 'node1',
+ 'service': {'host': 'host1'},
+ },
+ {'hypervisor_hostname': 'node2',
+ 'service': {'host': 'host2'},
+ }, ]
+ mockdb.return_value = fake_compute_nodes
+ filt_cls = self.class_map['TrustedFilter']()
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': self.context.elevated(),
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'node1', {})
+
+ filt_cls.host_passes(host, filter_properties) # Fill the caches
+ self.assertEqual(set(self.oat_hosts), set(['node1', 'node2']))
+
+ def test_trusted_filter_trusted_and_locale_formated_vtime_passes(self):
+ self.oat_data = {"hosts": [{"host_name": "host1",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.strtime(fmt="%c")},
+ {"host_name": "host2",
+ "trust_lvl": "trusted",
+ "vtime": timeutils.strtime(fmt="%D")},
+ # This is just a broken date to ensure that
+ # we're not just arbitrarily accepting any
+ # date format.
+ ]}
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['TrustedFilter']()
+ extra_specs = {'trust:trusted_host': 'trusted'}
+ filter_properties = {'context': self.context.elevated(),
+ 'instance_type': {'memory_mb': 1024,
+ 'extra_specs': extra_specs}}
+ host = fakes.FakeHostState('host1', 'host1', {})
+ bad_host = fakes.FakeHostState('host2', 'host2', {})
+
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ self.assertFalse(filt_cls.host_passes(bad_host, filter_properties))
+
def test_core_filter_passes(self):
filt_cls = self.class_map['CoreFilter']()
filter_properties = {'instance_type': {'vcpus': 1}}
@@ -1831,3 +1902,71 @@ def test_metrics_filter_missing_metrics(self):
attribute_dict={'metrics': metrics})
filt_cls = self.class_map['MetricsFilter']()
self.assertFalse(filt_cls.host_passes(host, None))
+
+ def test_aggregate_filter_num_iops_value(self):
+ self.flags(max_io_ops_per_host=7)
+ filt_cls = self.class_map['AggregateIoOpsFilter']()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
+ filter_properties = {'context': self.context}
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ self._create_aggregate_with_host(
+ name='fake_aggregate',
+ hosts=['host1'],
+ metadata={'max_io_ops_per_host': 8})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_filter_num_iops_value_error(self):
+ self.flags(max_io_ops_per_host=8)
+ filt_cls = self.class_map['AggregateIoOpsFilter']()
+ host = fakes.FakeHostState('host1', 'node1',
+ {'num_io_ops': 7})
+ self._create_aggregate_with_host(
+ name='fake_aggregate',
+ hosts=['host1'],
+ metadata={'max_io_ops_per_host': 'XXX'})
+ filter_properties = {'context': self.context}
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_disk_filter_value_error(self):
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['AggregateDiskFilter']()
+ self.flags(disk_allocation_ratio=1.0)
+ filter_properties = {
+ 'context': self.context,
+ 'instance_type': {'root_gb': 1,
+ 'ephemeral_gb': 1,
+ 'swap': 1024}}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 3 * 1024,
+ 'total_usable_disk_gb': 1,
+ 'service': service})
+ self._create_aggregate_with_host(name='fake_aggregate',
+ hosts=['host1'],
+ metadata={'disk_allocation_ratio': 'XXX'})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ def test_aggregate_disk_filter_default_value(self):
+ self._stub_service_is_up(True)
+ filt_cls = self.class_map['AggregateDiskFilter']()
+ self.flags(disk_allocation_ratio=1.0)
+ filter_properties = {
+ 'context': self.context,
+ 'instance_type': {'root_gb': 2,
+ 'ephemeral_gb': 1,
+ 'swap': 1024}}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1', 'node1',
+ {'free_disk_mb': 3 * 1024,
+ 'total_usable_disk_gb': 1,
+ 'service': service})
+ # Uses global conf.
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
+ # Uses an aggregate with ratio
+ self._create_aggregate_with_host(
+ name='fake_aggregate',
+ hosts=['host1'],
+ metadata={'disk_allocation_ratio': '2'})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
diff --git a/nova/tests/scheduler/test_ironic_host_manager.py b/nova/tests/scheduler/test_ironic_host_manager.py
new file mode 100644
index 0000000000..761dbf893a
--- /dev/null
+++ b/nova/tests/scheduler/test_ironic_host_manager.py
@@ -0,0 +1,412 @@
+# Copyright (c) 2014 OpenStack Foundation
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For IronicHostManager
+"""
+
+import mock
+
+from nova import db
+from nova import exception
+from nova.openstack.common import jsonutils
+from nova.scheduler import filters
+from nova.scheduler import host_manager
+from nova.scheduler import ironic_host_manager
+from nova import test
+from nova.tests.scheduler import ironic_fakes
+
+
+class FakeFilterClass1(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class FakeFilterClass2(filters.BaseHostFilter):
+ def host_passes(self, host_state, filter_properties):
+ pass
+
+
+class IronicHostManagerTestCase(test.NoDBTestCase):
+ """Test case for IronicHostManager class."""
+
+ def setUp(self):
+ super(IronicHostManagerTestCase, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+
+ def test_get_all_host_states(self):
+        # Ensure .service is set and that we have the values we expect.
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+
+ self.assertEqual(len(host_states_map), 4)
+ for i in range(4):
+ compute_node = ironic_fakes.COMPUTE_NODES[i]
+ host = compute_node['service']['host']
+ node = compute_node['hypervisor_hostname']
+ state_key = (host, node)
+ self.assertEqual(compute_node['service'],
+ host_states_map[state_key].service)
+ self.assertEqual(jsonutils.loads(compute_node['stats']),
+ host_states_map[state_key].stats)
+ self.assertEqual(compute_node['free_ram_mb'],
+ host_states_map[state_key].free_ram_mb)
+ self.assertEqual(compute_node['free_disk_gb'] * 1024,
+ host_states_map[state_key].free_disk_mb)
+
+
+class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
+ """Test case for IronicHostManager class."""
+
+ def setUp(self):
+ super(IronicHostManagerChangedNodesTestCase, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+ ironic_driver = "nova.virt.ironic.driver.IronicDriver"
+ supported_instances = '[["i386", "baremetal", "baremetal"]]'
+ self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
+ vcpus_used=0, local_gb_used=0, memory_mb_used=0,
+ updated_at=None, cpu_info='baremetal cpu',
+ stats=jsonutils.dumps(dict(
+ ironic_driver=ironic_driver,
+ cpu_arch='i386')),
+ supported_instances=supported_instances,
+ free_disk_gb=10, free_ram_mb=1024)
+
+ @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
+ def test_create_ironic_node_state(self, init_mock):
+ init_mock.return_value = None
+ compute = {'cpu_info': 'baremetal cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
+
+ @mock.patch.object(host_manager.HostState, '__init__')
+ def test_create_non_ironic_host_state(self, init_mock):
+ init_mock.return_value = None
+ compute = {'cpu_info': 'other cpu'}
+ host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
+ compute=compute)
+ self.assertIs(host_manager.HostState, type(host_state))
+
+ def test_get_all_host_states_after_delete_one(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ # remove node4 for second call
+ running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
+ if n.get('hypervisor_hostname') != 'node4uuid']
+ db.compute_node_get_all(context).AndReturn(running_nodes)
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(3, len(host_states_map))
+
+ def test_get_all_host_states_after_delete_all(self):
+ context = 'fake_context'
+
+ self.mox.StubOutWithMock(db, 'compute_node_get_all')
+ # all nodes active for first call
+ db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
+ # remove all nodes for second call
+ db.compute_node_get_all(context).AndReturn([])
+ self.mox.ReplayAll()
+
+ self.host_manager.get_all_host_states(context)
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(0, len(host_states_map))
+
+ def test_update_from_compute_node(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ self.assertEqual(1024, host.free_ram_mb)
+ self.assertEqual(1024, host.total_usable_ram_mb)
+ self.assertEqual(10240, host.free_disk_mb)
+ self.assertEqual(1, host.vcpus_total)
+ self.assertEqual(0, host.vcpus_used)
+ self.assertEqual(jsonutils.loads(self.compute_node['stats']),
+ host.stats)
+
+ def test_consume_identical_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+ def test_consume_larger_instance_from_compute(self):
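+        # An Ironic node is consumed as a whole: an instance larger than
+        # the node still leaves free_ram_mb/free_disk_mb at zero rather
+        # than negative.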
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+ def test_consume_smaller_instance_from_compute(self):
+ host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
+ host.update_from_compute_node(self.compute_node)
+
+ instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
+ host.consume_from_instance(instance)
+
+ self.assertEqual(1, host.vcpus_used)
+ self.assertEqual(0, host.free_ram_mb)
+ self.assertEqual(0, host.free_disk_mb)
+
+
+class IronicHostManagerTestFilters(test.NoDBTestCase):
+ """Test filters work for IronicHostManager."""
+
+ def setUp(self):
+ super(IronicHostManagerTestFilters, self).setUp()
+ self.host_manager = ironic_host_manager.IronicHostManager()
+ self.fake_hosts = [ironic_host_manager.IronicNodeState(
+ 'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
+ self.fake_hosts += [ironic_host_manager.IronicNodeState(
+ 'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)]
+
+ def test_choose_host_filters_not_found(self):
+ self.flags(scheduler_default_filters='FakeFilterClass3')
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+ self.assertRaises(exception.SchedulerHostFilterNotFound,
+ self.host_manager._choose_host_filters, None)
+
+ def test_choose_host_filters(self):
+ self.flags(scheduler_default_filters=['FakeFilterClass2'])
+ self.host_manager.filter_classes = [FakeFilterClass1,
+ FakeFilterClass2]
+
+        # Test that exactly one matching filter class is returned.
+ filter_classes = self.host_manager._choose_host_filters(None)
+ self.assertEqual(1, len(filter_classes))
+ self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
+
+ def _mock_get_filtered_hosts(self, info, specified_filters=None):
+ self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
+
+ info['got_objs'] = []
+ info['got_fprops'] = []
+
+ def fake_filter_one(_self, obj, filter_props):
+ info['got_objs'].append(obj)
+ info['got_fprops'].append(filter_props)
+ return True
+
+ self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.host_manager._choose_host_filters(specified_filters).AndReturn(
+ [FakeFilterClass1])
+
+ def _verify_result(self, info, result, filters=True):
+ for x in info['got_fprops']:
+ self.assertEqual(x, info['expected_fprops'])
+ if filters:
+ self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
+ self.assertEqual(set(info['expected_objs']), set(result))
+
+ def test_get_filtered_hosts(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_specified_filters(self):
+ fake_properties = {'moo': 1, 'cow': 2}
+
+ specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
+ info = {'expected_objs': self.fake_hosts,
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info, specified_filters)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties, filter_class_names=specified_filters)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_ignore(self):
+ fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5', 'fake_multihost']}
+
+ # [1] and [3] are host2 and host4
+ info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result)
+
+ def test_get_filtered_hosts_with_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
+ 'fake_host5']}
+
+ # [0] and [2] are host1 and host3
+ info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_no_matching_force_hosts(self):
+ fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
+        # Ensure ignore_hosts is processed before force_hosts in host filters.
+ fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
+ 'ignore_hosts': ['fake_host1']}
+
+ # only fake_host3 should be left.
+ info = {'expected_objs': [self.fake_hosts[2]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
+        # Ensure all nodes are returned for a host that has many nodes.
+ fake_properties = {'force_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
+ self.fake_hosts[6], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_nodes(self):
+ fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
+ 'fake-node9']}
+
+ # [5] is fake-node2, [7] is fake-node4
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
+        # Ensure only overlapping entries are returned when both
+        # force_hosts and force_nodes are set.
+ fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
+ 'force_nodes': ['fake-node2', 'fake-node9']}
+
+ # [5] is fake-node2
+ info = {'expected_objs': [self.fake_hosts[5]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
+        # Ensure non-overlapping force_nodes and force_hosts yield no result.
+ fake_properties = {'force_hosts': ['fake_multihost'],
+ 'force_nodes': ['fake-node']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
+ # Ensure ignore_hosts can coexist with force_nodes
+ fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
+ 'ignore_hosts': ['fake_host1', 'fake_host2']}
+
+ info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
+
+ def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
+ # Ensure ignore_hosts is processed before force_nodes
+ fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
+ 'ignore_hosts': ['fake_multihost']}
+
+ info = {'expected_objs': [],
+ 'expected_fprops': fake_properties}
+ self._mock_get_filtered_hosts(info)
+
+ self.mox.ReplayAll()
+
+ result = self.host_manager.get_filtered_hosts(self.fake_hosts,
+ fake_properties)
+ self._verify_result(info, result, False)
diff --git a/nova/tests/scheduler/test_rpcapi.py b/nova/tests/scheduler/test_rpcapi.py
index de088b2212..0ba0feb540 100644
--- a/nova/tests/scheduler/test_rpcapi.py
+++ b/nova/tests/scheduler/test_rpcapi.py
@@ -63,13 +63,6 @@ def _test_scheduler_api(self, method, rpc_method, **kwargs):
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
- def test_prep_resize(self):
- self._test_scheduler_api('prep_resize', rpc_method='cast',
- instance='fake_instance',
- instance_type='fake_type', image='fake_image',
- request_spec='fake_request_spec',
- filter_properties='fake_props', reservations=list('fake_res'))
-
def test_select_destinations(self):
self._test_scheduler_api('select_destinations', rpc_method='call',
request_spec='fake_request_spec',
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index b3d0c7343d..eb1f3d4888 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -280,7 +280,7 @@ def setUp(self):
super(SchedulerTestCase, self).setUp()
self.stubs.Set(compute_api, 'API', fakes.FakeComputeAPI)
- def fake_show(meh, context, id):
+ def fake_show(meh, context, id, **kwargs):
if id:
return {'id': id, 'min_disk': None, 'min_ram': None,
'name': 'fake_name',
diff --git a/nova/tests/scheduler/test_scheduler_utils.py b/nova/tests/scheduler/test_scheduler_utils.py
index e7a391b033..4613a419f2 100644
--- a/nova/tests/scheduler/test_scheduler_utils.py
+++ b/nova/tests/scheduler/test_scheduler_utils.py
@@ -215,3 +215,9 @@ def test_parse_options(self):
'=',
float,
[('bar', -2.1)])
+
+ def test_validate_filters_configured(self):
+ self.flags(scheduler_default_filters='FakeFilter1,FakeFilter2')
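+        # validate_filter simply checks membership in the configured
+        # scheduler_default_filters list.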
+ self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
+ self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
+ self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
diff --git a/nova/tests/scheduler/test_weights.py b/nova/tests/scheduler/test_weights.py
index bc8d416182..6d4743e1fa 100644
--- a/nova/tests/scheduler/test_weights.py
+++ b/nova/tests/scheduler/test_weights.py
@@ -207,7 +207,7 @@ def _check_parsing_result(self, weigher, setting, results):
weigher._parse_setting()
self.assertEqual(len(weigher.setting), len(results))
for item in results:
- self.assertTrue(item in weigher.setting)
+ self.assertIn(item, weigher.setting)
def test_parse_setting(self):
weigher = self.weight_classes[0]()
diff --git a/nova/tests/test_api_validation.py b/nova/tests/test_api_validation.py
index fc1b4598a1..0829cacbe5 100644
--- a/nova/tests/test_api_validation.py
+++ b/nova/tests/test_api_validation.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import re
+
from nova.api import validation
from nova.api.validation import parameter_types
from nova import exception
@@ -24,11 +26,10 @@ def check_validation_error(self, method, body, expected_detail):
try:
method(body=body)
except exception.ValidationError as ex:
- expected_kwargs = {
- 'code': 400,
- 'detail': expected_detail
- }
- self.assertEqual(ex.kwargs, expected_kwargs)
+ self.assertEqual(400, ex.kwargs['code'])
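+            # expected_detail may be a regex; jsonschema's wording differs
+            # across versions. assertEqual is used on mismatch to get a
+            # readable failure message.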
+ if not re.match(expected_detail, ex.kwargs['detail']):
+ self.assertEqual(expected_detail, ex.kwargs['detail'],
+ 'Exception details did not match expected')
except Exception as ex:
self.fail('An unexpected exception happens: %s' % ex)
else:
@@ -144,6 +145,48 @@ def test_validate_additionalProperties_disable_fails(self):
expected_detail=detail)
+class PatternPropertiesTestCase(APIValidationTestCase):
+
+ def setUp(self):
+ super(PatternPropertiesTestCase, self).setUp()
+ schema = {
+ 'patternProperties': {
+ '^[a-zA-Z0-9]{1,10}$': {
+ 'type': 'string'
+ },
+ },
+ 'additionalProperties': False,
+ }
+
+ @validation.schema(request_body_schema=schema)
+ def post(body):
+ return 'Validation succeeded.'
+
+ self.post = post
+
+ def test_validate_patternProperties(self):
+ self.assertEqual('Validation succeeded.',
+ self.post(body={'foo': 'bar'}))
+
+ def test_validate_patternProperties_fails(self):
+ detail = "Additional properties are not allowed ('__' was unexpected)"
+ self.check_validation_error(self.post, body={'__': 'bar'},
+ expected_detail=detail)
+
+ detail = "Additional properties are not allowed ('' was unexpected)"
+ self.check_validation_error(self.post, body={'': 'bar'},
+ expected_detail=detail)
+
+ detail = ("Additional properties are not allowed ('0123456789a' was"
+ " unexpected)")
+ self.check_validation_error(self.post, body={'0123456789a': 'bar'},
+ expected_detail=detail)
+
+ detail = "expected string or buffer"
+ self.check_validation_error(self.post, body={None: 'bar'},
+ expected_detail=detail)
+
+
class StringTestCase(APIValidationTestCase):
def setUp(self):
@@ -314,22 +357,22 @@ def test_validate_integer_range(self):
def test_validate_integer_range_fails(self):
detail = ("Invalid input for field/attribute foo. Value: 0."
- " 0.0 is less than the minimum of 1")
+ " 0(.0)? is less than the minimum of 1")
self.check_validation_error(self.post, body={'foo': 0},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 11."
- " 11.0 is greater than the maximum of 10")
+ " 11(.0)? is greater than the maximum of 10")
self.check_validation_error(self.post, body={'foo': 11},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 0."
- " 0.0 is less than the minimum of 1")
+ " 0(.0)? is less than the minimum of 1")
self.check_validation_error(self.post, body={'foo': '0'},
expected_detail=detail)
detail = ("Invalid input for field/attribute foo. Value: 11."
- " 11.0 is greater than the maximum of 10")
+ " 11(.0)? is greater than the maximum of 10")
self.check_validation_error(self.post, body={'foo': '11'},
expected_detail=detail)
@@ -498,7 +541,7 @@ def test_validate_name(self):
self.post(body={'foo': 'a'}))
def test_validate_name_fails(self):
-        pattern = "'^(?! )[a-zA-Z0-9. _-]+(?<! )$'"
diff --git a/nova/tests/virt/hyperv/test_hypervapi.py b/nova/tests/virt/hyperv/test_hypervapi.py
--- a/nova/tests/virt/hyperv/test_hypervapi.py
+++ b/nova/tests/virt/hyperv/test_hypervapi.py
         def fake_check_min_windows_version(cls, major, minor):
+            if [major, minor] >= [6, 3]:
+                return False
return self._check_min_windows_version_satisfied
self.stubs.Set(hostutils.HostUtils, 'check_min_windows_version',
fake_check_min_windows_version)
@@ -134,10 +138,6 @@ def fake_sleep(ms):
pass
self.stubs.Set(time, 'sleep', fake_sleep)
- def fake_vmutils__init__(self, host='.'):
- pass
- vmutils.VMUtils.__init__ = fake_vmutils__init__
-
self.stubs.Set(pathutils, 'PathUtils', fake.PathUtils)
self._mox.StubOutWithMock(fake.PathUtils, 'open')
self._mox.StubOutWithMock(fake.PathUtils, 'copyfile')
@@ -164,7 +164,7 @@ def fake_vmutils__init__(self, host='.'):
self._mox.StubOutWithMock(vmutils.VMUtils, 'set_nic_connection')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_scsi_controller')
self._mox.StubOutWithMock(vmutils.VMUtils, 'get_vm_ide_controller')
- self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks_count')
+ self._mox.StubOutWithMock(vmutils.VMUtils, 'get_attached_disks')
self._mox.StubOutWithMock(vmutils.VMUtils,
'attach_volume_to_controller')
self._mox.StubOutWithMock(vmutils.VMUtils,
@@ -343,6 +343,13 @@ def test_list_instances(self):
self.assertEqual(instances, fake_instances)
+ def test_get_host_uptime(self):
+ fake_host = "fake_host"
+ with mock.patch.object(self._conn._hostops,
+ "get_host_uptime") as mock_uptime:
+ self._conn._hostops.get_host_uptime(fake_host)
+ mock_uptime.assert_called_once_with(fake_host)
+
def test_get_info(self):
self._instance_data = self._get_instance_data()
@@ -767,6 +774,15 @@ def _test_pre_live_migration(self, cow, with_volumes):
else:
self.assertIsNone(self._fetched_image)
+ def test_get_instance_disk_info_is_implemented(self):
+ # Ensure that the method has been implemented in the driver
+ try:
+ disk_info = self._conn.get_instance_disk_info('fake_instance_name')
+ self.assertIsNone(disk_info)
+ except NotImplementedError:
+ self.fail("test_get_instance_disk_info() should not raise "
+ "NotImplementedError")
+
def test_snapshot_with_update_failure(self):
(snapshot_name, func_call_matcher) = self._setup_snapshot_mocks()
@@ -1022,15 +1038,25 @@ def _setup_spawn_instance_mocks(self, cow, setup_vif_mocks_func=None,
m.AndReturn(self._test_instance_dir)
self._setup_get_cached_image_mocks(cow, vhd_format)
+ m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
+ m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024,
+ 'Type': 2})
if cow:
- vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
- mox.IsA(str))
+ m = vhdutils.VHDUtils.get_vhd_format(mox.IsA(str))
+ m.AndReturn(vhd_format)
+ if vhd_format == constants.DISK_FORMAT_VHD:
+ vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
+ mox.IsA(str))
+ else:
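+                # Non-VHD formats (e.g. VHDX) compute an explicit internal
+                # size for the differencing disk from the parent file size.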
+ m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
+ mox.IsA(str), mox.IsA(object))
+ m.AndReturn(1025)
+ vhdutils.VHDUtils.create_differencing_vhd(mox.IsA(str),
+ mox.IsA(str),
+ mox.IsA(int))
else:
fake.PathUtils.copyfile(mox.IsA(str), mox.IsA(str))
- m = vhdutils.VHDUtils.get_vhd_info(mox.IsA(str))
- m.AndReturn({'MaxInternalSize': 1024, 'FileSize': 1024,
- 'Type': 2})
m = vhdutils.VHDUtils.get_internal_vhd_size_by_file_size(
mox.IsA(str), mox.IsA(object))
m.AndReturn(1025)
@@ -1121,6 +1147,8 @@ def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
fake_mounted_disk = "fake_mounted_disk"
fake_device_number = 0
fake_controller_path = 'fake_scsi_controller_path'
+ self._mox.StubOutWithMock(self._conn._volumeops,
+ '_get_free_controller_slot')
self._mock_login_storage_target(target_iqn, target_lun,
target_portal,
@@ -1140,7 +1168,8 @@ def _mock_attach_volume(self, instance_name, target_iqn, target_lun,
m.AndReturn(fake_controller_path)
fake_free_slot = 1
- m = vmutils.VMUtils.get_attached_disks_count(fake_controller_path)
+ m = self._conn._volumeops._get_free_controller_slot(
+ fake_controller_path)
m.AndReturn(fake_free_slot)
m = vmutils.VMUtils.attach_volume_to_controller(instance_name,
@@ -1493,13 +1522,45 @@ def test_migrate_disk_and_power_off_smaller_root_vhd_size_exception(self):
(instance, fake_dest_ip, network_info, flavor) = args
self._mox.ReplayAll()
- self.assertRaises(vmutils.VHDResizeException,
+ self.assertRaises(exception.InstanceFaultRollback,
self._conn.migrate_disk_and_power_off,
self._context, instance, fake_dest_ip,
flavor, network_info)
self._mox.VerifyAll()
- def _test_finish_migration(self, power_on, ephemeral_storage=False):
+ def _mock_attach_config_drive(self, instance, config_drive_format):
+ instance['config_drive'] = True
+ self._mox.StubOutWithMock(fake.PathUtils, 'lookup_configdrive_path')
+ m = fake.PathUtils.lookup_configdrive_path(
+ mox.Func(self._check_instance_name))
+
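+        # The stubbed lookup returns a path only for known disk formats;
+        # unknown formats surface as a missing config drive (None).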
+ if config_drive_format in constants.DISK_FORMAT_MAP:
+ m.AndReturn(self._test_instance_dir + '/configdrive.' +
+ config_drive_format)
+ else:
+ m.AndReturn(None)
+
+ m = vmutils.VMUtils.attach_ide_drive(
+ mox.Func(self._check_instance_name),
+ mox.IsA(str),
+ mox.IsA(int),
+ mox.IsA(int),
+ mox.IsA(str))
+ m.WithSideEffects(self._add_ide_disk).InAnyOrder()
+
+ def _verify_attach_config_drive(self, config_drive_format):
+ if config_drive_format == constants.IDE_DISK_FORMAT.lower():
+ self.assertEqual(self._instance_ide_disks[1],
+ self._test_instance_dir + '/configdrive.' +
+ config_drive_format)
+ elif config_drive_format == constants.IDE_DVD_FORMAT.lower():
+ self.assertEqual(self._instance_ide_dvds[0],
+ self._test_instance_dir + '/configdrive.' +
+ config_drive_format)
+
+ def _test_finish_migration(self, power_on, ephemeral_storage=False,
+ config_drive=False,
+ config_drive_format='iso'):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
instance['system_metadata'] = {}
@@ -1548,11 +1609,17 @@ def _test_finish_migration(self, power_on, ephemeral_storage=False):
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
+ if config_drive:
+ self._mock_attach_config_drive(instance, config_drive_format)
+
self._mox.ReplayAll()
self._conn.finish_migration(self._context, None, instance, "",
network_info, None, False, None, power_on)
self._mox.VerifyAll()
+ if config_drive:
+ self._verify_attach_config_drive(config_drive_format)
+
def test_finish_migration_power_on(self):
self._test_finish_migration(True)
@@ -1562,6 +1629,14 @@ def test_finish_migration_power_off(self):
def test_finish_migration_with_ephemeral_storage(self):
self._test_finish_migration(False, ephemeral_storage=True)
+ def test_finish_migration_attach_config_drive_iso(self):
+ self._test_finish_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DVD_FORMAT.lower())
+
+ def test_finish_migration_attach_config_drive_vhd(self):
+ self._test_finish_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DISK_FORMAT.lower())
+
def test_confirm_migration(self):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
@@ -1573,7 +1648,9 @@ def test_confirm_migration(self):
self._conn.confirm_migration(None, instance, network_info)
self._mox.VerifyAll()
- def _test_finish_revert_migration(self, power_on, ephemeral_storage=False):
+ def _test_finish_revert_migration(self, power_on, ephemeral_storage=False,
+ config_drive=False,
+ config_drive_format='iso'):
self._instance_data = self._get_instance_data()
instance = db.instance_create(self._context, self._instance_data)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
@@ -1611,12 +1688,18 @@ def _test_finish_revert_migration(self, power_on, ephemeral_storage=False):
vmutils.VMUtils.set_vm_state(mox.Func(self._check_instance_name),
constants.HYPERV_VM_STATE_ENABLED)
+ if config_drive:
+ self._mock_attach_config_drive(instance, config_drive_format)
+
self._mox.ReplayAll()
self._conn.finish_revert_migration(self._context, instance,
network_info, None,
power_on)
self._mox.VerifyAll()
+ if config_drive:
+ self._verify_attach_config_drive(config_drive_format)
+
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(True)
@@ -1632,6 +1715,14 @@ def test_spawn_no_admin_permissions(self):
def test_finish_revert_migration_with_ephemeral_storage(self):
self._test_finish_revert_migration(False, ephemeral_storage=True)
+ def test_finish_revert_migration_attach_config_drive_iso(self):
+ self._test_finish_revert_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DVD_FORMAT.lower())
+
+ def test_finish_revert_migration_attach_config_drive_vhd(self):
+ self._test_finish_revert_migration(False, config_drive=True,
+ config_drive_format=constants.IDE_DISK_FORMAT.lower())
+
def test_plug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError,
@@ -1732,3 +1823,38 @@ def test_get_mounted_disk_from_lun_failure(self):
self.assertRaises(exception.NotFound,
self.volumeops._get_mounted_disk_from_lun,
target_iqn, target_lun)
+
+ def test_get_free_controller_slot_exception(self):
+ fake_drive = mock.MagicMock()
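+        # Each read of AddressOnParent returns the next value from the
+        # xrange, so all SCSI_CONTROLLER_SLOTS_NUMBER slots appear taken.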
+ type(fake_drive).AddressOnParent = mock.PropertyMock(
+ side_effect=xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER))
+ fake_scsi_controller_path = 'fake_scsi_controller_path'
+
+ with mock.patch.object(self.volumeops._vmutils,
+ 'get_attached_disks') as fake_get_attached_disks:
+ fake_get_attached_disks.return_value = (
+ [fake_drive] * constants.SCSI_CONTROLLER_SLOTS_NUMBER)
+ self.assertRaises(vmutils.HyperVException,
+ self.volumeops._get_free_controller_slot,
+ fake_scsi_controller_path)
+
+
+class HostOpsTestCase(HyperVAPIBaseTestCase):
+ """Unit tests for the Hyper-V hostops class."""
+
+ def setUp(self):
+ self._hostops = hostops.HostOps()
+ self._hostops._hostutils = mock.MagicMock()
+ self._hostops.time = mock.MagicMock()
+ super(HostOpsTestCase, self).setUp()
+
+ @mock.patch('nova.virt.hyperv.hostops.time')
+ def test_host_uptime(self, mock_time):
+ self._hostops._hostutils.get_host_tick_count64.return_value = 100
+ mock_time.strftime.return_value = "01:01:01"
+
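+        # The expected value mirrors the Linux `uptime` output format,
+        # built from the host tick count in milliseconds.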
+ result_uptime = "01:01:01 up %s, 0 users, load average: 0, 0, 0" % (
+ str(datetime.timedelta(
+                milliseconds=long(100))))
+ actual_uptime = self._hostops.get_host_uptime()
+ self.assertEqual(result_uptime, actual_uptime)
diff --git a/nova/tests/virt/hyperv/test_migrationops.py b/nova/tests/virt/hyperv/test_migrationops.py
new file mode 100644
index 0000000000..0af56d55fe
--- /dev/null
+++ b/nova/tests/virt/hyperv/test_migrationops.py
@@ -0,0 +1,46 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import test
+from nova.tests import fake_instance
+from nova.virt.hyperv import migrationops
+from nova.virt.hyperv import vmutils
+
+
+class MigrationOpsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V MigrationOps class."""
+
+ def setUp(self):
+ super(MigrationOpsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+        # utilsfactory checks the host OS version via get_hostutils in
+        # order to return the proper utils class, so it must be mocked.
+ patched_func = mock.patch.object(migrationops.utilsfactory,
+ "get_hostutils")
+ patched_func.start()
+ self.addCleanup(patched_func.stop)
+
+ self._migrationops = migrationops.MigrationOps()
+
+ def test_check_and_attach_config_drive_unknown_path(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.config_drive = 'True'
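+        # No config drive file can be found for the instance, so the
+        # attach attempted during migration must fail.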
+ self._migrationops._pathutils.lookup_configdrive_path = mock.MagicMock(
+ return_value=None)
+ self.assertRaises(vmutils.HyperVException,
+ self._migrationops._check_and_attach_config_drive,
+ instance)
diff --git a/nova/tests/virt/hyperv/test_pathutils.py b/nova/tests/virt/hyperv/test_pathutils.py
new file mode 100644
index 0000000000..0ded84ec6b
--- /dev/null
+++ b/nova/tests/virt/hyperv/test_pathutils.py
@@ -0,0 +1,58 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import mock
+
+from nova import test
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import pathutils
+
+
+class PathUtilsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V PathUtils class."""
+
+ def setUp(self):
+ self.fake_instance_dir = os.path.join('C:', 'fake_instance_dir')
+ self.fake_instance_name = 'fake_instance_name'
+ self._pathutils = pathutils.PathUtils()
+ super(PathUtilsTestCase, self).setUp()
+
+ def _mock_lookup_configdrive_path(self, ext):
+ self._pathutils.get_instance_dir = mock.MagicMock(
+ return_value=self.fake_instance_dir)
+
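+        # Report that only the config drive with the given extension
+        # exists on disk.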
+ def mock_exists(*args, **kwargs):
+ path = args[0]
+            return path[(path.rfind('.') + 1):] == ext
+ self._pathutils.exists = mock_exists
+ configdrive_path = self._pathutils.lookup_configdrive_path(
+ self.fake_instance_name)
+ return configdrive_path
+
+ def test_lookup_configdrive_path(self):
+ for format_ext in constants.DISK_FORMAT_MAP:
+ configdrive_path = self._mock_lookup_configdrive_path(format_ext)
+ fake_path = os.path.join(self.fake_instance_dir,
+ 'configdrive.' + format_ext)
+ self.assertEqual(configdrive_path, fake_path)
+
+ def test_lookup_configdrive_path_non_exist(self):
+ self._pathutils.get_instance_dir = mock.MagicMock(
+ return_value=self.fake_instance_dir)
+ self._pathutils.exists = mock.MagicMock(return_value=False)
+ configdrive_path = self._pathutils.lookup_configdrive_path(
+ self.fake_instance_name)
+ self.assertIsNone(configdrive_path)
diff --git a/nova/tests/virt/hyperv/test_utilsfactory.py b/nova/tests/virt/hyperv/test_utilsfactory.py
new file mode 100644
index 0000000000..58e2b2988c
--- /dev/null
+++ b/nova/tests/virt/hyperv/test_utilsfactory.py
@@ -0,0 +1,61 @@
+# Copyright 2014 Cloudbase Solutions SRL
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit tests for the Hyper-V utils factory.
+"""
+
+import mock
+from oslo.config import cfg
+
+from nova import test
+from nova.virt.hyperv import hostutils
+from nova.virt.hyperv import utilsfactory
+from nova.virt.hyperv import vmutils
+from nova.virt.hyperv import vmutilsv2
+
+CONF = cfg.CONF
+
+
+class TestHyperVUtilsFactory(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestHyperVUtilsFactory, self).setUp()
+
+ def test_get_vmutils_force_v1_and_min_version(self):
+ self._test_returned_class(None, True, True)
+
+ def test_get_vmutils_v2(self):
+ self._test_returned_class(vmutilsv2.VMUtilsV2, False, True)
+
+ def test_get_vmutils_v2_r2(self):
+ self._test_returned_class(vmutils.VMUtils, False, False)
+
+ def test_get_vmutils_force_v1_and_not_min_version(self):
+ self._test_returned_class(vmutils.VMUtils, True, False)
+
+ def _test_returned_class(self, expected_class, force_v1, os_supports_v2):
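+        # Forcing the V1 utils on an OS that supports the V2 namespace is
+        # rejected; otherwise the factory returns the class matching the
+        # host capabilities.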
+ CONF.set_override('force_hyperv_utils_v1', force_v1, 'hyperv')
+ with mock.patch.object(
+ hostutils.HostUtils,
+ 'check_min_windows_version') as mock_check_min_windows_version:
+ mock_check_min_windows_version.return_value = os_supports_v2
+
+ if os_supports_v2 and force_v1:
+ self.assertRaises(vmutils.HyperVException,
+ utilsfactory.get_vmutils)
+ else:
+ actual_class = type(utilsfactory.get_vmutils())
+ self.assertEqual(actual_class, expected_class)
diff --git a/nova/tests/virt/hyperv/test_vhdutils.py b/nova/tests/virt/hyperv/test_vhdutils.py
index c08a8902e6..e41353329a 100644
--- a/nova/tests/virt/hyperv/test_vhdutils.py
+++ b/nova/tests/virt/hyperv/test_vhdutils.py
@@ -24,6 +24,7 @@ class VHDUtilsTestCase(test.NoDBTestCase):
"""Unit tests for the Hyper-V VHDUtils class."""
_FAKE_VHD_PATH = "C:\\fake_path.vhdx"
+ _FAKE_PARENT_PATH = "C:\\fake_parent_path.vhdx"
_FAKE_FORMAT = 3
_FAKE_MAK_INTERNAL_SIZE = 1000
_FAKE_JOB_PATH = 'fake_job_path'
@@ -51,6 +52,26 @@ def test_create_dynamic_vhd(self):
Path=self._FAKE_VHD_PATH,
MaxInternalSize=self._FAKE_MAK_INTERNAL_SIZE)
+ def test_create_differencing_vhd(self):
+ mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
+ mock_img_svc.CreateDifferencingVirtualHardDisk.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_PATH)
+
+ mock_img_svc.CreateDifferencingVirtualHardDisk.assert_called_once_with(
+ Path=self._FAKE_VHD_PATH,
+ ParentPath=self._FAKE_PARENT_PATH)
+
+ def test_create_differencing_vhd_with_new_size(self):
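+        # A size cannot be specified when creating a differencing disk
+        # through this API, so the call is expected to raise.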
+ fake_new_size = 1024
+ self.assertRaises(vmutils.HyperVException,
+ self._vhdutils.create_differencing_vhd,
+ self._FAKE_VHD_PATH,
+ self._FAKE_PARENT_PATH,
+ fake_new_size)
+
def test_get_internal_vhd_size_by_file_size_fixed(self):
vhdutil = vhdutils.VHDUtils()
root_vhd_size = 1 * 1024 ** 3
@@ -76,15 +97,24 @@ def test_get_internal_vhd_size_by_file_size_dynamic(self):
expected_vhd_size = 20 * 1024 ** 3 - 43008
self.assertEqual(expected_vhd_size, real_size)
- def test_get_internal_vhd_size_by_file_size_unsupported(self):
+ def test_get_internal_vhd_size_by_file_size_differencing(self):
+ # For differencing images, the internal size of the parent vhd
+ # is returned
vhdutil = vhdutils.VHDUtils()
root_vhd_size = 20 * 1024 ** 3
vhdutil.get_vhd_info = mock.MagicMock()
- vhdutil.get_vhd_info.return_value = {'Type': 5}
+ vhdutil.get_vhd_parent_path = mock.MagicMock()
+ vhdutil.get_vhd_parent_path.return_value = self._FAKE_VHD_PATH
+ vhdutil.get_vhd_info.side_effect = [
+ {'Type': 4}, {'Type': constants.VHD_TYPE_DYNAMIC}]
- self.assertRaises(vmutils.HyperVException,
- vhdutil.get_internal_vhd_size_by_file_size,
- None, root_vhd_size)
+ vhdutil._get_vhd_dynamic_blk_size = mock.MagicMock()
+ vhdutil._get_vhd_dynamic_blk_size.return_value = 2097152
+
+ real_size = vhdutil.get_internal_vhd_size_by_file_size(None,
+ root_vhd_size)
+ expected_vhd_size = 20 * 1024 ** 3 - 43008
+ self.assertEqual(expected_vhd_size, real_size)
def test_get_vhd_format_vhdx(self):
with mock.patch('nova.virt.hyperv.vhdutils.open',
diff --git a/nova/tests/virt/hyperv/test_vhdutilsv2.py b/nova/tests/virt/hyperv/test_vhdutilsv2.py
index 4058654c9d..d813057724 100644
--- a/nova/tests/virt/hyperv/test_vhdutilsv2.py
+++ b/nova/tests/virt/hyperv/test_vhdutilsv2.py
@@ -35,6 +35,11 @@ class VHDUtilsV2TestCase(test.NoDBTestCase):
_FAKE_LOG_SIZE = 1048576
_FAKE_LOGICAL_SECTOR_SIZE = 4096
_FAKE_METADATA_SIZE = 1048576
+ _FAKE_VHD_INFO = {'ParentPath': _FAKE_PARENT_VHD_PATH,
+ 'Format': _FAKE_FORMAT,
+ 'BlockSize': _FAKE_BLOCK_SIZE,
+ 'LogicalSectorSize': _FAKE_LOGICAL_SECTOR_SIZE,
+ 'Type': _FAKE_TYPE}
def setUp(self):
self._vhdutils = vhdutilsv2.VHDUtilsV2()
@@ -166,13 +171,16 @@ def test_resize_vhd(self):
self.mock_get.assert_called_once_with(self._FAKE_VHD_PATH,
self._FAKE_MAK_INTERNAL_SIZE)
- def test_get_vhdx_internal_size(self):
- self._vhdutils.get_vhd_info = mock.MagicMock(
- return_value={'ParentPath': self._FAKE_PARENT_VHD_PATH,
- 'Format': self._FAKE_FORMAT,
- 'BlockSize': self._FAKE_BLOCK_SIZE,
- 'LogicalSectorSize': self._FAKE_LOGICAL_SECTOR_SIZE,
- 'Type': self._FAKE_TYPE})
+ def _test_get_vhdx_internal_size(self, vhd_type):
+ self._vhdutils.get_vhd_info = mock.MagicMock()
+ self._vhdutils.get_vhd_parent_path = mock.Mock(
+ return_value=self._FAKE_PARENT_VHD_PATH)
+
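+        # Type 4 denotes a differencing disk: its internal size comes
+        # from the parent, so get_vhd_info is queried twice.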
+ if vhd_type == 4:
+ self._vhdutils.get_vhd_info.side_effect = [
+ {'Type': vhd_type}, self._FAKE_VHD_INFO]
+ else:
+ self._vhdutils.get_vhd_info.return_value = self._FAKE_VHD_INFO
self._vhdutils._get_vhdx_log_size = mock.MagicMock(
return_value=self._FAKE_LOG_SIZE)
self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
@@ -189,6 +197,12 @@ def test_get_vhdx_internal_size(self):
self.assertEqual(self._FAKE_MAK_INTERNAL_SIZE - self._FAKE_BLOCK_SIZE,
internal_size)
+ def test_get_vhdx_internal_size_dynamic(self):
+ self._test_get_vhdx_internal_size(3)
+
+ def test_get_vhdx_internal_size_differencing(self):
+ self._test_get_vhdx_internal_size(4)
+
def test_get_vhdx_current_header(self):
VHDX_HEADER_OFFSETS = [64 * 1024, 128 * 1024]
fake_sequence_numbers = ['\x01\x00\x00\x00\x00\x00\x00\x00',
diff --git a/nova/tests/virt/hyperv/test_vmops.py b/nova/tests/virt/hyperv/test_vmops.py
new file mode 100644
index 0000000000..020dd95706
--- /dev/null
+++ b/nova/tests/virt/hyperv/test_vmops.py
@@ -0,0 +1,137 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import timeout as etimeout
+import mock
+
+from nova import exception
+from nova import test
+from nova.tests import fake_instance
+from nova.virt.hyperv import constants
+from nova.virt.hyperv import vmops
+from nova.virt.hyperv import vmutils
+
+
+class VMOpsTestCase(test.NoDBTestCase):
+ """Unit tests for the Hyper-V VMOps class."""
+
+ _FAKE_TIMEOUT = 0
+
+ def __init__(self, test_case_name):
+ super(VMOpsTestCase, self).__init__(test_case_name)
+
+ def setUp(self):
+ super(VMOpsTestCase, self).setUp()
+ self.context = 'fake-context'
+
+        # utilsfactory checks the host OS version via get_hostutils in
+        # order to return the proper utils class, so it must be mocked.
+ patched_func = mock.patch.object(vmops.utilsfactory,
+ "get_hostutils")
+ patched_func.start()
+ self.addCleanup(patched_func.stop)
+
+ self._vmops = vmops.VMOps()
+
+ def test_attach_config_drive(self):
+ instance = fake_instance.fake_instance_obj(self.context)
+ self.assertRaises(exception.InvalidDiskFormat,
+ self._vmops.attach_config_drive,
+ instance, 'C:/fake_instance_dir/configdrive.xxx')
+
+ def test_reboot_hard(self):
+ self._test_reboot(vmops.REBOOT_TYPE_HARD,
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft(self, mock_soft_shutdown):
+ mock_soft_shutdown.return_value = True
+ self._test_reboot(vmops.REBOOT_TYPE_SOFT,
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft_failed(self, mock_soft_shutdown):
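+        # A failed soft shutdown falls back to a hard reboot.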
+ mock_soft_shutdown.return_value = False
+ self._test_reboot(vmops.REBOOT_TYPE_SOFT,
+ constants.HYPERV_VM_STATE_REBOOT)
+
+ @mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
+ def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
+ mock_soft_shutdown.return_value = True
+ mock_power_on.side_effect = vmutils.HyperVException("Expected failure")
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ self.assertRaises(vmutils.HyperVException, self._vmops.reboot,
+ instance, {}, vmops.REBOOT_TYPE_SOFT)
+
+ mock_soft_shutdown.assert_called_once_with(instance)
+ mock_power_on.assert_called_once_with(instance)
+
+ def _test_reboot(self, reboot_type, vm_state):
+ instance = fake_instance.fake_instance_obj(self.context)
+ with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
+ self._vmops.reboot(instance, {}, reboot_type)
+ mock_set_state.assert_called_once_with(instance.name, vm_state)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ @mock.patch("nova.virt.hyperv.vmops.VMOps._wait_for_power_off")
+ def test_soft_shutdown(self, mock_wait_for_power_off, mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+ mock_wait_for_power_off.return_value = True
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
+
+ mock_shutdown_vm.assert_called_once_with(instance.name)
+ mock_wait_for_power_off.assert_called_once_with(
+ instance.name, self._FAKE_TIMEOUT)
+
+ self.assertTrue(result)
+
+ @mock.patch("nova.virt.hyperv.vmutils.VMUtils.soft_shutdown_vm")
+ def test_soft_shutdown_failed(self, mock_shutdown_vm):
+ instance = fake_instance.fake_instance_obj(self.context)
+
+ mock_shutdown_vm.side_effect = vmutils.HyperVException(
+ "Expected failure.")
+
+ result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
+
+ mock_shutdown_vm.assert_called_once_with(instance.name)
+ self.assertFalse(result)
+
+ def test_get_vm_state(self):
+ summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
+
+ with mock.patch.object(self._vmops._vmutils,
+ 'get_vm_summary_info') as mock_get_summary_info:
+ mock_get_summary_info.return_value = summary_info
+
+ response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
+ self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED)
+
+ @mock.patch.object(vmops.VMOps, '_get_vm_state')
+ def test_wait_for_power_off_true(self, mock_get_state):
+ mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
+ result = self._vmops._wait_for_power_off(
+ mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
+ mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
+ self.assertTrue(result)
+
+ @mock.patch.object(vmops.etimeout, "with_timeout")
+ def test_wait_for_power_off_false(self, mock_with_timeout):
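+        # An eventlet Timeout means the VM never reached the powered-off
+        # state within the allotted time.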
+ mock_with_timeout.side_effect = etimeout.Timeout()
+ result = self._vmops._wait_for_power_off(
+ mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
+ self.assertFalse(result)
diff --git a/nova/tests/virt/hyperv/test_vmutils.py b/nova/tests/virt/hyperv/test_vmutils.py
index c9e029ba3d..65c7f84cb9 100644
--- a/nova/tests/virt/hyperv/test_vmutils.py
+++ b/nova/tests/virt/hyperv/test_vmutils.py
@@ -1,4 +1,4 @@
-# Copyright 2013 Cloudbase Solutions Srl
+# Copyright 2014 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,7 +15,9 @@
import mock
+from nova import exception
from nova import test
+from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
@@ -24,10 +26,46 @@ class VMUtilsTestCase(test.NoDBTestCase):
_FAKE_VM_NAME = 'fake_vm'
_FAKE_MEMORY_MB = 2
+ _FAKE_VCPUS_NUM = 4
+ _FAKE_JOB_PATH = 'fake_job_path'
+ _FAKE_RET_VAL = 0
+ _FAKE_RET_VAL_BAD = -1
+ _FAKE_CTRL_PATH = 'fake_ctrl_path'
+ _FAKE_CTRL_ADDR = 0
+ _FAKE_DRIVE_ADDR = 0
+ _FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path'
_FAKE_VM_PATH = "fake_vm_path"
_FAKE_VHD_PATH = "fake_vhd_path"
_FAKE_DVD_PATH = "fake_dvd_path"
_FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
+ _FAKE_SNAPSHOT_PATH = "fake_snapshot_path"
+ _FAKE_RES_DATA = "fake_res_data"
+ _FAKE_HOST_RESOURCE = "fake_host_resource"
+ _FAKE_CLASS = "FakeClass"
+ _FAKE_RES_PATH = "fake_res_path"
+ _FAKE_RES_NAME = 'fake_res_name'
+ _FAKE_ADDRESS = "fake_address"
+ _FAKE_JOB_STATUS_DONE = 7
+ _FAKE_JOB_STATUS_BAD = -1
+ _FAKE_JOB_DESCRIPTION = "fake_job_description"
+ _FAKE_ERROR = "fake_error"
+ _FAKE_ELAPSED_TIME = 0
+ _CONCRETE_JOB = "Msvm_ConcreteJob"
+ _FAKE_DYNAMIC_MEMORY_RATIO = 1.0
+
+ _FAKE_SUMMARY_INFO = {'NumberOfProcessors': 4,
+ 'EnabledState': 2,
+ 'MemoryUsage': 2,
+ 'UpTime': 1}
+
+ _DEFINE_SYSTEM = 'DefineVirtualSystem'
+ _DESTROY_SYSTEM = 'DestroyVirtualSystem'
+ _DESTROY_SNAPSHOT = 'RemoveVirtualSystemSnapshot'
+ _ADD_RESOURCE = 'AddVirtualSystemResources'
+ _REMOVE_RESOURCE = 'RemoveVirtualSystemResources'
+ _SETTING_TYPE = 'SettingType'
+
+ _VIRTUAL_SYSTEM_TYPE_REALIZED = 3
def setUp(self):
self._vmutils = vmutils.VMUtils()
@@ -40,6 +78,20 @@ def test_enable_vm_metrics_collection(self):
self._vmutils.enable_vm_metrics_collection,
self._FAKE_VM_NAME)
+ def test_get_vm_summary_info(self):
+ self._lookup_vm()
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+
+ mock_summary = mock.MagicMock()
+ mock_svc.GetSummaryInformation.return_value = (self._FAKE_RET_VAL,
+ [mock_summary])
+
+ for (key, val) in self._FAKE_SUMMARY_INFO.items():
+ setattr(mock_summary, key, val)
+
+ summary = self._vmutils.get_vm_summary_info(self._FAKE_VM_NAME)
+ self.assertEqual(self._FAKE_SUMMARY_INFO, summary)
+
def _lookup_vm(self):
mock_vm = mock.MagicMock()
self._vmutils._lookup_vm_check = mock.MagicMock(
@@ -47,6 +99,25 @@ def _lookup_vm(self):
mock_vm.path_.return_value = self._FAKE_VM_PATH
return mock_vm
+ def test_lookup_vm_ok(self):
+ mock_vm = mock.MagicMock()
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
+ vm = self._vmutils._lookup_vm_check(self._FAKE_VM_NAME)
+ self.assertEqual(mock_vm, vm)
+
+ def test_lookup_vm_multiple(self):
+ mockvm = mock.MagicMock()
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = [mockvm, mockvm]
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._lookup_vm_check,
+ self._FAKE_VM_NAME)
+
+ def test_lookup_vm_none(self):
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = []
+ self.assertRaises(exception.NotFound,
+ self._vmutils._lookup_vm_check,
+ self._FAKE_VM_NAME)
+
def test_set_vm_memory_static(self):
self._test_set_vm_memory_dynamic(1.0)
@@ -76,6 +147,27 @@ def _test_set_vm_memory_dynamic(self, dynamic_memory_ratio):
else:
self.assertFalse(mock_s.DynamicMemoryEnabled)
+ def test_soft_shutdown_vm(self):
+ mock_vm = self._lookup_vm()
+ mock_shutdown = mock.MagicMock()
+ mock_shutdown.InitiateShutdown.return_value = (self._FAKE_RET_VAL, )
+ mock_vm.associators.return_value = [mock_shutdown]
+
+ with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
+ self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
+
+ mock_shutdown.InitiateShutdown.assert_called_once_with(
+ Force=False, Reason=mock.ANY)
+ mock_check.assert_called_once_with(self._FAKE_RET_VAL, None)
+
+ def test_soft_shutdown_vm_no_component(self):
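+        # Without a shutdown component (e.g. integration services not
+        # running) the request is skipped and no job result is checked.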
+ mock_vm = self._lookup_vm()
+ mock_vm.associators.return_value = []
+
+ with mock.patch.object(self._vmutils, 'check_ret_val') as mock_check:
+ self._vmutils.soft_shutdown_vm(self._FAKE_VM_NAME)
+ self.assertFalse(mock_check.called)
+
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
def test_get_vm_storage_paths(self, mock_get_vm_disks):
self._lookup_vm()
@@ -108,6 +200,7 @@ def test_get_vm_disks(self):
def _create_mock_disks(self):
mock_rasd1 = mock.MagicMock()
mock_rasd1.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
+ mock_rasd1.HostResource = [self._FAKE_VHD_PATH]
mock_rasd1.Connection = [self._FAKE_VHD_PATH]
mock_rasd2 = mock.MagicMock()
@@ -115,3 +208,300 @@ def _create_mock_disks(self):
mock_rasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
return [mock_rasd1, mock_rasd2]
+
+ @mock.patch.object(vmutils.VMUtils, '_set_vm_vcpus')
+ @mock.patch.object(vmutils.VMUtils, '_set_vm_memory')
+ @mock.patch.object(vmutils.VMUtils, '_get_wmi_obj')
+ def test_create_vm(self, mock_get_wmi_obj, mock_set_mem, mock_set_vcpus):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._DEFINE_SYSTEM).return_value = (
+ None, self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ mock_vm = mock_get_wmi_obj.return_value
+ self._vmutils._conn.Msvm_ComputerSystem.return_value = [mock_vm]
+
+ mock_s = mock.MagicMock()
+ setattr(mock_s,
+ self._SETTING_TYPE,
+ self._VIRTUAL_SYSTEM_TYPE_REALIZED)
+ mock_vm.associators.return_value = [mock_s]
+
+ self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB,
+ self._FAKE_VCPUS_NUM, False,
+ self._FAKE_DYNAMIC_MEMORY_RATIO)
+
+ self.assertTrue(getattr(mock_svc, self._DEFINE_SYSTEM).called)
+ mock_set_mem.assert_called_with(mock_vm, mock_s, self._FAKE_MEMORY_MB,
+ self._FAKE_DYNAMIC_MEMORY_RATIO)
+
+ mock_set_vcpus.assert_called_with(mock_vm, mock_s,
+ self._FAKE_VCPUS_NUM,
+ False)
+
+ def test_get_vm_scsi_controller(self):
+ self._prepare_get_vm_controller(self._vmutils._SCSI_CTRL_RES_SUB_TYPE)
+ path = self._vmutils.get_vm_scsi_controller(self._FAKE_VM_NAME)
+ self.assertEqual(self._FAKE_RES_PATH, path)
+
+ def test_get_vm_ide_controller(self):
+ self._prepare_get_vm_controller(self._vmutils._IDE_CTRL_RES_SUB_TYPE)
+ path = self._vmutils.get_vm_ide_controller(self._FAKE_VM_NAME,
+ self._FAKE_ADDRESS)
+ self.assertEqual(self._FAKE_RES_PATH, path)
+
+ def _prepare_get_vm_controller(self, resource_sub_type):
+ mock_vm = self._lookup_vm()
+ mock_vm_settings = mock.MagicMock()
+ mock_rasds = mock.MagicMock()
+ mock_rasds.path_.return_value = self._FAKE_RES_PATH
+ mock_rasds.ResourceSubType = resource_sub_type
+ mock_rasds.Address = self._FAKE_ADDRESS
+ mock_vm_settings.associators.return_value = [mock_rasds]
+ mock_vm.associators.return_value = [mock_vm_settings]
+
+ def _prepare_resources(self, mock_path, mock_subtype, mock_vm_settings):
+ mock_rasds = mock_vm_settings.associators.return_value[0]
+ mock_rasds.path_.return_value = mock_path
+ mock_rasds.ResourceSubType = mock_subtype
+ return mock_rasds
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
+ @mock.patch.object(vmutils.VMUtils, '_get_vm_ide_controller')
+ def test_attach_ide_drive(self, mock_get_ide_ctrl, mock_get_new_rsd):
+ mock_vm = self._lookup_vm()
+ mock_rsd = mock_get_new_rsd.return_value
+
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.attach_ide_drive(self._FAKE_VM_NAME,
+ self._FAKE_CTRL_PATH,
+ self._FAKE_CTRL_ADDR,
+ self._FAKE_DRIVE_ADDR)
+
+ mock_add_virt_res.assert_called_with(mock_rsd,
+ mock_vm.path_.return_value)
+
+ mock_get_ide_ctrl.assert_called_with(mock_vm, self._FAKE_CTRL_ADDR)
+ self.assertTrue(mock_get_new_rsd.called)
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
+ def test_create_scsi_controller(self, mock_get_new_rsd):
+ mock_vm = self._lookup_vm()
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.create_scsi_controller(self._FAKE_VM_NAME)
+
+ mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
+ mock_vm.path_.return_value)
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_resource_setting_data')
+ def test_attach_volume_to_controller(self, mock_get_new_rsd):
+ mock_vm = self._lookup_vm()
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.attach_volume_to_controller(
+ self._FAKE_VM_NAME, self._FAKE_CTRL_PATH, self._FAKE_CTRL_ADDR,
+ self._FAKE_MOUNTED_DISK_PATH)
+
+ mock_add_virt_res.assert_called_with(mock_get_new_rsd.return_value,
+ mock_vm.path_.return_value)
+
+ @mock.patch.object(vmutils.VMUtils, '_modify_virt_resource')
+ @mock.patch.object(vmutils.VMUtils, '_get_nic_data_by_name')
+ def test_set_nic_connection(self, mock_get_nic_conn, mock_modify_virt_res):
+ self._lookup_vm()
+ mock_nic = mock_get_nic_conn.return_value
+ self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
+
+ mock_modify_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
+
+ @mock.patch.object(vmutils.VMUtils, '_get_new_setting_data')
+ def test_create_nic(self, mock_get_new_virt_res):
+ self._lookup_vm()
+ mock_nic = mock_get_new_virt_res.return_value
+
+ with mock.patch.object(self._vmutils,
+ '_add_virt_resource') as mock_add_virt_res:
+ self._vmutils.create_nic(
+ self._FAKE_VM_NAME, self._FAKE_RES_NAME, self._FAKE_ADDRESS)
+
+ mock_add_virt_res.assert_called_with(mock_nic, self._FAKE_VM_PATH)
+
+ def test_set_vm_state(self):
+ mock_vm = self._lookup_vm()
+ mock_vm.RequestStateChange.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vmutils.set_vm_state(self._FAKE_VM_NAME,
+ constants.HYPERV_VM_STATE_ENABLED)
+ mock_vm.RequestStateChange.assert_called_with(
+ constants.HYPERV_VM_STATE_ENABLED)
+
+ def test_destroy_vm(self):
+ self._lookup_vm()
+
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._DESTROY_SYSTEM).return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vmutils.destroy_vm(self._FAKE_VM_NAME)
+
+ getattr(mock_svc, self._DESTROY_SYSTEM).assert_called_with(
+ self._FAKE_VM_PATH)
+
+ @mock.patch.object(vmutils.VMUtils, '_wait_for_job')
+ def test_check_ret_val_ok(self, mock_wait_for_job):
+ self._vmutils.check_ret_val(constants.WMI_JOB_STATUS_STARTED,
+ self._FAKE_JOB_PATH)
+ mock_wait_for_job.assert_called_once_with(self._FAKE_JOB_PATH)
+
+ def test_check_ret_val_exception(self):
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils.check_ret_val,
+ self._FAKE_RET_VAL_BAD,
+ self._FAKE_JOB_PATH)
+
+ def test_wait_for_job_done(self):
+ mockjob = self._prepare_wait_for_job(constants.WMI_JOB_STATE_COMPLETED)
+ job = self._vmutils._wait_for_job(self._FAKE_JOB_PATH)
+ self.assertEqual(mockjob, job)
+
+ def test_wait_for_job_exception_concrete_job(self):
+ mock_job = self._prepare_wait_for_job()
+ mock_job.path.return_value.Class = self._CONCRETE_JOB
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._wait_for_job,
+ self._FAKE_JOB_PATH)
+
+ def test_wait_for_job_exception_with_error(self):
+ mock_job = self._prepare_wait_for_job()
+ mock_job.GetError.return_value = (self._FAKE_ERROR, self._FAKE_RET_VAL)
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._wait_for_job,
+ self._FAKE_JOB_PATH)
+
+ def test_wait_for_job_exception_no_error(self):
+ mock_job = self._prepare_wait_for_job()
+ mock_job.GetError.return_value = (None, None)
+ self.assertRaises(vmutils.HyperVException,
+ self._vmutils._wait_for_job,
+ self._FAKE_JOB_PATH)
+
+ def _prepare_wait_for_job(self, state=_FAKE_JOB_STATUS_BAD):
+ mock_job = mock.MagicMock()
+ mock_job.JobState = state
+ mock_job.Description = self._FAKE_JOB_DESCRIPTION
+ mock_job.ElapsedTime = self._FAKE_ELAPSED_TIME
+
+ self._vmutils._get_wmi_obj = mock.MagicMock(return_value=mock_job)
+ return mock_job
+
+ def test_add_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._ADD_RESOURCE).return_value = (
+ self._FAKE_JOB_PATH, mock.MagicMock(), self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
+
+ self._vmutils._add_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+ self._assert_add_resources(mock_svc)
+
+ def test_modify_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ mock_svc.ModifyVirtualSystemResources.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
+
+ self._vmutils._modify_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+
+ mock_svc.ModifyVirtualSystemResources.assert_called_with(
+ ResourceSettingData=[self._FAKE_RES_DATA],
+ ComputerSystem=self._FAKE_VM_PATH)
+
+ def test_remove_virt_resource(self):
+ mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+ getattr(mock_svc, self._REMOVE_RESOURCE).return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
+
+ self._vmutils._remove_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
+ self._assert_remove_resources(mock_svc)
+
+ @mock.patch.object(vmutils, 'wmi', create=True)
+ @mock.patch.object(vmutils.VMUtils, 'check_ret_val')
+ def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
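+        # wmi is only importable on Windows, hence create=True when
+        # patching it into the module under test.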
+ self._lookup_vm()
+
+ mock_svc = self._get_snapshot_service()
+ mock_svc.CreateVirtualSystemSnapshot.return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL, mock.MagicMock())
+
+ self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
+
+ mock_svc.CreateVirtualSystemSnapshot.assert_called_with(
+ self._FAKE_VM_PATH)
+
+ mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
+ self._FAKE_JOB_PATH)
+
+ def test_remove_vm_snapshot(self):
+ mock_svc = self._get_snapshot_service()
+ getattr(mock_svc, self._DESTROY_SNAPSHOT).return_value = (
+ self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
+
+ self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH)
+ getattr(mock_svc, self._DESTROY_SNAPSHOT).assert_called_with(
+ self._FAKE_SNAPSHOT_PATH)
+
+ def test_detach_vm_disk(self):
+ self._lookup_vm()
+ mock_disk = self._prepare_mock_disk()
+
+ with mock.patch.object(self._vmutils,
+ '_remove_virt_resource') as mock_rm_virt_res:
+ self._vmutils.detach_vm_disk(self._FAKE_VM_NAME,
+ self._FAKE_HOST_RESOURCE)
+
+ mock_rm_virt_res.assert_called_with(mock_disk, self._FAKE_VM_PATH)
+
+ def test_get_mounted_disk_resource_from_path(self):
+ mock_disk_1 = mock.MagicMock()
+ mock_disk_2 = mock.MagicMock()
+ mock_disk_2.HostResource = [self._FAKE_MOUNTED_DISK_PATH]
+ self._vmutils._conn.query.return_value = [mock_disk_1, mock_disk_2]
+
+ physical_disk = self._vmutils._get_mounted_disk_resource_from_path(
+ self._FAKE_MOUNTED_DISK_PATH)
+
+ self.assertEqual(mock_disk_2, physical_disk)
+
+ def test_get_controller_volume_paths(self):
+ self._prepare_mock_disk()
+ mock_disks = {self._FAKE_RES_PATH: self._FAKE_HOST_RESOURCE}
+ disks = self._vmutils.get_controller_volume_paths(self._FAKE_RES_PATH)
+ self.assertEqual(mock_disks, disks)
+
+ def _prepare_mock_disk(self):
+ mock_disk = mock.MagicMock()
+ mock_disk.HostResource = [self._FAKE_HOST_RESOURCE]
+ mock_disk.path.return_value.RelPath = self._FAKE_RES_PATH
+ mock_disk.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
+ self._vmutils._conn.query.return_value = [mock_disk]
+
+ return mock_disk
+
+ def _get_snapshot_service(self):
+ return self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
+
+ def _assert_add_resources(self, mock_svc):
+ getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
+ [self._FAKE_RES_DATA], self._FAKE_VM_PATH)
+
+ def _assert_remove_resources(self, mock_svc):
+ getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
+ [self._FAKE_RES_PATH], self._FAKE_VM_PATH)
diff --git a/nova/tests/virt/hyperv/test_vmutilsv2.py b/nova/tests/virt/hyperv/test_vmutilsv2.py
index 2a49acf5cb..f6ab55ff1d 100644
--- a/nova/tests/virt/hyperv/test_vmutilsv2.py
+++ b/nova/tests/virt/hyperv/test_vmutilsv2.py
@@ -1,4 +1,4 @@
-# Copyright 2013 Cloudbase Solutions Srl
+# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -14,159 +14,51 @@
import mock
-from nova import test
+from nova.tests.virt.hyperv import test_vmutils
from nova.virt.hyperv import vmutilsv2
-class VMUtilsV2TestCase(test.NoDBTestCase):
+class VMUtilsV2TestCase(test_vmutils.VMUtilsTestCase):
"""Unit tests for the Hyper-V VMUtilsV2 class."""
- _FAKE_VM_NAME = 'fake_vm'
- _FAKE_MEMORY_MB = 2
- _FAKE_VCPUS_NUM = 4
- _FAKE_JOB_PATH = 'fake_job_path'
- _FAKE_RET_VAL = 0
- _FAKE_CTRL_PATH = 'fake_ctrl_path'
- _FAKE_CTRL_ADDR = 0
- _FAKE_DRIVE_ADDR = 0
- _FAKE_MOUNTED_DISK_PATH = 'fake_mounted_disk_path'
- _FAKE_VM_PATH = "fake_vm_path"
- _FAKE_ENABLED_STATE = 1
- _FAKE_SNAPSHOT_PATH = "_FAKE_SNAPSHOT_PATH"
- _FAKE_RES_DATA = "fake_res_data"
- _FAKE_RES_PATH = "fake_res_path"
- _FAKE_DYNAMIC_MEMORY_RATIO = 1.0
- _FAKE_VHD_PATH = "fake_vhd_path"
- _FAKE_VOLUME_DRIVE_PATH = "fake_volume_drive_path"
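+    # The V1 test cases are inherited; the constants below remap them
+    # onto the V2 WMI method and property names.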
+ _DEFINE_SYSTEM = 'DefineSystem'
+ _DESTROY_SYSTEM = 'DestroySystem'
+ _DESTROY_SNAPSHOT = 'DestroySnapshot'
+
+ _ADD_RESOURCE = 'AddResourceSettings'
+ _REMOVE_RESOURCE = 'RemoveResourceSettings'
+ _SETTING_TYPE = 'VirtualSystemType'
+
+ _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
def setUp(self):
+ super(VMUtilsV2TestCase, self).setUp()
self._vmutils = vmutilsv2.VMUtilsV2()
self._vmutils._conn = mock.MagicMock()
- super(VMUtilsV2TestCase, self).setUp()
-
- def _lookup_vm(self):
- mock_vm = mock.MagicMock()
- self._vmutils._lookup_vm_check = mock.MagicMock(
- return_value=mock_vm)
- mock_vm.path_.return_value = self._FAKE_VM_PATH
- return mock_vm
-
- def test_create_vm(self):
- mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
- mock_svc.DefineSystem.return_value = (None, self._FAKE_JOB_PATH,
- self._FAKE_RET_VAL)
-
- self._vmutils._get_wmi_obj = mock.MagicMock()
- mock_vm = self._vmutils._get_wmi_obj.return_value
-
- mock_s = mock.MagicMock()
- mock_s.VirtualSystemType = self._vmutils._VIRTUAL_SYSTEM_TYPE_REALIZED
- mock_vm.associators.return_value = [mock_s]
-
- self._vmutils._set_vm_memory = mock.MagicMock()
- self._vmutils._set_vm_vcpus = mock.MagicMock()
-
- self._vmutils.create_vm(self._FAKE_VM_NAME, self._FAKE_MEMORY_MB,
- self._FAKE_VCPUS_NUM, False,
- self._FAKE_DYNAMIC_MEMORY_RATIO)
-
- self.assertTrue(mock_svc.DefineSystem.called)
- self._vmutils._set_vm_memory.assert_called_with(
- mock_vm, mock_s, self._FAKE_MEMORY_MB,
- self._FAKE_DYNAMIC_MEMORY_RATIO)
-
- self._vmutils._set_vm_vcpus.assert_called_with(mock_vm, mock_s,
- self._FAKE_VCPUS_NUM,
- False)
-
- def test_attach_ide_drive(self):
- self._lookup_vm()
- self._vmutils._get_vm_ide_controller = mock.MagicMock()
- self._vmutils._get_new_resource_setting_data = mock.MagicMock()
- self._vmutils._add_virt_resource = mock.MagicMock()
-
- self._vmutils.attach_ide_drive(self._FAKE_VM_NAME,
- self._FAKE_CTRL_PATH,
- self._FAKE_CTRL_ADDR,
- self._FAKE_DRIVE_ADDR)
-
- self.assertTrue(self._vmutils._get_vm_ide_controller.called)
- self.assertTrue(self._vmutils._get_new_resource_setting_data.called)
- self.assertTrue(self._vmutils._add_virt_resource.called)
-
- def test_attach_volume_to_controller(self):
- self._lookup_vm()
- self._vmutils._add_virt_resource = mock.MagicMock()
-
- self._vmutils.attach_volume_to_controller(self._FAKE_VM_NAME,
- self._FAKE_CTRL_PATH,
- self._FAKE_CTRL_ADDR,
- self._FAKE_MOUNTED_DISK_PATH)
-
- self.assertTrue(self._vmutils._add_virt_resource.called)
-
- def test_create_scsi_controller(self):
- self._lookup_vm()
- self._vmutils._add_virt_resource = mock.MagicMock()
-
- self._vmutils.create_scsi_controller(self._FAKE_VM_NAME)
-
- self.assertTrue(self._vmutils._add_virt_resource.called)
-
- def test_get_vm_storage_paths(self):
- mock_vm = self._lookup_vm()
-
- mock_vmsettings = [mock.MagicMock()]
- mock_vm.associators.return_value = mock_vmsettings
- mock_sasds = []
- mock_sasd1 = mock.MagicMock()
- mock_sasd1.ResourceSubType = self._vmutils._IDE_DISK_RES_SUB_TYPE
- mock_sasd1.HostResource = [self._FAKE_VHD_PATH]
- mock_sasd2 = mock.MagicMock()
- mock_sasd2.ResourceSubType = self._vmutils._PHYS_DISK_RES_SUB_TYPE
- mock_sasd2.HostResource = [self._FAKE_VOLUME_DRIVE_PATH]
- mock_sasds.append(mock_sasd1)
- mock_sasds.append(mock_sasd2)
- mock_vmsettings[0].associators.return_value = mock_sasds
-
- storage = self._vmutils.get_vm_storage_paths(self._FAKE_VM_NAME)
- (disk_files, volume_drives) = storage
-
- mock_vm.associators.assert_called_with(
- wmi_result_class='Msvm_VirtualSystemSettingData')
- mock_vmsettings[0].associators.assert_called_with(
- wmi_result_class='Msvm_StorageAllocationSettingData')
- self.assertEqual([self._FAKE_VHD_PATH], disk_files)
- self.assertEqual([self._FAKE_VOLUME_DRIVE_PATH], volume_drives)
-
- def test_destroy(self):
- self._lookup_vm()
-
+ def test_modify_virt_resource(self):
mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
- mock_svc.DestroySystem.return_value = (self._FAKE_JOB_PATH,
- self._FAKE_RET_VAL)
-
- self._vmutils.destroy_vm(self._FAKE_VM_NAME)
-
- mock_svc.DestroySystem.assert_called_with(self._FAKE_VM_PATH)
-
- def test_get_vm_state(self):
- self._vmutils.get_vm_summary_info = mock.MagicMock(
- return_value={'EnabledState': self._FAKE_ENABLED_STATE})
+ mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
+ mock.MagicMock(),
+ self._FAKE_RET_VAL)
+ mock_res_setting_data = mock.MagicMock()
+ mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
- enabled_state = self._vmutils.get_vm_state(self._FAKE_VM_NAME)
+ self._vmutils._modify_virt_resource(mock_res_setting_data,
+ self._FAKE_VM_PATH)
- self.assertEqual(self._FAKE_ENABLED_STATE, enabled_state)
+ mock_svc.ModifyResourceSettings.assert_called_with(
+ ResourceSettings=[self._FAKE_RES_DATA])
- def test_take_vm_snapshot(self):
+ @mock.patch.object(vmutilsv2, 'wmi', create=True)
+ @mock.patch.object(vmutilsv2.VMUtilsV2, 'check_ret_val')
+ def test_take_vm_snapshot(self, mock_check_ret_val, mock_wmi):
self._lookup_vm()
- mock_svc = self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
+ mock_svc = self._get_snapshot_service()
mock_svc.CreateSnapshot.return_value = (self._FAKE_JOB_PATH,
mock.MagicMock(),
self._FAKE_RET_VAL)
- vmutilsv2.wmi = mock.MagicMock()
self._vmutils.take_vm_snapshot(self._FAKE_VM_NAME)
@@ -174,70 +66,19 @@ def test_take_vm_snapshot(self):
AffectedSystem=self._FAKE_VM_PATH,
SnapshotType=self._vmutils._SNAPSHOT_FULL)
- def test_remove_vm_snapshot(self):
- mock_svc = self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
- mock_svc.DestroySnapshot.return_value = (self._FAKE_JOB_PATH,
- self._FAKE_RET_VAL)
-
- self._vmutils.remove_vm_snapshot(self._FAKE_SNAPSHOT_PATH)
-
- mock_svc.DestroySnapshot.assert_called_with(self._FAKE_SNAPSHOT_PATH)
+ mock_check_ret_val.assert_called_once_with(self._FAKE_RET_VAL,
+ self._FAKE_JOB_PATH)
- def test_set_nic_connection(self):
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_add_virt_resource')
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_new_setting_data')
+ @mock.patch.object(vmutilsv2.VMUtilsV2, '_get_nic_data_by_name')
+ def test_set_nic_connection(self, mock_get_nic_data, mock_get_new_sd,
+ mock_add_virt_res):
self._lookup_vm()
-
- self._vmutils._get_nic_data_by_name = mock.MagicMock()
- self._vmutils._add_virt_resource = mock.MagicMock()
-
- fake_eth_port = mock.MagicMock()
- self._vmutils._get_new_setting_data = mock.MagicMock(
- return_value=fake_eth_port)
+ fake_eth_port = mock_get_new_sd.return_value
self._vmutils.set_nic_connection(self._FAKE_VM_NAME, None, None)
-
- self._vmutils._add_virt_resource.assert_called_with(fake_eth_port,
- self._FAKE_VM_PATH)
-
- def test_add_virt_resource(self):
- mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
- mock_svc.AddResourceSettings.return_value = (self._FAKE_JOB_PATH,
- mock.MagicMock(),
- self._FAKE_RET_VAL)
- mock_res_setting_data = mock.MagicMock()
- mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
-
- self._vmutils._add_virt_resource(mock_res_setting_data,
- self._FAKE_VM_PATH)
-
- mock_svc.AddResourceSettings.assert_called_with(self._FAKE_VM_PATH,
- [self._FAKE_RES_DATA])
-
- def test_modify_virt_resource(self):
- mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
- mock_svc.ModifyResourceSettings.return_value = (self._FAKE_JOB_PATH,
- mock.MagicMock(),
- self._FAKE_RET_VAL)
- mock_res_setting_data = mock.MagicMock()
- mock_res_setting_data.GetText_.return_value = self._FAKE_RES_DATA
-
- self._vmutils._modify_virt_resource(mock_res_setting_data,
- self._FAKE_VM_PATH)
-
- mock_svc.ModifyResourceSettings.assert_called_with(
- ResourceSettings=[self._FAKE_RES_DATA])
-
- def test_remove_virt_resource(self):
- mock_svc = self._vmutils._conn.Msvm_VirtualSystemManagementService()[0]
- mock_svc.RemoveResourceSettings.return_value = (self._FAKE_JOB_PATH,
- self._FAKE_RET_VAL)
- mock_res_setting_data = mock.MagicMock()
- mock_res_setting_data.path_.return_value = self._FAKE_RES_PATH
-
- self._vmutils._remove_virt_resource(mock_res_setting_data,
- self._FAKE_VM_PATH)
-
- mock_svc.RemoveResourceSettings.assert_called_with(
- [self._FAKE_RES_PATH])
+ mock_add_virt_res.assert_called_with(fake_eth_port, self._FAKE_VM_PATH)
@mock.patch('nova.virt.hyperv.vmutils.VMUtils._get_vm_disks')
def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
@@ -266,3 +107,14 @@ def test_enable_vm_metrics_collection(self, mock_get_vm_disks):
MetricCollectionEnabled=self._vmutils._METRIC_ENABLED))
mock_svc.ControlMetrics.assert_has_calls(calls, any_order=True)
+
+ def _get_snapshot_service(self):
+ return self._vmutils._conn.Msvm_VirtualSystemSnapshotService()[0]
+
+ def _assert_add_resources(self, mock_svc):
+ getattr(mock_svc, self._ADD_RESOURCE).assert_called_with(
+ self._FAKE_VM_PATH, [self._FAKE_RES_DATA])
+
+ def _assert_remove_resources(self, mock_svc):
+ getattr(mock_svc, self._REMOVE_RESOURCE).assert_called_with(
+ [self._FAKE_RES_PATH])
diff --git a/nova/tests/virt/hyperv/test_volumeutils.py b/nova/tests/virt/hyperv/test_volumeutils.py
index b554222103..f44ee14594 100644
--- a/nova/tests/virt/hyperv/test_volumeutils.py
+++ b/nova/tests/virt/hyperv/test_volumeutils.py
@@ -1,5 +1,7 @@
# Copyright 2014 Cloudbase Solutions Srl
#
+# All Rights Reserved.
+#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -15,7 +17,7 @@
import mock
from oslo.config import cfg
-from nova import test
+from nova.tests.virt.hyperv import test_basevolumeutils
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeutils
@@ -24,7 +26,7 @@
'hyperv')
-class VolumeUtilsTestCase(test.NoDBTestCase):
+class VolumeUtilsTestCase(test_basevolumeutils.BaseVolumeUtilsTestCase):
"""Unit tests for the Hyper-V VolumeUtils class."""
_FAKE_PORTAL_ADDR = '10.1.1.1'
@@ -32,10 +34,13 @@ class VolumeUtilsTestCase(test.NoDBTestCase):
_FAKE_LUN = 0
_FAKE_TARGET = 'iqn.2010-10.org.openstack:fake_target'
+ _FAKE_STDOUT_VALUE = 'The operation completed successfully'
+
def setUp(self):
super(VolumeUtilsTestCase, self).setUp()
self._volutils = volumeutils.VolumeUtils()
self._volutils._conn_wmi = mock.MagicMock()
+ self._volutils._conn_cimv2 = mock.MagicMock()
self.flags(volume_attach_retry_count=4, group='hyperv')
self.flags(volume_attach_retry_interval=0, group='hyperv')
@@ -132,3 +137,15 @@ def test_execute_raise_exception(self):
def test_execute_exception(self):
self._test_execute_wrapper(False)
+
+ @mock.patch.object(volumeutils, 'utils')
+ def test_logout_storage_target(self, mock_utils):
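+        # The V1 utils shell out to iscsicli to log out the session
+        # matching the target IQN.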
+ mock_utils.execute.return_value = (self._FAKE_STDOUT_VALUE,
+ mock.sentinel.FAKE_STDERR_VALUE)
+ session = mock.MagicMock()
+ session.SessionId = mock.sentinel.FAKE_SESSION_ID
+ self._volutils._conn_wmi.query.return_value = [session]
+
+ self._volutils.logout_storage_target(mock.sentinel.FAKE_IQN)
+ mock_utils.execute.assert_called_once_with(
+ 'iscsicli.exe', 'logouttarget', mock.sentinel.FAKE_SESSION_ID)
diff --git a/nova/tests/virt/hyperv/test_volumeutilsv2.py b/nova/tests/virt/hyperv/test_volumeutilsv2.py
index b933e3331d..1c242b71f8 100644
--- a/nova/tests/virt/hyperv/test_volumeutilsv2.py
+++ b/nova/tests/virt/hyperv/test_volumeutilsv2.py
@@ -36,6 +36,7 @@ def setUp(self):
super(VolumeUtilsV2TestCase, self).setUp()
self._volutilsv2 = volumeutilsv2.VolumeUtilsV2()
self._volutilsv2._conn_storage = mock.MagicMock()
+ self._volutilsv2._conn_wmi = mock.MagicMock()
self.flags(volume_attach_retry_count=4, group='hyperv')
self.flags(volume_attach_retry_interval=0, group='hyperv')
@@ -110,3 +111,37 @@ def test_login_disconncted_target(self):
def test_login_target_exception(self):
self._test_login_target(False, True)
+
+ def test_logout_storage_target(self):
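+        # Persistent sessions are unregistered before the target is
+        # disconnected, so the login is not re-established at boot.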
+ mock_msft_target = self._volutilsv2._conn_storage.MSFT_iSCSITarget
+ mock_msft_session = self._volutilsv2._conn_storage.MSFT_iSCSISession
+
+ mock_target = mock.MagicMock()
+ mock_target.IsConnected = True
+ mock_msft_target.return_value = [mock_target]
+
+ mock_session = mock.MagicMock()
+ mock_session.IsPersistent = True
+ mock_msft_session.return_value = [mock_session]
+
+ self._volutilsv2.logout_storage_target(self._FAKE_TARGET)
+
+ mock_msft_target.assert_called_once_with(NodeAddress=self._FAKE_TARGET)
+ mock_msft_session.assert_called_once_with(
+ TargetNodeAddress=self._FAKE_TARGET)
+
+ mock_session.Unregister.assert_called_once_with()
+ mock_target.Disconnect.assert_called_once_with()
+
+ @mock.patch.object(volumeutilsv2.VolumeUtilsV2, 'logout_storage_target')
+ def test_execute_log_out(self, mock_logout_target):
+ sess_class = self._volutilsv2._conn_wmi.MSiSCSIInitiator_SessionClass
+
+ mock_session = mock.MagicMock()
+ sess_class.return_value = [mock_session]
+
+ self._volutilsv2.execute_log_out(mock.sentinel.FAKE_SESSION_ID)
+
+ sess_class.assert_called_once_with(
+ SessionId=mock.sentinel.FAKE_SESSION_ID)
+ mock_logout_target.assert_called_once_with(mock_session.TargetName)
diff --git a/nova/tests/virt/libvirt/fake_imagebackend.py b/nova/tests/virt/libvirt/fake_imagebackend.py
index ef09fdcc52..f2a0de969b 100644
--- a/nova/tests/virt/libvirt/fake_imagebackend.py
+++ b/nova/tests/virt/libvirt/fake_imagebackend.py
@@ -53,6 +53,21 @@ def libvirt_info(self, disk_bus, disk_dev, device_type,
return FakeImage(instance, name)
def snapshot(self, path, image_type=''):
- #NOTE(bfilippov): this is done in favor for
+        # NOTE(bfilippov): this is done in favor of
# snapshot tests in test_libvirt.LibvirtConnTestCase
return imagebackend.Backend(True).snapshot(path, image_type)
+
+
+class Raw(imagebackend.Image):
+    # NOTE(spandhe): Added for test_rescue and test_rescue_config_drive
+ def __init__(self, instance=None, disk_name=None, path=None):
+ pass
+
+ def _get_driver_format(self):
+ pass
+
+ def correct_format(self):
+ pass
+
+ def create_image(self, prepare_template, base, size, *args, **kwargs):
+ pass
diff --git a/nova/tests/virt/libvirt/fake_libvirt_utils.py b/nova/tests/virt/libvirt/fake_libvirt_utils.py
index 1585e60d92..eb58c30e68 100644
--- a/nova/tests/virt/libvirt/fake_libvirt_utils.py
+++ b/nova/tests/virt/libvirt/fake_libvirt_utils.py
@@ -90,6 +90,10 @@ def create_cow_image(backing_file, path):
pass
+def get_disk_size(path):
+ return 0
+
+
def get_disk_backing_file(path):
return disk_backing_files.get(path, None)
@@ -110,10 +114,6 @@ def create_lvm_image(vg, lv, size, sparse=False):
pass
-def import_rbd_image(path, *args):
- pass
-
-
def volume_group_free_space(vg):
pass
@@ -194,17 +194,5 @@ def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
return "qemu"
-def list_rbd_volumes(pool):
- fake_volumes = ['875a8070-d0b9-4949-8b31-104d125c9a64.local',
- '875a8070-d0b9-4949-8b31-104d125c9a64.swap',
- '875a8070-d0b9-4949-8b31-104d125c9a64',
- 'wrong875a8070-d0b9-4949-8b31-104d125c9a64']
- return fake_volumes
-
-
-def remove_rbd_volumes(pool, *names):
- pass
-
-
def get_arch(image_meta):
pass
diff --git a/nova/tests/virt/libvirt/fakelibvirt.py b/nova/tests/virt/libvirt/fakelibvirt.py
index e3a3db978e..095cd1a50c 100644
--- a/nova/tests/virt/libvirt/fakelibvirt.py
+++ b/nova/tests/virt/libvirt/fakelibvirt.py
@@ -17,7 +17,7 @@
import time
import uuid
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
# Allow passing None to the various connect methods
# (i.e. allow the client to rely on default URLs)
@@ -73,6 +73,13 @@ def _reset():
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_XML_SECURE = 1
+VIR_DOMAIN_XML_INACTIVE = 2
+
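+# blockRebase flags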
+VIR_DOMAIN_BLOCK_REBASE_SHALLOW = 1
+VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT = 2
+VIR_DOMAIN_BLOCK_REBASE_COPY = 8
+
+VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT = 2
VIR_DOMAIN_EVENT_ID_LIFECYCLE = 0
@@ -106,8 +113,11 @@ def _reset():
VIR_CRED_REALM = 8
VIR_CRED_EXTERNAL = 9
+VIR_MIGRATE_LIVE = 1
VIR_MIGRATE_PEER2PEER = 2
+VIR_MIGRATE_TUNNELLED = 4
VIR_MIGRATE_UNDEFINE_SOURCE = 16
+VIR_MIGRATE_NON_SHARED_INC = 128
VIR_NODE_CPU_STATS_ALL_CPUS = -1
@@ -143,6 +153,9 @@ def _reset():
VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
+# blockCommit flags
+VIR_DOMAIN_BLOCK_COMMIT_RELATIVE = 4
+
VIR_CONNECT_LIST_DOMAINS_ACTIVE = 1
VIR_CONNECT_LIST_DOMAINS_INACTIVE = 2
@@ -597,7 +610,7 @@ def delete(self, flags):
class Connection(object):
- def __init__(self, uri=None, readonly=False, version=9007):
+ def __init__(self, uri=None, readonly=False, version=9011):
if not uri or uri == '':
if allow_default_uri_connection:
uri = 'qemu:///session'
diff --git a/nova/tests/virt/libvirt/test_blockinfo.py b/nova/tests/virt/libvirt/test_blockinfo.py
index 62f8589362..b5a3cd3740 100644
--- a/nova/tests/virt/libvirt/test_blockinfo.py
+++ b/nova/tests/virt/libvirt/test_blockinfo.py
@@ -205,6 +205,7 @@ def test_get_disk_mapping_lxc(self):
# A simple disk mapping setup, but for lxc
user_context = context.RequestContext(self.user_id, self.project_id)
+ self.test_instance['ephemeral_gb'] = 0
instance_ref = db.instance_create(user_context, self.test_instance)
mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
@@ -261,6 +262,9 @@ def test_get_disk_mapping_simple_swap(self):
def test_get_disk_mapping_simple_configdrive(self):
# A simple disk mapping setup, but with configdrive added
+ # It's necessary to check if the architecture is power, because
+ # power doesn't have support to ide, and so libvirt translate
+ # all ide calls to scsi
self.flags(force_config_drive=True)
@@ -270,18 +274,32 @@ def test_get_disk_mapping_simple_configdrive(self):
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide")
+ # The last device is selected for this. on x86 is the last ide
+ # device (hdd). Since power only support scsi, the last device
+ # is sdz
+
+ bus_ppc = ("scsi", "sdz")
+ expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
+
+ bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
+ ("ide", "hdd"))
+
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'disk.config': {'bus': 'ide', 'dev': 'hdd', 'type': 'cdrom'},
+ 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'}
}
+
self.assertEqual(expect, mapping)
def test_get_disk_mapping_cdrom_configdrive(self):
# A simple disk mapping setup, with configdrive added as cdrom
+ # It's necessary to check if the architecture is power, because
+ # power doesn't have support to ide, and so libvirt translate
+ # all ide calls to scsi
self.flags(force_config_drive=True)
self.flags(config_drive_format='iso9660')
@@ -292,14 +310,21 @@ def test_get_disk_mapping_cdrom_configdrive(self):
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide")
+ bus_ppc = ("scsi", "sdz")
+ expect_bus = {"ppc": bus_ppc, "ppc64": bus_ppc}
+
+ bus, dev = expect_bus.get(blockinfo.libvirt_utils.get_arch({}),
+ ("ide", "hdd"))
+
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'},
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
- 'disk.config': {'bus': 'ide', 'dev': 'hdd', 'type': 'cdrom'},
+ 'disk.config': {'bus': bus, 'dev': dev, 'type': 'cdrom'},
'root': {'bus': 'virtio', 'dev': 'vda',
'type': 'disk', 'boot_index': '1'}
}
+
self.assertEqual(expect, mapping)
def test_get_disk_mapping_disk_configdrive(self):
@@ -868,6 +893,15 @@ def setUp(self):
'disk_bus': 'virtio',
'destination_type': 'volume',
'snapshot_id': 'fake-snapshot-id-1',
+ 'boot_index': -1})),
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 5, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/vde',
+ 'source_type': 'blank',
+ 'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'destination_type': 'volume',
'boot_index': -1}))]
def tearDown(self):
@@ -890,11 +924,14 @@ def test_only_block_device_mapping(self):
original_bdm, self.block_device_mapping):
self.assertEqual(original.device_name, defaulted.device_name)
- # Asser it defaults the missing one as expected
+ # Assert it defaults the missing one as expected
self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names([], [], self.block_device_mapping)
self.assertEqual('/dev/vdd',
self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
def test_with_ephemerals(self):
# Test ephemeral gets assigned
@@ -904,10 +941,13 @@ def test_with_ephemerals(self):
self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names(self.ephemerals, [],
self.block_device_mapping)
self.assertEqual('/dev/vdd',
self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
def test_with_swap(self):
# Test swap only
@@ -918,11 +958,14 @@ def test_with_swap(self):
# Test swap and block_device_mapping
self.swap[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names([], self.swap,
self.block_device_mapping)
self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
self.assertEqual('/dev/vdd',
self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
def test_all_together(self):
# Test swap missing
@@ -943,9 +986,12 @@ def test_all_together(self):
self.swap[0]['device_name'] = None
self.ephemerals[0]['device_name'] = None
self.block_device_mapping[1]['device_name'] = None
+ self.block_device_mapping[2]['device_name'] = None
self._test_default_device_names(self.ephemerals,
self.swap, self.block_device_mapping)
self.assertEqual('/dev/vdb', self.ephemerals[0]['device_name'])
self.assertEqual('/dev/vdc', self.swap[0]['device_name'])
self.assertEqual('/dev/vdd',
self.block_device_mapping[1]['device_name'])
+ self.assertEqual('/dev/vde',
+ self.block_device_mapping[2]['device_name'])
diff --git a/nova/tests/virt/libvirt/test_config.py b/nova/tests/virt/libvirt/test_config.py
index 8d14eb9f8d..a45088c1cb 100644
--- a/nova/tests/virt/libvirt/test_config.py
+++ b/nova/tests/virt/libvirt/test_config.py
@@ -70,6 +70,28 @@ def test_config_host(self):
+            <topology>
+              <cells num="2">
+                <cell id="0">
+                  <memory unit="KiB">4048280</memory>
+                  <cpus num="4">
+                    <cpu id="0" socket_id="0" core_id="0" siblings="0"/>
+                    <cpu id="1" socket_id="0" core_id="1" siblings="1"/>
+                    <cpu id="2" socket_id="0" core_id="2" siblings="2"/>
+                    <cpu id="3" socket_id="0" core_id="3" siblings="3"/>
+                  </cpus>
+                </cell>
+                <cell id="1">
+                  <memory unit="KiB">4127684</memory>
+                  <cpus num="4">
+                    <cpu id="4" socket_id="1" core_id="0" siblings="4"/>
+                    <cpu id="5" socket_id="1" core_id="1" siblings="5"/>
+                    <cpu id="6" socket_id="1" core_id="2" siblings="6"/>
+                    <cpu id="7" socket_id="1" core_id="3" siblings="7"/>
+                  </cpus>
+                </cell>
+              </cells>
+            </topology>
            <os_type>hvm</os_type>
@@ -207,6 +229,34 @@ def test_config_simple(self):
""")
+class LibvirtConfigGuestCPUNUMATest(LibvirtConfigBaseTest):
+
+ def test_config_simple(self):
+ obj = config.LibvirtConfigGuestCPUNUMA()
+
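+        # Build two guest NUMA cells: vCPUs 0-1 with 1000000 KiB of memory
+        # and vCPUs 2-3 with 1500000 KiB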
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 0
+ cell.cpus = set([0, 1])
+ cell.memory = 1000000
+
+ obj.cells.append(cell)
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 1
+ cell.cpus = set([2, 3])
+ cell.memory = 1500000
+
+ obj.cells.append(cell)
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+            <numa>
+              <cell id="0" cpus="0-1" memory="1000000"/>
+              <cell id="1" cpus="2-3" memory="1500000"/>
+            </numa>
+ """)
+
+
class LibvirtConfigCPUTest(LibvirtConfigBaseTest):
def test_config_simple(self):
@@ -322,6 +372,39 @@ def test_config_host(self):
""")
+ def test_config_host_with_numa(self):
+ obj = config.LibvirtConfigGuestCPU()
+ obj.mode = "host-model"
+ obj.match = "exact"
+
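+        # The cell layout is attached via obj.numa and serialized as a
+        # <numa> child of the <cpu> element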
+ numa = config.LibvirtConfigGuestCPUNUMA()
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 0
+ cell.cpus = set([0, 1])
+ cell.memory = 1000000
+
+ numa.cells.append(cell)
+
+ cell = config.LibvirtConfigGuestCPUNUMACell()
+ cell.id = 1
+ cell.cpus = set([2, 3])
+ cell.memory = 1500000
+
+ numa.cells.append(cell)
+
+ obj.numa = numa
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+            <cpu mode="host-model" match="exact">
+              <numa>
+                <cell id="0" cpus="0-1" memory="1000000"/>
+                <cell id="1" cpus="2-3" memory="1500000"/>
+              </numa>
+            </cpu>
+ """)
+
class LibvirtConfigGuestSMBIOSTest(LibvirtConfigBaseTest):
@@ -681,6 +764,67 @@ def test_config_file_parse(self):
self.assertEqual(obj.target_bus, 'ide')
+class LibvirtConfigGuestDiskBackingStoreTest(LibvirtConfigBaseTest):
+
+ def test_config_file_parse(self):
+        xml = """
+            <backingStore type='file'>
+                <driver name='qemu' type='qcow2'/>
+                <source file='/var/lib/libvirt/images/mid.qcow2'/>
+                <backingStore type='file'>
+                    <driver name='qemu' type='qcow2'/>
+                    <source file='/var/lib/libvirt/images/base.qcow2'/>
+                    <backingStore/>
+                </backingStore>
+            </backingStore>
+        """
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDiskBackingStore()
+ obj.parse_dom(xmldoc)
+
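+        # parse_dom() recurses through nested <backingStore> elements; the
+        # empty innermost element terminates the chain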
+ self.assertEqual(obj.driver_name, 'qemu')
+ self.assertEqual(obj.driver_format, 'qcow2')
+ self.assertEqual(obj.source_type, 'file')
+ self.assertEqual(obj.source_file, '/var/lib/libvirt/images/mid.qcow2')
+ self.assertEqual(obj.backing_store.driver_name, 'qemu')
+ self.assertEqual(obj.backing_store.source_type, 'file')
+ self.assertEqual(obj.backing_store.source_file,
+ '/var/lib/libvirt/images/base.qcow2')
+ self.assertIsNone(obj.backing_store.backing_store)
+
+ def test_config_network_parse(self):
+        xml = """
+            <backingStore type='network' index='1'>
+                <format type='qcow2'/>
+                <source protocol='gluster' name='volume1/img1'>
+                    <host name='host1' port='24007'/>
+                </source>
+                <backingStore type='network' index='2'>
+                    <format type='qcow2'/>
+                    <source protocol='gluster' name='volume1/img2'>
+                        <host name='host1' port='24007'/>
+                    </source>
+                    <backingStore/>
+                </backingStore>
+            </backingStore>
+        """
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDiskBackingStore()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual(obj.source_type, 'network')
+ self.assertEqual(obj.source_protocol, 'gluster')
+ self.assertEqual(obj.source_name, 'volume1/img1')
+ self.assertEqual(obj.source_hosts[0], 'host1')
+ self.assertEqual(obj.source_ports[0], '24007')
+ self.assertEqual(obj.index, '1')
+ self.assertEqual(obj.backing_store.source_name, 'volume1/img2')
+ self.assertEqual(obj.backing_store.index, '2')
+ self.assertEqual(obj.backing_store.source_hosts[0], 'host1')
+ self.assertEqual(obj.backing_store.source_ports[0], '24007')
+ self.assertIsNone(obj.backing_store.backing_store)
+
+
class LibvirtConfigGuestFilesysTest(LibvirtConfigBaseTest):
def test_config_mount(self):
@@ -808,6 +952,18 @@ def test_config_file(self):
""")
+ def test_config_serial_port(self):
+ obj = config.LibvirtConfigGuestSerial()
+ obj.type = "tcp"
+ obj.listen_port = 11111
+ obj.listen_host = "0.0.0.0"
+
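+        # For a tcp serial device the host/port pair is rendered on the
+        # <source> element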
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+            <serial type="tcp">
+              <source host="0.0.0.0" service="11111" mode="bind"/>
+            </serial>""")
+
class LibvirtConfigGuestConsoleTest(LibvirtConfigBaseTest):
def test_config_pty(self):
@@ -973,6 +1129,23 @@ def test_config_direct(self):
""")
+ def test_config_vhostuser(self):
+ obj = config.LibvirtConfigGuestInterface()
+ obj.net_type = "vhostuser"
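+        # vhostuser connects the guest NIC to a userspace switch through
+        # the unix socket given below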
+ obj.vhostuser_type = "unix"
+ obj.vhostuser_path = "/tmp/vhostuser.sock"
+ obj.vhostuser_mode = "server"
+ obj.mac_addr = "DE:AD:BE:EF:CA:FE"
+ obj.model = "virtio"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+            <interface type="vhostuser">
+              <mac address="DE:AD:BE:EF:CA:FE"/>
+              <model type="virtio"/>
+              <source type="unix" path="/tmp/vhostuser.sock" mode="server"/>
+            </interface>""")
+
class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
@@ -1114,6 +1287,15 @@ def test_config_kvm(self):
obj.cputune.quota = 50000
obj.cputune.period = 25000
+ obj.membacking = config.LibvirtConfigGuestMemoryBacking()
+ obj.membacking.hugepages = True
+
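+        # memtune limits are expressed in kibibytes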
+ obj.memtune = config.LibvirtConfigGuestMemoryTune()
+ obj.memtune.hard_limit = 496
+ obj.memtune.soft_limit = 672
+ obj.memtune.swap_hard_limit = 1638
+ obj.memtune.min_guarantee = 2970
+
obj.name = "demo"
obj.uuid = "b38a3f43-4be2-4046-897f-b67c2f5e0147"
obj.os_type = "linux"
@@ -1140,6 +1322,15 @@ def test_config_kvm(self):
b38a3f43-4be2-4046-897f-b67c2f5e0147
demo
104857600
+            <memoryBacking>
+              <hugepages/>
+            </memoryBacking>
+            <memtune>
+              <hard_limit units="K">496</hard_limit>
+              <soft_limit units="K">672</soft_limit>
+              <swap_hard_limit units="K">1638</swap_hard_limit>
+              <min_guarantee units="K">2970</min_guarantee>
+            </memtune>
2
@@ -1735,3 +1926,127 @@ def test_config_cputune_timeslice(self):
50000
25000
""")
+
+ def test_config_cputune_vcpus(self):
+ cputune = config.LibvirtConfigGuestCPUTune()
+
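+        # Pin each of the four vCPUs to its own pair of host CPUs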
+ vcpu0 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu0.id = 0
+ vcpu0.cpuset = set([0, 1])
+ vcpu1 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu1.id = 1
+ vcpu1.cpuset = set([2, 3])
+ vcpu2 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu2.id = 2
+ vcpu2.cpuset = set([4, 5])
+ vcpu3 = config.LibvirtConfigGuestCPUTuneVCPUPin()
+ vcpu3.id = 3
+ vcpu3.cpuset = set([6, 7])
+ cputune.vcpupin.extend([vcpu0, vcpu1, vcpu2, vcpu3])
+
+ xml = cputune.to_xml()
+ self.assertXmlEqual(xml, """
+          <cputune>
+            <vcpupin vcpu="0" cpuset="0-1"/>
+            <vcpupin vcpu="1" cpuset="2-3"/>
+            <vcpupin vcpu="2" cpuset="4-5"/>
+            <vcpupin vcpu="3" cpuset="6-7"/>
+          </cputune>""")
+
+
+class LibvirtConfigGuestMemoryBackingTest(LibvirtConfigBaseTest):
+ def test_config_memory_backing_none(self):
+ obj = config.LibvirtConfigGuestMemoryBacking()
+
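+        # With no options set only a bare <memoryBacking/> element should
+        # be produced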
+ xml = obj.to_xml()
+        self.assertXmlEqual(xml, "<memoryBacking/>")
+
+ def test_config_memory_backing_all(self):
+ obj = config.LibvirtConfigGuestMemoryBacking()
+ obj.locked = True
+ obj.sharedpages = False
+ obj.hugepages = True
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+            <memoryBacking>
+              <hugepages/>
+              <nosharepages/>
+              <locked/>
+            </memoryBacking>""")
+
+
+class LibvirtConfigGuestMemoryTuneTest(LibvirtConfigBaseTest):
+    def test_config_memory_tune_none(self):
+ obj = config.LibvirtConfigGuestMemoryTune()
+
+ xml = obj.to_xml()
+        self.assertXmlEqual(xml, "<memtune/>")
+
+    def test_config_memory_tune_all(self):
+ obj = config.LibvirtConfigGuestMemoryTune()
+ obj.soft_limit = 6
+ obj.hard_limit = 28
+ obj.swap_hard_limit = 140
+ obj.min_guarantee = 270
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+          <memtune>
+            <hard_limit units="K">28</hard_limit>
+            <soft_limit units="K">6</soft_limit>
+            <swap_hard_limit units="K">140</swap_hard_limit>
+            <min_guarantee units="K">270</min_guarantee>
+          </memtune>""")
+
+
+class LibvirtConfigGuestMetadataNovaTest(LibvirtConfigBaseTest):
+
+ def test_config_metadata(self):
+ meta = config.LibvirtConfigGuestMetaNovaInstance()
+ meta.package = "2014.2.3"
+ meta.name = "moonbuggy"
+ meta.creationTime = 1234567890
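+        # 1234567890 seconds since the epoch, i.e. 2009-02-13 23:31:30 UTC
+        # in the rendered XML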
+ meta.roottype = "image"
+ meta.rootid = "fe55c69a-8b2e-4bbc-811a-9ad2023a0426"
+
+ owner = config.LibvirtConfigGuestMetaNovaOwner()
+ owner.userid = "3472c2a6-de91-4fb5-b618-42bc781ef670"
+ owner.username = "buzz"
+ owner.projectid = "f241e906-010e-4917-ae81-53f4fb8aa021"
+ owner.projectname = "moonshot"
+
+ meta.owner = owner
+
+ flavor = config.LibvirtConfigGuestMetaNovaFlavor()
+ flavor.name = "m1.lowgravity"
+ flavor.vcpus = 8
+ flavor.memory = 2048
+ flavor.swap = 10
+ flavor.disk = 50
+ flavor.ephemeral = 10
+
+ meta.flavor = flavor
+
+ xml = meta.to_xml()
+ self.assertXmlEqual(xml, """
+            <nova:instance xmlns:nova="http://openstack.org/xmlns/libvirt/nova/1.0">
+              <nova:package version="2014.2.3"/>
+              <nova:name>moonbuggy</nova:name>
+              <nova:creationTime>2009-02-13 23:31:30</nova:creationTime>
+              <nova:flavor name="m1.lowgravity">
+                <nova:memory>2048</nova:memory>
+                <nova:disk>50</nova:disk>
+                <nova:swap>10</nova:swap>
+                <nova:ephemeral>10</nova:ephemeral>
+                <nova:vcpus>8</nova:vcpus>
+              </nova:flavor>
+              <nova:owner>
+                <nova:user uuid="3472c2a6-de91-4fb5-b618-42bc781ef670">buzz</nova:user>
+                <nova:project uuid="f241e906-010e-4917-ae81-53f4fb8aa021">moonshot</nova:project>
+              </nova:owner>
+              <nova:root type="image" uuid="fe55c69a-8b2e-4bbc-811a-9ad2023a0426"/>
+            </nova:instance>
+ """)
diff --git a/nova/tests/virt/libvirt/test_driver.py b/nova/tests/virt/libvirt/test_driver.py
index 0468cec73a..5dea0ff36c 100644
--- a/nova/tests/virt/libvirt/test_driver.py
+++ b/nova/tests/virt/libvirt/test_driver.py
@@ -16,12 +16,14 @@
import __builtin__
import contextlib
import copy
+import datetime
import errno
import functools
import os
import re
import shutil
import tempfile
+import time
import uuid
from xml.dom import minidom
@@ -34,6 +36,7 @@
from oslo.config import cfg
from nova.api.ec2 import cloud
+from nova.api.metadata import base as instance_metadata
from nova.compute import flavors
from nova.compute import manager
from nova.compute import power_state
@@ -45,14 +48,12 @@
from nova import exception
from nova.network import model as network_model
from nova import objects
-from nova.objects import flavor as flavor_obj
-from nova.objects import pci_device as pci_device_obj
-from nova.objects import service as service_obj
from nova.openstack.common import fileutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
+from nova.openstack.common import timeutils
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova.pci import pci_manager
@@ -63,11 +64,13 @@
import nova.tests.image.fake
from nova.tests import matchers
from nova.tests.objects import test_pci_device
+from nova.tests.virt.libvirt import fake_imagebackend
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova.tests.virt.libvirt import fakelibvirt
from nova.tests.virt import test_driver
from nova import utils
from nova import version
+from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
@@ -80,6 +83,7 @@
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
@@ -147,14 +151,6 @@
"""}
-def mocked_bdm(id, bdm_info):
- bdm_mock = mock.MagicMock()
- bdm_mock.__getitem__ = lambda s, k: bdm_info[k]
- bdm_mock.get = lambda *k, **kw: bdm_info.get(*k, **kw)
- bdm_mock.id = id
- return bdm_mock
-
-
def _concurrency(signal, wait, done, target, is_block_dev=False):
signal.send()
wait.wait()
@@ -201,7 +197,8 @@ def ID(self):
return self.id
def info(self):
- return [power_state.RUNNING, None, None, None, None]
+ return [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
+ None, None]
def create(self):
pass
@@ -425,6 +422,11 @@ def setUp(self):
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.driver.LibvirtDriver._get_host_uuid',
lambda _: 'cef19ce0-0ca2-11df-855d-b19fbce37686'))
+ # Prevent test suite trying to find /etc/machine-id
+ # which isn't guaranteed to exist. Instead it will use
+ # the host UUID from libvirt which we mock above
+ self.flags(sysinfo_serial="hardware", group="libvirt")
+
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
@@ -548,9 +550,6 @@ def defineXML(self, xml):
for key, val in kwargs.items():
fake.__setattr__(key, val)
- self.flags(vif_driver="nova.tests.fake_network.FakeVIFDriver",
- group='libvirt')
-
self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
def fake_lookup(self, instance_name):
@@ -576,21 +575,6 @@ def test_public_api_signatures(self):
inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertPublicAPISignatures(inst)
- def test_min_version_cap(self):
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- with mock.patch.object(drvr._conn, 'getLibVersion') as mock_ver:
- mock_ver.return_value = utils.convert_version_to_int((1, 5, 0))
-
- self.flags(version_cap="2.0.0", group="libvirt")
- self.assertTrue(drvr._has_min_version((1, 4, 0)))
-
- self.flags(version_cap="1.3.0", group="libvirt")
- self.assertFalse(drvr._has_min_version((1, 4, 0)))
-
- self.flags(version_cap="", group="libvirt")
- self.assertTrue(drvr._has_min_version((1, 4, 0)))
-
def test_set_host_enabled_with_disable(self):
# Tests disabling an enabled host.
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -632,7 +616,7 @@ def test_set_host_enabled_swallows_exceptions(self):
def create_instance_obj(self, context, **params):
default_params = self.test_instance
- default_params['pci_devices'] = pci_device_obj.PciDeviceList()
+ default_params['pci_devices'] = objects.PciDeviceList()
default_params.update(params)
instance = objects.Instance(context, **params)
flavor = flavors.get_default_flavor()
@@ -900,7 +884,7 @@ def set_close_callback(cb, opaque):
mock.patch.object(conn, "_connect", return_value=self.conn),
mock.patch.object(self.conn, "registerCloseCallback",
side_effect=set_close_callback),
- mock.patch.object(service_obj.Service, "get_by_compute_host",
+ mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
# verify that the driver registers for the close callback
@@ -927,7 +911,7 @@ def test_close_callback_bad_signature(self):
mock.patch.object(conn, "_connect", return_value=self.conn),
mock.patch.object(self.conn, "registerCloseCallback",
side_effect=TypeError('dd')),
- mock.patch.object(service_obj.Service, "get_by_compute_host",
+ mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
connection = conn._get_connection()
@@ -945,7 +929,7 @@ def test_close_callback_not_defined(self):
mock.patch.object(conn, "_connect", return_value=self.conn),
mock.patch.object(self.conn, "registerCloseCallback",
side_effect=AttributeError('dd')),
- mock.patch.object(service_obj.Service, "get_by_compute_host",
+ mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
connection = conn._get_connection()
@@ -1034,20 +1018,46 @@ def test_lxc_get_host_capabilities_failed(self):
self.assertEqual(vconfig.LibvirtConfigCaps, type(caps))
self.assertNotIn('aes', [x.name for x in caps.host.cpu.features])
- def test_get_guest_config(self):
+ @mock.patch.object(time, "time")
+ def test_get_guest_config(self, time_mock):
+ time_mock.return_value = 1234567.89
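+        # Freeze time.time() so the creationTime asserted below is
+        # deterministic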
+
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = db.instance_create(self.context, self.test_instance)
+
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["display_name"] = "purple tomatoes"
+
+ ctxt = context.RequestContext(project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie")
+
+ flavor = objects.Flavor.get_by_id(
+ ctxt, test_instance["instance_type_id"])
+ flavor.memory_mb = 6
+ flavor.vcpus = 28
+ flavor.root_gb = 496
+ flavor.ephemeral_gb = 8128
+ flavor.swap = 33550336
+ instance_ref = db.instance_create(ctxt, test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
- cfg = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
+
+ with mock.patch.object(objects.Flavor,
+ "get_by_id") as flavor_mock:
+ flavor_mock.return_value = flavor
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info,
+ context=ctxt)
+
self.assertEqual(cfg.uuid, instance_ref["uuid"])
self.assertEqual(cfg.acpi, True)
self.assertEqual(cfg.apic, True)
- self.assertEqual(cfg.memory, 2 * units.Mi)
- self.assertEqual(cfg.vcpus, 1)
+ self.assertEqual(cfg.memory, 6 * units.Ki)
+ self.assertEqual(cfg.vcpus, 28)
self.assertEqual(cfg.os_type, vm_mode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertIsNone(cfg.os_root)
@@ -1068,6 +1078,68 @@ def test_get_guest_config(self):
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestVideo)
+ self.assertEqual(len(cfg.metadata), 1)
+ self.assertIsInstance(cfg.metadata[0],
+ vconfig.LibvirtConfigGuestMetaNovaInstance)
+ self.assertEqual(version.version_string_with_package(),
+ cfg.metadata[0].package)
+ self.assertEqual("purple tomatoes",
+ cfg.metadata[0].name)
+ self.assertEqual(1234567.89,
+ cfg.metadata[0].creationTime)
+ self.assertEqual("image",
+ cfg.metadata[0].roottype)
+ self.assertEqual(str(instance_ref["image_ref"]),
+ cfg.metadata[0].rootid)
+
+ self.assertIsInstance(cfg.metadata[0].owner,
+ vconfig.LibvirtConfigGuestMetaNovaOwner)
+ self.assertEqual(456,
+ cfg.metadata[0].owner.userid)
+ self.assertEqual("pie",
+ cfg.metadata[0].owner.username)
+ self.assertEqual(123,
+ cfg.metadata[0].owner.projectid)
+ self.assertEqual("aubergine",
+ cfg.metadata[0].owner.projectname)
+
+ self.assertIsInstance(cfg.metadata[0].flavor,
+ vconfig.LibvirtConfigGuestMetaNovaFlavor)
+ self.assertEqual("m1.small",
+ cfg.metadata[0].flavor.name)
+ self.assertEqual(6,
+ cfg.metadata[0].flavor.memory)
+ self.assertEqual(28,
+ cfg.metadata[0].flavor.vcpus)
+ self.assertEqual(496,
+ cfg.metadata[0].flavor.disk)
+ self.assertEqual(8128,
+ cfg.metadata[0].flavor.ephemeral)
+ self.assertEqual(33550336,
+ cfg.metadata[0].flavor.swap)
+
+ def test_get_guest_config_lxc(self):
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ None, {'mapping': {}})
+ self.assertEqual(instance_ref["uuid"], cfg.uuid)
+ self.assertEqual(2 * units.Mi, cfg.memory)
+ self.assertEqual(1, cfg.vcpus)
+ self.assertEqual(vm_mode.EXE, cfg.os_type)
+ self.assertEqual("/sbin/init", cfg.os_init_path)
+ self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
+ self.assertIsNone(cfg.os_root)
+ self.assertEqual(3, len(cfg.devices))
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestFilesys)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestInterface)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_clock(self):
self.flags(virt_type='kvm', group='libvirt')
@@ -1217,27 +1289,39 @@ def test_get_guest_config_with_block_device(self):
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
- info = {'block_device_mapping': [
- mocked_bdm(1, {'connection_info': conn_info,
- 'mount_device': '/dev/vdc'}),
- mocked_bdm(2, {'connection_info': conn_info,
- 'mount_device': '/dev/vdd'}),
- ]}
+ info = {'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/vdc'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/vdd'}),
+ ])}
+ info['block_device_mapping'][0]['connection_info'] = conn_info
+ info['block_device_mapping'][1]['connection_info'] = conn_info
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref, info)
- cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ cfg = conn._get_guest_config(instance_ref, [], {}, disk_info,
None, info)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[2].target_dev, 'vdc')
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[3].target_dev, 'vdd')
- self.assertTrue(info['block_device_mapping'][0].save.called)
- self.assertTrue(info['block_device_mapping'][1].save.called)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, 'vdc')
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[3].target_dev, 'vdd')
+ self.assertTrue(info['block_device_mapping'][0].save.called)
+ self.assertTrue(info['block_device_mapping'][1].save.called)
def test_get_guest_config_with_configdrive(self):
+        # It's necessary to check whether the architecture is Power,
+        # because Power has no IDE support, so libvirt translates all
+        # IDE requests to SCSI
+
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1248,9 +1332,15 @@ def test_get_guest_config_with_configdrive(self):
instance_ref)
cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
+        # The last device is selected for this. On x86 that is the last
+        # IDE device (hdd). Since Power only supports SCSI, the last
+        # device there is sdz
+
+ expect = {"ppc": "sdz", "ppc64": "sdz"}
+ disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[2].target_dev, 'hdd')
+ self.assertEqual(cfg.devices[2].target_dev, disk)
def test_get_guest_config_with_virtio_scsi_bus(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -1275,30 +1365,37 @@ def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
instance_ref = db.instance_create(self.context, self.test_instance)
conn_info = {'driver_volume_type': 'fake'}
- bd_info = {'block_device_mapping': [
- mocked_bdm(1, {'connection_info': conn_info,
- 'mount_device': '/dev/sdc',
- 'disk_bus': 'scsi'}),
- mocked_bdm(2, {'connection_info': conn_info,
- 'mount_device': '/dev/sdd',
- 'disk_bus': 'scsi'}),
- ]}
+ bd_info = {
+ 'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 2,
+ 'source_type': 'volume', 'destination_type': 'volume',
+ 'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
+ ])}
+ bd_info['block_device_mapping'][0]['connection_info'] = conn_info
+ bd_info['block_device_mapping'][1]['connection_info'] = conn_info
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref, bd_info, image_meta)
- cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info,
- [], bd_info)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[2].target_dev, 'sdc')
- self.assertEqual(cfg.devices[2].target_bus, 'scsi')
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestDisk)
- self.assertEqual(cfg.devices[3].target_dev, 'sdd')
- self.assertEqual(cfg.devices[3].target_bus, 'scsi')
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestController)
- self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ cfg = conn._get_guest_config(instance_ref, [], image_meta,
+ disk_info, [], bd_info)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[2].target_dev, 'sdc')
+ self.assertEqual(cfg.devices[2].target_bus, 'scsi')
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertEqual(cfg.devices[3].target_dev, 'sdd')
+ self.assertEqual(cfg.devices[3].target_bus, 'scsi')
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestController)
+ self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
def test_get_guest_config_with_vnc(self):
self.flags(vnc_enabled=True)
@@ -1550,9 +1647,8 @@ def test_get_guest_config_with_watchdog_action_through_flavor(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- fake_flavor = flavor_obj.Flavor.get_by_id(
- self.context,
- self.test_instance['instance_type_id'])
+ fake_flavor = objects.Flavor.get_by_id(
+ self.context, self.test_instance['instance_type_id'])
fake_flavor.extra_specs = {'hw_watchdog_action': 'none'}
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1560,7 +1656,7 @@ def test_get_guest_config_with_watchdog_action_through_flavor(self):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
- with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+ with mock.patch.object(objects.Flavor, 'get_by_id',
return_value=fake_flavor):
cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
@@ -1589,9 +1685,8 @@ def test_get_guest_config_with_watchdog_action_meta_overrides_flavor(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- fake_flavor = flavor_obj.Flavor.get_by_id(
- self.context,
- self.test_instance['instance_type_id'])
+ fake_flavor = objects.Flavor.get_by_id(
+ self.context, self.test_instance['instance_type_id'])
fake_flavor.extra_specs = {'hw_watchdog_action': 'none'}
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1601,7 +1696,7 @@ def test_get_guest_config_with_watchdog_action_meta_overrides_flavor(self):
image_meta = {"properties": {"hw_watchdog_action": "pause"}}
- with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+ with mock.patch.object(objects.Flavor, 'get_by_id',
return_value=fake_flavor):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
@@ -1711,7 +1806,7 @@ def test_get_guest_config_with_video_driver_vram(self):
agent_enabled=True,
group='spice')
- instance_type = flavor_obj.Flavor.get_by_id(self.context, 5)
+ instance_type = objects.Flavor.get_by_id(self.context, 5)
instance_type.extra_specs = {'hw_video:ram_max_mb': "100"}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1720,7 +1815,7 @@ def test_get_guest_config_with_video_driver_vram(self):
instance_ref)
image_meta = {"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}}
- with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+ with mock.patch.object(objects.Flavor, 'get_by_id',
return_value=instance_type):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
@@ -1744,6 +1839,39 @@ def test_get_guest_config_with_video_driver_vram(self):
self.assertEqual(cfg.devices[6].type, "qxl")
self.assertEqual(cfg.devices[6].vram, 64)
+ @mock.patch('nova.virt.disk.api.teardown_container')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_unmount_fs_if_error_during_lxc_create_domain(self,
+ mock_get_inst_path, mock_ensure_tree, mock_setup_container,
+ mock_get_info, mock_teardown):
+ """If we hit an error during a `_create_domain` call to `libvirt+lxc`
+ we need to ensure the guest FS is unmounted from the host so that any
+ future `lvremove` calls will work.
+ """
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_domain = mock.MagicMock()
+ mock_instance = mock.MagicMock()
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_get_info.side_effect = exception.InstanceNotFound(
+ instance_id='foo')
+
+ mock_domain.createWithFlags.side_effect = ValueError('somethingbad')
+
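+        # The boot failure must still propagate after the rootfs teardown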
+ self.assertRaises(ValueError, conn._create_domain, domain=mock_domain,
+ instance=mock_instance)
+
+ mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
+
def test_video_driver_flavor_limit_not_set(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
@@ -1770,7 +1898,7 @@ def test_video_driver_ram_above_flavor_limit(self):
agent_enabled=True,
group='spice')
- instance_type = flavor_obj.Flavor.get_by_id(self.context, 5)
+ instance_type = objects.Flavor.get_by_id(self.context, 5)
instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -1779,7 +1907,7 @@ def test_video_driver_ram_above_flavor_limit(self):
instance_ref)
image_meta = {"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}}
- with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+ with mock.patch.object(objects.Flavor, 'get_by_id',
return_value=instance_type):
self.assertRaises(exception.RequestedVRamTooHigh,
conn._get_guest_config,
@@ -1822,7 +1950,7 @@ def test_get_guest_config_with_rng_device(self):
use_usb_tablet=False,
group='libvirt')
- fake_flavor = flavor_obj.Flavor.get_by_id(
+ fake_flavor = objects.Flavor.get_by_id(
self.context,
self.test_instance['instance_type_id'])
fake_flavor.extra_specs = {'hw_rng:allowed': 'True'}
@@ -1833,7 +1961,7 @@ def test_get_guest_config_with_rng_device(self):
instance_ref)
image_meta = {"properties": {"hw_rng_model": "virtio"}}
- with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+ with mock.patch.object(objects.Flavor, 'get_by_id',
return_value=fake_flavor):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
@@ -1890,7 +2018,7 @@ def test_get_guest_config_with_rng_limits(self):
use_usb_tablet=False,
group='libvirt')
- fake_flavor = flavor_obj.Flavor.get_by_id(
+ fake_flavor = objects.Flavor.get_by_id(
self.context,
self.test_instance['instance_type_id'])
fake_flavor.extra_specs = {'hw_rng:allowed': 'True',
@@ -1903,7 +2031,7 @@ def test_get_guest_config_with_rng_limits(self):
instance_ref)
image_meta = {"properties": {"hw_rng_model": "virtio"}}
- with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+ with mock.patch.object(objects.Flavor, 'get_by_id',
return_value=fake_flavor):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
@@ -1933,7 +2061,7 @@ def test_get_guest_config_with_rng_backend(self):
rng_dev_path='/dev/hw_rng',
group='libvirt')
- fake_flavor = flavor_obj.Flavor.get_by_id(
+ fake_flavor = objects.Flavor.get_by_id(
self.context,
self.test_instance['instance_type_id'])
fake_flavor.extra_specs = {'hw_rng:allowed': 'True'}
@@ -1944,7 +2072,7 @@ def test_get_guest_config_with_rng_backend(self):
instance_ref)
image_meta = {"properties": {"hw_rng_model": "virtio"}}
- with contextlib.nested(mock.patch.object(flavor_obj.Flavor,
+ with contextlib.nested(mock.patch.object(objects.Flavor,
'get_by_id',
return_value=fake_flavor),
mock.patch('nova.virt.libvirt.driver.os.path.exists',
@@ -1977,7 +2105,7 @@ def test_get_guest_config_with_rng_dev_not_present(self):
rng_dev_path='/dev/hw_rng',
group='libvirt')
- fake_flavor = flavor_obj.Flavor.get_by_id(
+ fake_flavor = objects.Flavor.get_by_id(
self.context,
self.test_instance['instance_type_id'])
fake_flavor.extra_specs = {'hw_rng:allowed': 'True'}
@@ -1988,7 +2116,7 @@ def test_get_guest_config_with_rng_dev_not_present(self):
instance_ref)
image_meta = {"properties": {"hw_rng_model": "virtio"}}
- with contextlib.nested(mock.patch.object(flavor_obj.Flavor,
+ with contextlib.nested(mock.patch.object(objects.Flavor,
'get_by_id',
return_value=fake_flavor),
mock.patch('nova.virt.libvirt.driver.os.path.exists',
@@ -2004,7 +2132,7 @@ def test_get_guest_config_with_cpu_quota(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- fake_flavor = flavor_obj.Flavor.get_by_id(
+ fake_flavor = objects.flavor.Flavor.get_by_id(
self.context,
self.test_instance['instance_type_id'])
fake_flavor.extra_specs = {'quota:cpu_shares': '10000',
@@ -2015,7 +2143,7 @@ def test_get_guest_config_with_cpu_quota(self):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
- with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+ with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
return_value=fake_flavor):
cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
@@ -2027,7 +2155,7 @@ def test_get_guest_config_with_bogus_cpu_quota(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- fake_flavor = flavor_obj.Flavor.get_by_id(
+ fake_flavor = objects.flavor.Flavor.get_by_id(
self.context,
self.test_instance['instance_type_id'])
fake_flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
@@ -2038,12 +2166,123 @@ def test_get_guest_config_with_bogus_cpu_quota(self):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
- with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+ with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
return_value=fake_flavor):
self.assertRaises(ValueError,
conn._get_guest_config,
instance_ref, [], {}, disk_info)
+ def _test_get_guest_config_sysinfo_serial(self, expected_serial):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ cfg = drvr._get_guest_config_sysinfo(instance_ref)
+
+ self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
+ self.assertEqual(version.vendor_string(),
+ cfg.system_manufacturer)
+ self.assertEqual(version.product_string(),
+ cfg.system_product)
+ self.assertEqual(version.version_string_with_package(),
+ cfg.system_version)
+ self.assertEqual(expected_serial,
+ cfg.system_serial)
+ self.assertEqual(instance_ref['uuid'],
+ cfg.system_uuid)
+
+ def test_get_guest_config_sysinfo_serial_none(self):
+ self.flags(sysinfo_serial="none", group="libvirt")
+ self._test_get_guest_config_sysinfo_serial(None)
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_uuid")
+ def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
+ self.flags(sysinfo_serial="hardware", group="libvirt")
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+ mock_uuid.return_value = theuuid
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_os(self):
+ self.flags(sysinfo_serial="os", group="libvirt")
+
+ real_open = __builtin__.open
+ with contextlib.nested(
+ mock.patch.object(__builtin__, "open"),
+ ) as (mock_open, ):
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+
+ def fake_open(filename, *args, **kwargs):
+ if filename == "/etc/machine-id":
+ h = mock.MagicMock()
+ h.read.return_value = theuuid
+ h.__enter__.return_value = h
+ return h
+ return real_open(filename, *args, **kwargs)
+
+ mock_open.side_effect = fake_open
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_auto_hardware(self):
+ self.flags(sysinfo_serial="auto", group="libvirt")
+
+ real_exists = os.path.exists
+ with contextlib.nested(
+ mock.patch.object(os.path, "exists"),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_get_host_uuid")
+ ) as (mock_exists, mock_uuid):
+ def fake_exists(filename):
+ if filename == "/etc/machine-id":
+ return False
+ return real_exists(filename)
+
+ mock_exists.side_effect = fake_exists
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+ mock_uuid.return_value = theuuid
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_auto_os(self):
+ self.flags(sysinfo_serial="auto", group="libvirt")
+
+ real_exists = os.path.exists
+ real_open = __builtin__.open
+ with contextlib.nested(
+ mock.patch.object(os.path, "exists"),
+ mock.patch.object(__builtin__, "open"),
+ ) as (mock_exists, mock_open):
+ def fake_exists(filename):
+ if filename == "/etc/machine-id":
+ return True
+ return real_exists(filename)
+
+ mock_exists.side_effect = fake_exists
+
+ theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
+
+ def fake_open(filename, *args, **kwargs):
+ if filename == "/etc/machine-id":
+ h = mock.MagicMock()
+ h.read.return_value = theuuid
+ h.__enter__.return_value = h
+ return h
+ return real_open(filename, *args, **kwargs)
+
+ mock_open.side_effect = fake_open
+
+ self._test_get_guest_config_sysinfo_serial(theuuid)
+
+ def test_get_guest_config_sysinfo_serial_invalid(self):
+ self.flags(sysinfo_serial="invalid", group="libvirt")
+
+ self.assertRaises(exception.NovaException,
+ libvirt_driver.LibvirtDriver,
+ fake.FakeVirtAPI(),
+ True)
+
def _create_fake_service_compute(self):
service_info = {
'host': 'fake',
@@ -2282,6 +2521,51 @@ def test_get_guest_config_machine_type_through_image_meta(self):
image_meta, disk_info)
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
+ def test_get_guest_config_machine_type_from_config(self):
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(hw_machine_type=['x86_64=fake_machine_type'],
+ group='libvirt')
+
+ def fake_getCapabilities():
+ return """
+            <capabilities>
+                <host>
+                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
+                    <cpu>
+                        <arch>x86_64</arch>
+                        <model>Penryn</model>
+                        <vendor>Intel</vendor>
+                        <topology sockets='1' cores='2' threads='1'/>
+                        <feature name='xtpr'/>
+                    </cpu>
+                </host>
+            </capabilities>
+ """
+
+ def fake_baselineCPU(cpu, flag):
+            return """<cpu mode='custom' match='exact'>
+                        <model>Penryn</model>
+                        <vendor>Intel</vendor>
+                        <feature policy='require' name='xtpr'/>
+                      </cpu>
+ """
+
+ # Make sure the host arch is mocked as x86_64
+ self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
+ baselineCPU=fake_baselineCPU,
+ getVersion=lambda: 1005001)
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref)
+
+ cfg = conn._get_guest_config(instance_ref,
+ _fake_network_info(self.stubs, 1),
+ {}, disk_info)
+ self.assertEqual(cfg.os_mach_type, "fake_machine_type")
+
def _test_get_guest_config_ppc64(self, device_index):
"""Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.
"""
@@ -2389,13 +2673,7 @@ def test_get_guest_cpu_config_default_lxc(self):
{}, disk_info)
self.assertIsNone(conf.cpu)
- def test_get_guest_cpu_config_host_passthrough_new(self):
- def get_lib_version_stub():
- return (0 * 1000 * 1000) + (9 * 1000) + 11
-
- self.stubs.Set(self.conn,
- "getLibVersion",
- get_lib_version_stub)
+ def test_get_guest_cpu_config_host_passthrough(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -2413,13 +2691,7 @@ def get_lib_version_stub():
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
- def test_get_guest_cpu_config_host_model_new(self):
- def get_lib_version_stub():
- return (0 * 1000 * 1000) + (9 * 1000) + 11
-
- self.stubs.Set(self.conn,
- "getLibVersion",
- get_lib_version_stub)
+ def test_get_guest_cpu_config_host_model(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -2437,13 +2709,7 @@ def get_lib_version_stub():
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
- def test_get_guest_cpu_config_custom_new(self):
- def get_lib_version_stub():
- return (0 * 1000 * 1000) + (9 * 1000) + 11
-
- self.stubs.Set(self.conn,
- "getLibVersion",
- get_lib_version_stub)
+ def test_get_guest_cpu_config_custom(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -2463,99 +2729,8 @@ def get_lib_version_stub():
self.assertEqual(conf.cpu.cores, 1)
self.assertEqual(conf.cpu.threads, 1)
- def test_get_guest_cpu_config_host_passthrough_old(self):
- def get_lib_version_stub():
- return (0 * 1000 * 1000) + (9 * 1000) + 7
-
- self.stubs.Set(self.conn,
- "getLibVersion",
- get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = db.instance_create(self.context, self.test_instance)
-
- self.flags(cpu_mode="host-passthrough", group='libvirt')
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- self.assertRaises(exception.NovaException,
- conn._get_guest_config,
- instance_ref,
- _fake_network_info(self.stubs, 1),
- {},
- disk_info)
-
- def test_get_guest_cpu_config_host_model_old(self):
- def get_lib_version_stub():
- return (0 * 1000 * 1000) + (9 * 1000) + 7
-
- # Ensure we have a predictable host CPU
- def get_host_capabilities_stub(self):
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.model = "Opteron_G4"
- cpu.vendor = "AMD"
-
- cpu.add_feature(vconfig.LibvirtConfigGuestCPUFeature("tm2"))
- cpu.add_feature(vconfig.LibvirtConfigGuestCPUFeature("ht"))
-
- caps = vconfig.LibvirtConfigCaps()
- caps.host = vconfig.LibvirtConfigCapsHost()
- caps.host.cpu = cpu
- return caps
-
- self.stubs.Set(self.conn,
- "getLibVersion",
- get_lib_version_stub)
- self.stubs.Set(libvirt_driver.LibvirtDriver,
- "_get_host_capabilities",
- get_host_capabilities_stub)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = db.instance_create(self.context, self.test_instance)
-
- self.flags(cpu_mode="host-model", group='libvirt')
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsInstance(conf.cpu,
- vconfig.LibvirtConfigGuestCPU)
- self.assertIsNone(conf.cpu.mode)
- self.assertEqual(conf.cpu.model, "Opteron_G4")
- self.assertEqual(conf.cpu.vendor, "AMD")
- self.assertEqual(len(conf.cpu.features), 2)
- self.assertEqual(conf.cpu.features.pop().name, "tm2")
- self.assertEqual(conf.cpu.features.pop().name, "ht")
- self.assertEqual(conf.cpu.sockets, 1)
- self.assertEqual(conf.cpu.cores, 1)
- self.assertEqual(conf.cpu.threads, 1)
-
- def test_get_guest_cpu_config_custom_old(self):
- def get_lib_version_stub():
- return (0 * 1000 * 1000) + (9 * 1000) + 7
-
- self.stubs.Set(self.conn,
- "getLibVersion",
- get_lib_version_stub)
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = db.instance_create(self.context, self.test_instance)
-
- self.flags(cpu_mode="custom",
- cpu_model="Penryn",
- group='libvirt')
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref)
- conf = conn._get_guest_config(instance_ref,
- _fake_network_info(self.stubs, 1),
- {}, disk_info)
- self.assertIsInstance(conf.cpu,
- vconfig.LibvirtConfigGuestCPU)
- self.assertIsNone(conf.cpu.mode)
- self.assertEqual(conf.cpu.model, "Penryn")
- self.assertEqual(conf.cpu.sockets, 1)
- self.assertEqual(conf.cpu.cores, 1)
- self.assertEqual(conf.cpu.threads, 1)
-
def test_get_guest_cpu_topology(self):
- fake_flavor = flavor_obj.Flavor.get_by_id(
+ fake_flavor = objects.flavor.Flavor.get_by_id(
self.context,
self.test_instance['instance_type_id'])
fake_flavor.vcpus = 8
@@ -2566,7 +2741,7 @@ def test_get_guest_cpu_topology(self):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref)
- with mock.patch.object(flavor_obj.Flavor, 'get_by_id',
+ with mock.patch.object(objects.flavor.Flavor, 'get_by_id',
return_value=fake_flavor):
conf = conn._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
@@ -3851,8 +4026,8 @@ def connection_supports_direct_io_stub(dirpath):
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
- for disk in disks:
- self.assertEqual(disk.get("cache"), "none")
+ for guest_disk in disks:
+ self.assertEqual(guest_disk.get("cache"), "none")
directio_supported = False
@@ -3865,8 +4040,8 @@ def connection_supports_direct_io_stub(dirpath):
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
disks = tree.findall('./devices/disk/driver')
- for disk in disks:
- self.assertEqual(disk.get("cache"), "writethrough")
+ for guest_disk in disks:
+ self.assertEqual(guest_disk.get("cache"), "writethrough")
def _check_xml_and_disk_bus(self, image_meta,
block_device_info, wantConfig):
@@ -4446,7 +4621,7 @@ def fake_lookup(instance_name):
self.compute._rollback_live_migration(self.context, instance_ref,
'dest', False)
- #start test
+ # start test
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
@@ -4488,7 +4663,7 @@ def fake_lookup(instance_name):
self.compute._rollback_live_migration(self.context, instance_ref,
'dest', False)
- #start test
+ # start test
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
@@ -4529,7 +4704,7 @@ def fake_lookup(instance_name):
self.compute._rollback_live_migration(self.context, instance_ref,
'dest', False)
- #start test
+ # start test
migrate_data = {}
self.mox.ReplayAll()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -4565,7 +4740,7 @@ def fake_lookup(instance_name):
self.compute._rollback_live_migration(self.context, instance_ref,
'dest', False)
- #start test
+ # start test
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
@@ -4620,7 +4795,7 @@ def fake_lookup(instance_name):
self.compute._rollback_live_migration(self.context, instance_ref,
'dest', False)
- #start test
+ # start test
migrate_data = {'pre_live_migration_result':
{'graphics_listen_addrs':
{'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
@@ -4767,26 +4942,6 @@ def fake_none(*args, **kwargs):
'vnc': '127.0.0.1'}}
self.assertEqual(result, target_res)
- def test_pre_live_migration_block_with_config_drive_mocked(self):
- # Creating testdata
- vol = {'block_device_mapping': [
- {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
- {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-
- def fake_true(*args, **kwargs):
- return True
-
- self.stubs.Set(configdrive, 'required_by', fake_true)
-
- inst_ref = {'id': 'foo'}
- c = context.get_admin_context()
-
- self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
- conn.pre_live_migration, c, inst_ref, vol, None,
- None, {'is_shared_instance_path': False,
- 'is_shared_block_storage': False})
-
def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
# Creating testdata, using temp dir.
with utils.tempdir() as tmpdir:
@@ -4867,9 +5022,69 @@ def fake_plug_vifs(instance, network_info):
conn.pre_live_migration(self.context, instance, block_device_info=None,
network_info=[], disk_info={})
- def test_get_instance_disk_info_works_correctly(self):
- # Test data
- instance_ref = db.instance_create(self.context, self.test_instance)
+ def test_pre_live_migration_image_not_created_with_shared_storage(self):
+ migrate_data_set = [{'is_shared_block_storage': False,
+ 'block_migration': False},
+ {'is_shared_block_storage': True,
+ 'block_migration': False},
+ {'is_shared_block_storage': False,
+ 'block_migration': True}]
+
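+        # _create_images_and_backing must not be called for any of these
+        # migrate_data combinations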
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = db.instance_create(self.context, self.test_instance)
+ # creating mocks
+ with contextlib.nested(
+ mock.patch.object(conn,
+ '_create_images_and_backing'),
+ mock.patch.object(conn,
+ 'ensure_filtering_rules_for_instance'),
+ mock.patch.object(conn, 'plug_vifs'),
+ ) as (
+ create_image_mock,
+ rules_mock,
+ plug_mock,
+ ):
+ for migrate_data in migrate_data_set:
+ res = conn.pre_live_migration(self.context, instance,
+ block_device_info=None,
+ network_info=[], disk_info={},
+ migrate_data=migrate_data)
+ self.assertFalse(create_image_mock.called)
+ self.assertIsInstance(res, dict)
+
+ def test_pre_live_migration_with_not_shared_instance_path(self):
+ migrate_data = {'is_shared_block_storage': False,
+ 'is_shared_instance_path': False}
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = db.instance_create(self.context, self.test_instance)
+
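+        # Without a shared instance path the driver must create a local
+        # instance directory and pass it down to the image creation step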
+ def check_instance_dir(context, instance,
+ instance_dir, disk_info):
+ self.assertTrue(instance_dir)
+ # creating mocks
+ with contextlib.nested(
+ mock.patch.object(conn,
+ '_create_images_and_backing',
+ side_effect=check_instance_dir),
+ mock.patch.object(conn,
+ 'ensure_filtering_rules_for_instance'),
+ mock.patch.object(conn, 'plug_vifs'),
+ ) as (
+ create_image_mock,
+ rules_mock,
+ plug_mock,
+ ):
+ res = conn.pre_live_migration(self.context, instance,
+ block_device_info=None,
+ network_info=[], disk_info={},
+ migrate_data=migrate_data)
+ self.assertTrue(create_image_mock.called)
+ self.assertIsInstance(res, dict)
+
+ def test_get_instance_disk_info_works_correctly(self):
+ # Test data
+ instance_ref = db.instance_create(self.context, self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
@@ -5032,7 +5247,7 @@ def fake_none(*args, **kwargs):
return
def fake_getLibVersion():
- return 9007
+ return 9011
def fake_getCapabilities():
return """
@@ -5400,6 +5615,45 @@ def fake_get_info(instance):
]
self.assertEqual(gotFiles, wantFiles)
+ @mock.patch.object(utils, 'execute')
+ def test_create_ephemeral_specified_fs(self, mock_exec):
+ self.flags(default_ephemeral_format='ext3')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
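+        # The explicitly requested ext4 must override the ext3
+        # default_ephemeral_format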
+ conn._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
+ is_block_dev=True, max_size=20,
+ specified_fs='ext4')
+ mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
+ 'myVol', '/dev/something',
+ run_as_root=True)
+
+ def test_create_ephemeral_specified_fs_not_valid(self):
+ CONF.set_override('default_ephemeral_format', 'ext4')
+ ephemerals = [{'device_type': 'disk',
+ 'disk_bus': 'virtio',
+ 'device_name': '/dev/vdb',
+ 'guest_format': 'dummy',
+ 'size': 1}]
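+        # 'dummy' is not a supported filesystem, so image creation must
+        # fail with InvalidBDMFormat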
+ block_device_info = {
+ 'ephemerals': ephemerals}
+ instance_ref = self.test_instance
+ instance_ref['image_ref'] = 1
+ instance = db.instance_create(self.context, instance_ref)
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ image_meta = {'id': instance['image_ref']}
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance,
+ None,
+ image_meta)
+ disk_info['mapping'].pop('disk.local')
+
+ with contextlib.nested(
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(conn, 'get_info'),
+ mock.patch.object(conn, '_create_domain_and_network')):
+ self.assertRaises(exception.InvalidBDMFormat, conn._create_image,
+ context, instance, disk_info['mapping'],
+ block_device_info=block_device_info)
+
def test_create_ephemeral_default(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(utils, 'execute')
@@ -5594,7 +5848,7 @@ def test_service_resume_after_broken_connection(self):
with contextlib.nested(
mock.patch.object(libvirt, 'openAuth',
return_value=mock.MagicMock()),
- mock.patch.object(service_obj.Service, "get_by_compute_host",
+ mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
conn.get_num_instances()
@@ -5645,11 +5899,11 @@ def _test_destroy_removes_disk(self, volume_fail=False):
db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
columns_to_join=['info_cache',
'security_groups'],
- use_slave=False
+ use_subordinate=False
).AndReturn(instance)
self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
driver.block_device_info_get_mapping(vol
- ).AndReturn(vol['block_device_mapping'])
+ ).AndReturn(vol['block_device_mapping'])
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
"_disconnect_volume")
if volume_fail:
@@ -5659,9 +5913,10 @@ def _test_destroy_removes_disk(self, volume_fail=False):
else:
libvirt_driver.LibvirtDriver._disconnect_volume(
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.StubOutWithMock(shutil, "rmtree")
- shutil.rmtree(os.path.join(CONF.instances_path,
- 'instance-%08x' % int(instance['id'])))
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
+ 'delete_instance_files')
+ (libvirt_driver.LibvirtDriver.delete_instance_files(mox.IgnoreArg()).
+ AndReturn(True))
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_cleanup_lvm')
libvirt_driver.LibvirtDriver._cleanup_lvm(instance)
@@ -5740,44 +5995,6 @@ def fake_unfilter_instance(instance, network_info):
self.stubs.Set(os.path, 'exists', fake_os_path_exists)
conn.destroy(self.context, instance, [], None, False)
- def test_delete_instance_files(self):
- instance = {"name": "instancename", "id": "42",
- "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64",
- "cleaned": 0, 'info_cache': None, 'security_groups': []}
-
- self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
- self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(shutil, "rmtree")
-
- db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg(),
- columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
- ).AndReturn(instance)
- os.path.exists(mox.IgnoreArg()).AndReturn(False)
- os.path.exists(mox.IgnoreArg()).AndReturn(True)
- shutil.rmtree(os.path.join(CONF.instances_path, instance['uuid']))
- os.path.exists(mox.IgnoreArg()).AndReturn(True)
- os.path.exists(mox.IgnoreArg()).AndReturn(False)
- os.path.exists(mox.IgnoreArg()).AndReturn(True)
- shutil.rmtree(os.path.join(CONF.instances_path, instance['uuid']))
- os.path.exists(mox.IgnoreArg()).AndReturn(False)
- self.mox.ReplayAll()
-
- def fake_obj_load_attr(self, attrname):
- if not hasattr(self, attrname):
- self[attrname] = {}
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(objects.Instance, 'fields',
- {'id': int, 'uuid': str, 'cleaned': int})
- self.stubs.Set(objects.Instance, 'obj_load_attr',
- fake_obj_load_attr)
-
- inst_obj = objects.Instance.get_by_uuid(None, instance['uuid'])
- self.assertFalse(conn.delete_instance_files(inst_obj))
- self.assertTrue(conn.delete_instance_files(inst_obj))
-
def test_reboot_different_ids(self):
class FakeLoopingCall:
def start(self, *a, **k):
@@ -5994,7 +6211,12 @@ def fake_get_info(instance_name):
conn._destroy(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance, block_device_info)
+
+ system_meta = utils.instance_sys_meta(instance)
+ image_meta = utils.get_image_from_system_metadata(system_meta)
+
conn._get_guest_xml(self.context, instance, network_info, disk_info,
+ image_meta=image_meta,
block_device_info=block_device_info,
write_to_disk=True).AndReturn(dummyxml)
disk_info_json = '[{"virt_disk_size": 2}]'
@@ -6011,6 +6233,50 @@ def fake_get_info(instance_name):
conn._hard_reboot(self.context, instance, network_info,
block_device_info)
+ @mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall')
+ @mock.patch('nova.pci.pci_manager.get_instance_pci_devs')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
+ @mock.patch('nova.virt.libvirt.utils.write_to_file')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
+ @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
+ @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
+ def test_hard_reboot_doesnt_call_glance_show(self,
+ mock_destroy, mock_get_disk_info, mock_get_guest_config,
+ mock_get_instance_path, mock_write_to_file,
+ mock_get_instance_disk_info, mock_create_images_and_backing,
+ mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
+ mock_get_instance_pci_devs, mock_looping_call):
+ """For a hard reboot, we shouldn't need an additional call to glance
+ to get the image metadata.
+
+ This is important for automatically spinning up instances on a
+ host-reboot, since we won't have a user request context that'll allow
+ the Glance request to go through. We have to rely on the cached image
+ metadata, instead.
+
+ https://bugs.launchpad.net/nova/+bug/1339386
+ """
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ instance = db.instance_create(self.context, self.test_instance)
+
+ network_info = mock.MagicMock()
+ block_device_info = mock.MagicMock()
+ mock_get_disk_info.return_value = {}
+ mock_get_guest_config.return_value = mock.MagicMock()
+ mock_get_instance_path.return_value = '/foo'
+ mock_looping_call.return_value = mock.MagicMock()
+ conn._image_api = mock.MagicMock()
+
+ conn._hard_reboot(self.context, instance, network_info,
+ block_device_info)
+
+ self.assertFalse(conn._image_api.get.called)
+
def test_power_on(self):
def _check_xml_bus(name, xml, block_info):
@@ -6063,12 +6329,9 @@ def _get_inst(with_meta=True):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with contextlib.nested(
- mock.patch.object(conn, '_destroy', return_value=None),
- mock.patch.object(conn, '_create_images_and_backing'),
- mock.patch.object(conn, '_create_domain_and_network'),
- mock.patch('nova.image.glance.get_remote_image_service',
- return_value=(image_service_mock,
- instance['image_ref']))):
+ mock.patch.object(conn, '_destroy', return_value=None),
+ mock.patch.object(conn, '_create_images_and_backing'),
+ mock.patch.object(conn, '_create_domain_and_network')):
conn.get_info = fake_get_info
conn._get_instance_disk_info = _check_xml_bus
conn._hard_reboot(self.context, instance, network_info,
@@ -6078,6 +6341,82 @@ def _get_inst(with_meta=True):
conn._hard_reboot(self.context, instance, network_info,
block_device_info)
+ def _test_clean_shutdown(self, seconds_to_shutdown,
+ timeout, retry_interval,
+ shutdown_attempts, succeeds):
+ info_tuple = ('fake', 'fake', 'fake', 'also_fake')
+ shutdown_count = []
+
+ def count_shutdowns():
+ shutdown_count.append("shutdown")
+
+ # Mock domain
+ mock_domain = self.mox.CreateMock(libvirt.virDomain)
+
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
+ mock_domain.shutdown().WithSideEffects(count_shutdowns)
+
+ retry_countdown = retry_interval
+ for x in xrange(min(seconds_to_shutdown, timeout)):
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
+ if retry_countdown == 0:
+ mock_domain.shutdown().WithSideEffects(count_shutdowns)
+ retry_countdown = retry_interval
+ else:
+ retry_countdown -= 1
+
+ if seconds_to_shutdown < timeout:
+ mock_domain.info().AndReturn(
+ (libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)
+
+ self.mox.ReplayAll()
+
+ def fake_lookup_by_name(instance_name):
+ return mock_domain
+
+ def fake_create_domain(**kwargs):
+ self.reboot_create_called = True
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = {"name": "instancename", "id": "instanceid",
+ "uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
+ self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
+ self.stubs.Set(conn, '_create_domain', fake_create_domain)
+ result = conn._clean_shutdown(instance, timeout, retry_interval)
+
+ self.assertEqual(succeeds, result)
+ self.assertEqual(shutdown_attempts, len(shutdown_count))
+
+ def test_clean_shutdown_first_time(self):
+ self._test_clean_shutdown(seconds_to_shutdown=2,
+ timeout=5,
+ retry_interval=3,
+ shutdown_attempts=1,
+ succeeds=True)
+
+ def test_clean_shutdown_with_retry(self):
+ self._test_clean_shutdown(seconds_to_shutdown=4,
+ timeout=5,
+ retry_interval=3,
+ shutdown_attempts=2,
+ succeeds=True)
+
+ def test_clean_shutdown_failure(self):
+ self._test_clean_shutdown(seconds_to_shutdown=6,
+ timeout=5,
+ retry_interval=3,
+ shutdown_attempts=2,
+ succeeds=False)
+
+ def test_clean_shutdown_no_wait(self):
+ self._test_clean_shutdown(seconds_to_shutdown=6,
+ timeout=0,
+ retry_interval=3,
+ shutdown_attempts=1,
+ succeeds=False)
+
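Reviewer note: the expected shutdown_attempts values follow directly from the retry loop in _test_clean_shutdown: one initial shutdown() plus one more each time retry_countdown reaches zero while the domain is still running. The same arithmetic, spelled out, reproduces all four cases above:

    def expected_shutdown_attempts(seconds_to_shutdown, timeout, retry_interval):
        attempts = 1                       # shutdown() is always issued once
        retry_countdown = retry_interval
        for _ in range(min(seconds_to_shutdown, timeout)):
            if retry_countdown == 0:
                attempts += 1              # retried while still RUNNING
                retry_countdown = retry_interval
            else:
                retry_countdown -= 1
        return attempts

    assert expected_shutdown_attempts(2, 5, 3) == 1   # first_time
    assert expected_shutdown_attempts(4, 5, 3) == 2   # with_retry
    assert expected_shutdown_attempts(6, 5, 3) == 2   # failure
    assert expected_shutdown_attempts(6, 0, 3) == 1   # no_wait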
def test_resume(self):
dummyxml = ("instance-0000000a"
""
@@ -6141,38 +6480,16 @@ def fake_delete_instance_files(instance):
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(self.context, instance, [])
- def test_cleanup_rbd(self):
- mock = self.mox.CreateMock(libvirt.virDomain)
-
- def fake_lookup_by_name(instance_name):
- return mock
-
- def fake_get_info(instance_name):
- return {'state': power_state.SHUTDOWN, 'id': -1}
-
- fake_volumes = ['875a8070-d0b9-4949-8b31-104d125c9a64.local',
- '875a8070-d0b9-4949-8b31-104d125c9a64.swap',
- '875a8070-d0b9-4949-8b31-104d125c9a64',
- 'wrong875a8070-d0b9-4949-8b31-104d125c9a64']
- fake_pool = 'fake_pool'
- fake_instance = {'name': 'fakeinstancename', 'id': 'instanceid',
- 'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
-
- conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
- self.stubs.Set(conn, 'get_info', fake_get_info)
-
- self.flags(images_rbd_pool=fake_pool, group='libvirt')
- self.mox.StubOutWithMock(libvirt_driver.libvirt_utils,
- 'remove_rbd_volumes')
- libvirt_driver.libvirt_utils.remove_rbd_volumes(fake_pool,
- *fake_volumes[:3])
-
- self.mox.ReplayAll()
+ @mock.patch.object(rbd_utils, 'RBDDriver')
+ def test_cleanup_rbd(self, mock_driver):
+ driver = mock_driver.return_value
+ driver.cleanup_volumes = mock.Mock()
+ fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conn._cleanup_rbd(fake_instance)
- self.mox.VerifyAll()
+ driver.cleanup_volumes.assert_called_once_with(fake_instance)
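Reviewer note: because @mock.patch.object(rbd_utils, 'RBDDriver') replaces the class itself, any RBDDriver(...) constructed inside _cleanup_rbd is mock_driver.return_value, which is what the assertion above inspects. A small sketch of that pattern, with a stand-in class:

    import mock

    class RBDDriver(object):              # stand-in for rbd_utils.RBDDriver
        def cleanup_volumes(self, instance):
            raise RuntimeError('real driver must not run under test')

    with mock.patch('%s.RBDDriver' % __name__) as mock_cls:
        driver = RBDDriver(pool='fake')           # constructing the patched class
        assert driver is mock_cls.return_value    # always yields return_value
        driver.cleanup_volumes({'uuid': 'x'})
        driver.cleanup_volumes.assert_called_once_with({'uuid': 'x'})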
def test_destroy_undefines_no_undefine_flags(self):
mock = self.mox.CreateMock(libvirt.virDomain)
@@ -6654,6 +6971,43 @@ def fake_lookup_name(name):
}
self.assertEqual(actual, expect)
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
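Reviewer note: the uptime of 10 in the serialized diagnostics is simply the pinned "now" minus launched_at; timeutils.set_time_override is what makes it deterministic. The same arithmetic, which also covers the identical expected dicts in the diagnostics tests below:

    import datetime

    launched_at = datetime.datetime(2012, 11, 22, 12, 0, 0)
    now = datetime.datetime(2012, 11, 22, 12, 0, 10)  # set_time_override value

    assert (now - launched_at).total_seconds() == 10.0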
def test_diagnostic_blockstats_exception(self):
xml = """
@@ -6728,6 +7082,35 @@ def fake_lookup_name(name):
}
self.assertEqual(actual, expect)
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
def test_diagnostic_interfacestats_exception(self):
xml = """
@@ -6804,6 +7187,38 @@ def fake_lookup_name(name):
}
self.assertEqual(actual, expect)
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
def test_diagnostic_memorystats_exception(self):
xml = """
@@ -6886,6 +7301,46 @@ def fake_lookup_name(name):
}
self.assertEqual(actual, expect)
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
def test_diagnostic_full(self):
xml = """
@@ -6970,6 +7425,46 @@ def fake_lookup_name(name):
}
self.assertEqual(actual, expect)
+ lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
+ diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
+ timeutils.set_time_override(diags_time)
+
+ actual = conn.get_instance_diagnostics({"name": "testvirt",
+ "launched_at": lt})
+ expected = {'config_drive': False,
+ 'cpu_details': [{'time': 15340000000L},
+ {'time': 1640000000L},
+ {'time': 3040000000L},
+ {'time': 1420000000L}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L},
+ {'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 688640L,
+ 'read_requests': 169L,
+ 'write_bytes': 0L,
+ 'write_requests': 0L}],
+ 'driver': 'libvirt',
+ 'hypervisor_os': 'linux',
+ 'memory_details': {'maximum': 2048, 'used': 1234},
+ 'nic_details': [{'mac_address': '52:54:00:a4:38:38',
+ 'rx_drop': 0L,
+ 'rx_errors': 0L,
+ 'rx_octets': 4408L,
+ 'rx_packets': 82L,
+ 'tx_drop': 0L,
+ 'tx_errors': 0L,
+ 'tx_octets': 0L,
+ 'tx_packets': 0L}],
+ 'state': 'running',
+ 'uptime': 10,
+ 'version': '1.0'}
+ self.assertEqual(expected, actual.serialize())
+
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_list_instance_domains")
def test_failing_vcpu_count(self, mock_list):
@@ -7033,23 +7528,7 @@ def name(self):
mock_list.assert_called_with()
def test_get_memory_used_normal(self):
- def fake_get_info():
- return ['x86_64', 15814L, 8, 1208, 1, 1, 4, 2]
-
- self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
- libvirt_driver.LibvirtDriver._conn.getInfo = fake_get_info
-
- real_open = __builtin__.open
-
- class fake_file(object):
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, exc_traceback):
- return False
-
- def read(self):
- return """
+ m = mock.mock_open(read_data="""
MemTotal: 16194180 kB
MemFree: 233092 kB
MemAvailable: 8892356 kB
@@ -7057,21 +7536,19 @@ def read(self):
Cached: 8362404 kB
SwapCached: 0 kB
Active: 8381604 kB
-"""
-
- def fake_open(path, *args, **kwargs):
- if path == "/proc/meminfo":
- return fake_file()
- else:
- return real_open(path, *args, **kwargs)
-
- self.mox.StubOutWithMock(__builtin__, 'open')
- __builtin__.open = fake_open
+""")
+ with contextlib.nested(
+ mock.patch("__builtin__.open", m, create=True),
+ mock.patch.object(libvirt_driver.LibvirtDriver,
+ "_conn"),
+ mock.patch('sys.platform', 'linux2'),
+ ) as (mock_file, mock_conn, mock_platform):
+ mock_conn.getInfo.return_value = [
+ 'x86_64', 15814L, 8, 1208, 1, 1, 4, 2]
- self.mox.ReplayAll()
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertEqual(6866, drvr._get_memory_mb_used())
+ self.assertEqual(6866, drvr._get_memory_mb_used())
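Reviewer note: _get_memory_mb_used derives used memory from the hypervisor-reported total (getInfo()[1], 15814 MB in the mock) minus (MemFree + Buffers + Cached) read out of /proc/meminfo, converted from kB to MB. The Buffers line sits in unchanged hunk context above, so this hedged sketch takes both inputs as parameters instead of re-deriving the asserted 6866:

    def mem_used_mb(total_mb, meminfo_text):
        # used = hypervisor total - (MemFree + Buffers + Cached) / 1024
        fields = {}
        for line in meminfo_text.splitlines():
            if ':' in line:
                key, value = line.split(':', 1)
                fields[key.strip()] = int(value.split()[0])  # values in kB
        avail_mb = (fields['MemFree'] + fields['Buffers'] +
                    fields['Cached']) // 1024
        return total_mb - avail_mb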
def test_get_memory_used_xen(self):
self.flags(virt_type='xen', group='libvirt')
@@ -7111,7 +7588,8 @@ def UUIDString(self):
"_list_instance_domains"),
mock.patch.object(libvirt_driver.LibvirtDriver,
"_conn"),
- ) as (mock_file, mock_list, mock_conn):
+ mock.patch('sys.platform', 'linux2'),
+ ) as (mock_file, mock_list, mock_conn, mock_platform):
mock_list.return_value = [
DiagFakeDomain(0, 15814),
DiagFakeDomain(1, 750),
@@ -7310,6 +7788,103 @@ def test_get_domain_info_with_more_return(self, lookup_mock):
dom_mock.ID.assert_called_once_with()
lookup_mock.assert_called_once_with(instance['name'])
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_create_domain(self, mock_get_inst_path):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_domain = mock.MagicMock()
+ mock_instance = mock.MagicMock()
+ mock_get_inst_path.return_value = '/tmp/'
+
+ domain = conn._create_domain(domain=mock_domain,
+ instance=mock_instance)
+
+ self.assertEqual(mock_domain, domain)
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_domain.createWithFlags.assert_has_calls([mock.call(0)])
+
+ @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
+ mock_setup_container, mock_get_info, mock_clean):
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_domain = mock.MagicMock()
+ mock_instance = mock.MagicMock()
+ inst_sys_meta = dict()
+ mock_instance.system_metadata = inst_sys_meta
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_get_info.return_value = {'state': power_state.RUNNING}
+
+ domain = conn._create_domain(domain=mock_domain,
+ instance=mock_instance)
+
+ self.assertEqual(mock_domain, domain)
+ self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
+ mock_instance.save.assert_has_calls([mock.call()])
+ mock_domain.createWithFlags.assert_has_calls([mock.call(0)])
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
+ conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
+ 'disk')])
+ setup_container_call = mock.call('/tmp/test.img',
+ container_dir='/tmp/rootfs',
+ use_cow=CONF.use_cow_images)
+ mock_setup_container.assert_has_calls([setup_container_call])
+ mock_get_info.assert_has_calls([mock.call(mock_instance)])
+ mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
+
+ @mock.patch('nova.virt.disk.api.teardown_container')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
+ @mock.patch('nova.virt.disk.api.setup_container')
+ @mock.patch('nova.openstack.common.fileutils.ensure_tree')
+ @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
+ def test_create_domain_lxc_not_running(self, mock_get_inst_path,
+ mock_ensure_tree,
+ mock_setup_container,
+ mock_get_info, mock_teardown):
+ self.flags(virt_type='lxc', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ mock_domain = mock.MagicMock()
+ mock_instance = mock.MagicMock()
+ inst_sys_meta = dict()
+ mock_instance.system_metadata = inst_sys_meta
+ mock_get_inst_path.return_value = '/tmp/'
+ mock_image_backend = mock.MagicMock()
+ conn.image_backend = mock_image_backend
+ mock_image = mock.MagicMock()
+ mock_image.path = '/tmp/test.img'
+ conn.image_backend.image.return_value = mock_image
+ mock_setup_container.return_value = '/dev/nbd0'
+ mock_get_info.return_value = {'state': power_state.SHUTDOWN}
+
+ domain = conn._create_domain(domain=mock_domain,
+ instance=mock_instance)
+
+ self.assertEqual(mock_domain, domain)
+ self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
+ mock_instance.save.assert_has_calls([mock.call()])
+ mock_domain.createWithFlags.assert_has_calls([mock.call(0)])
+ mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
+ mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
+ conn.image_backend.image.assert_has_calls([mock.call(mock_instance,
+ 'disk')])
+ setup_container_call = mock.call('/tmp/test.img',
+ container_dir='/tmp/rootfs',
+ use_cow=CONF.use_cow_images)
+ mock_setup_container.assert_has_calls([setup_container_call])
+ mock_get_info.assert_has_calls([mock.call(mock_instance)])
+ teardown_call = mock.call(container_dir='/tmp/rootfs')
+ mock_teardown.assert_has_calls([teardown_call])
+
def test_create_domain_define_xml_fails(self):
"""Tests that the xml is logged when defining the domain fails."""
fake_xml = "this is a test"
@@ -7510,15 +8085,17 @@ def _test_attach_detach_interface_get_config(self, method_name):
else:
raise ValueError("Unhandled method %" % method_name)
- fake_flavor = flavor_obj.Flavor.get_by_id(
+ fake_flavor = objects.Flavor.get_by_id(
self.context, test_instance['instance_type_id'])
expected = conn.vif_driver.get_config(test_instance, network_info[0],
fake_image_meta,
- fake_flavor)
+ fake_flavor,
+ CONF.libvirt.virt_type)
self.mox.StubOutWithMock(conn.vif_driver, 'get_config')
conn.vif_driver.get_config(test_instance, network_info[0],
fake_image_meta,
- mox.IsA(flavor_obj.Flavor)).\
+ mox.IsA(objects.Flavor),
+ CONF.libvirt.virt_type).\
AndReturn(expected)
self.mox.ReplayAll()
@@ -7591,6 +8168,18 @@ def test_default_device_names_for_instance(self):
ephemerals, swap,
block_device_mapping)
+ def test_is_supported_fs_format(self):
+ supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
+ disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ for fs in supported_fs:
+ self.assertTrue(conn.is_supported_fs_format(fs))
+
+ supported_fs = ['', 'dummy']
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ for fs in supported_fs:
+ self.assertFalse(conn.is_supported_fs_format(fs))
+
def test_hypervisor_hostname_caching(self):
# Make sure that the first hostname is always returned
class FakeConn(object):
@@ -7678,7 +8267,7 @@ def fake_none(*args, **kwargs):
return
def fake_getLibVersion():
- return 9007
+ return 9011
def fake_getCapabilities():
return """
@@ -7750,22 +8339,28 @@ def fake_baselineCPU(cpu, flag):
self.stubs.Set(conn,
'_lookup_by_name',
fake_lookup_name)
- block_device_info = {'block_device_mapping': [
- mocked_bdm(1, {'guest_format': None,
+ block_device_info = {'block_device_mapping':
+ driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'guest_format': None,
'boot_index': 0,
- 'mount_device': '/dev/vda',
- 'connection_info':
- {'driver_volume_type': 'iscsi'},
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': '/dev/vda',
'disk_bus': 'virtio',
'device_type': 'disk',
'delete_on_termination': False}),
- ]}
- conn.post_live_migration_at_destination(self.context, instance,
- network_info, True,
- block_device_info=block_device_info)
- self.assertTrue('fake' in self.resultXML)
- self.assertTrue(
- block_device_info['block_device_mapping'][0].save.called)
+ ])}
+ block_device_info['block_device_mapping'][0]['connection_info'] = (
+ {'driver_volume_type': 'iscsi'})
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ conn.post_live_migration_at_destination(
+ self.context, instance, network_info, True,
+ block_device_info=block_device_info)
+ self.assertTrue('fake' in self.resultXML)
+ self.assertTrue(
+ block_device_info['block_device_mapping'][0].save.called)
def test_create_propagates_exceptions(self):
self.flags(virt_type='lxc', group='libvirt')
@@ -7803,10 +8398,31 @@ def test_create_without_pause(self):
self.assertEqual(0, create.call_args_list[0][1]['launch_flags'])
self.assertEqual(0, domain.resume.call_count)
+ def test_lxc_create_and_rootfs_saved(self):
+ self.flags(virt_type='lxc', group='libvirt')
+
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = db.instance_create(self.context, self.test_instance)
+ inst_obj = objects.Instance.get_by_uuid(self.context, instance['uuid'])
+
+ with contextlib.nested(
+ mock.patch('nova.virt.disk.api.setup_container',
+ return_value='/dev/nbd1'),
+ mock.patch('nova.virt.disk.api.clean_lxc_namespace'),
+ mock.patch('nova.openstack.common.fileutils.ensure_tree'),
+ mock.patch.object(conn.image_backend, 'image'),
+ mock.patch.object(conn, '_enable_hairpin'),
+ mock.patch.object(conn, 'get_info',
+ return_value={'state': power_state.RUNNING})
+ ):
+ conn._conn.defineXML = mock.Mock()
+ conn._create_domain('xml', instance=inst_obj)
+ self.assertEqual('/dev/nbd1',
+ inst_obj.system_metadata.get(
+ 'rootfs_device_name'))
+
def _test_create_with_network_events(self, neutron_failure=None,
power_on=True):
- self.flags(vif_driver="nova.tests.fake_network.FakeVIFDriver",
- group='libvirt')
generated_events = []
def wait_timeout():
@@ -7917,6 +8533,88 @@ def test_create_with_network_events_neutron_failed_fatal_error(
def test_create_with_network_events_non_neutron(self, is_neutron):
self._test_create_with_network_events()
+ @mock.patch('nova.volume.encryptors.get_encryption_metadata')
+ @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
+ def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx)
+ mock_dom = mock.MagicMock()
+ mock_encryption_meta = mock.MagicMock()
+ get_encryption_metadata.return_value = mock_encryption_meta
+
+ fake_xml = """
+
+ instance-00000001
+ 1048576
+ 1
+
+
+
+
+
+
+
+
+ """
+ fake_volume_id = "fake-volume-id"
+ connection_info = {"driver_volume_type": "fake",
+ "data": {"access_mode": "rw",
+ "volume_id": fake_volume_id}}
+
+ def fake_getitem(*args, **kwargs):
+ fake_bdm = {'connection_info': connection_info,
+ 'mount_device': '/dev/vda'}
+ return fake_bdm.get(args[0])
+
+ mock_volume = mock.MagicMock()
+ mock_volume.__getitem__.side_effect = fake_getitem
+ bdi = {'block_device_mapping': [mock_volume]}
+ network_info = [network_model.VIF(id='1'),
+ network_model.VIF(id='2', active=True)]
+ disk_info = {'bus': 'virtio', 'type': 'file',
+ 'dev': 'vda'}
+ get_info_from_bdm.return_value = disk_info
+
+ with contextlib.nested(
+ mock.patch.object(conn, '_connect_volume'),
+ mock.patch.object(conn, '_get_volume_encryptor'),
+ mock.patch.object(conn, 'plug_vifs'),
+ mock.patch.object(conn.firewall_driver, 'setup_basic_filtering'),
+ mock.patch.object(conn.firewall_driver,
+ 'prepare_instance_filter'),
+ mock.patch.object(conn, '_create_domain'),
+ mock.patch.object(conn.firewall_driver, 'apply_instance_filter'),
+ ) as (connect_volume, get_volume_encryptor, plug_vifs,
+ setup_basic_filtering, prepare_instance_filter, create_domain,
+ apply_instance_filter):
+ connect_volume.return_value = mock.MagicMock(
+ source_path='/path/fake-volume1')
+ create_domain.return_value = mock_dom
+
+ domain = conn._create_domain_and_network(self.context, fake_xml,
+ instance, network_info,
+ block_device_info=bdi)
+
+ get_info_from_bdm.assert_called_once_with(CONF.libvirt.virt_type,
+ mock_volume)
+ connect_volume.assert_called_once_with(connection_info, disk_info)
+ self.assertEqual(connection_info['data']['device_path'],
+ '/path/fake-volume1')
+ mock_volume.save.assert_called_once_with(self.context)
+ get_encryption_metadata.assert_called_once_with(self.context,
+ conn._volume_api, fake_volume_id, connection_info)
+ get_volume_encryptor.assert_called_once_with(connection_info,
+ mock_encryption_meta)
+ plug_vifs.assert_called_once_with(instance, network_info)
+ setup_basic_filtering.assert_called_once_with(instance,
+ network_info)
+ prepare_instance_filter.assert_called_once_with(instance,
+ network_info)
+ create_domain.assert_called_once_with(fake_xml, instance=instance,
+ launch_flags=0,
+ power_on=True)
+ self.assertEqual(mock_dom, domain)
+
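Reviewer note: fake_getitem above exists because mock_volume is accessed dict-style, and subscripting a MagicMock routes through __getitem__, which has to be configured explicitly. A minimal sketch of the idiom:

    import mock

    backing = {'connection_info': {'driver_volume_type': 'fake'},
               'mount_device': '/dev/vda'}

    mock_volume = mock.MagicMock()
    mock_volume.__getitem__.side_effect = lambda key: backing.get(key)

    assert mock_volume['mount_device'] == '/dev/vda'
    assert mock_volume['missing'] is None   # .get() tolerates absent keys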
def test_get_neutron_events(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
network_info = [network_model.VIF(id='1'),
@@ -7955,6 +8653,79 @@ def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
conn.cleanup, 'ctxt', fake_inst, 'netinfo')
unplug.assert_called_once_with(fake_inst, 'netinfo', True)
+ def test_swap_volume(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ mock_dom = mock.MagicMock()
+
+ with mock.patch.object(drvr._conn, 'defineXML',
+ create=True) as mock_define:
+ xmldoc = ""
+ srcfile = "/first/path"
+ dstfile = "/second/path"
+
+ mock_dom.XMLDesc.return_value = xmldoc
+ mock_dom.isPersistent.return_value = True
+ mock_dom.blockJobInfo.return_value = {}
+
+ drvr._swap_volume(mock_dom, srcfile, dstfile, 1)
+
+ mock_dom.XMLDesc.assert_called_once_with(
+ fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
+ fakelibvirt.VIR_DOMAIN_XML_SECURE)
+ mock_dom.blockRebase.assert_called_once_with(
+ srcfile, dstfile, 0,
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
+ mock_dom.blockResize.assert_called_once_with(
+ srcfile, 1 * units.Gi / units.Ki)
+ mock_define.assert_called_once_with(xmldoc)
+
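Reviewer note: virDomainBlockResize takes its size in KiB by default, which is why the test passes 1 * units.Gi / units.Ki. A quick check of that conversion, with the binary-unit constants written out to match oslo's units module:

    Gi = 1024 ** 3   # units.Gi
    Ki = 1024        # units.Ki

    # 1 GiB expressed in KiB, the value handed to blockResize above.
    assert 1 * Gi / Ki == 1048576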
+ def test_live_snapshot(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+
+ mock_dom = mock.MagicMock()
+
+ with contextlib.nested(
+ mock.patch.object(drvr._conn, 'defineXML', create=True),
+ mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
+ mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
+ mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
+ mock.patch.object(fake_libvirt_utils, 'chown'),
+ mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
+ ) as (mock_define, mock_size, mock_backing, mock_create_cow,
+ mock_chown, mock_snapshot):
+
+ xmldoc = ""
+ srcfile = "/first/path"
+ dstfile = "/second/path"
+ bckfile = "/other/path"
+ dltfile = dstfile + ".delta"
+
+ mock_dom.XMLDesc.return_value = xmldoc
+ mock_dom.isPersistent.return_value = True
+ mock_size.return_value = 1004009
+ mock_backing.return_value = bckfile
+
+ drvr._live_snapshot(mock_dom, srcfile, dstfile, "qcow2")
+
+ mock_dom.XMLDesc.assert_called_once_with(
+ fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
+ fakelibvirt.VIR_DOMAIN_XML_SECURE)
+ mock_dom.blockRebase.assert_called_once_with(
+ srcfile, dltfile, 0,
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
+ libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
+
+ mock_size.assert_called_once_with(srcfile)
+ mock_backing.assert_called_once_with(srcfile, basename=False)
+ mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
+ mock_chown.assert_called_once_with(dltfile, os.getuid())
+ mock_snapshot.assert_called_once_with(dltfile, "qcow2",
+ dstfile, "qcow2")
+ mock_define.assert_called_once_with(xmldoc)
+
class HostStateTestCase(test.TestCase):
@@ -9217,6 +9988,46 @@ def fake_execute(*args, **kwargs):
self.assertFalse(self.copy_or_move_swap_called)
self.assertEqual(disk_info_text, out)
+ def test_migrate_disk_and_power_off_lvm(self):
+ """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
+ .migrate_disk_and_power_off.
+ """
+
+ self.flags(images_type='lvm', group='libvirt')
+ disk_info = [{'type': 'raw', 'path': '/dev/vg/disk',
+ 'disk_size': '83886080'},
+ {'type': 'raw', 'path': '/dev/disk.local',
+ 'disk_size': '83886080'}]
+ disk_info_text = jsonutils.dumps(disk_info)
+
+ def fake_get_instance_disk_info(instance, xml=None,
+ block_device_info=None):
+ return disk_info_text
+
+ def fake_destroy(instance):
+ pass
+
+ def fake_get_host_ip_addr():
+ return '10.0.0.1'
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ self.stubs.Set(self.libvirtconnection, 'get_instance_disk_info',
+ fake_get_instance_disk_info)
+ self.stubs.Set(self.libvirtconnection, '_destroy', fake_destroy)
+ self.stubs.Set(self.libvirtconnection, 'get_host_ip_addr',
+ fake_get_host_ip_addr)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ ins_ref = self._create_instance()
+ flavor = {'root_gb': 10, 'ephemeral_gb': 20}
+
+ # Migration is not implemented for LVM backed instances
+ self.assertRaises(exception.MigrationPreCheckError,
+ self.libvirtconnection.migrate_disk_and_power_off,
+ None, ins_ref, '10.0.0.1', flavor, None)
+
def test_migrate_disk_and_power_off_resize_error(self):
instance = self._create_instance()
flavor = {'root_gb': 5}
@@ -9294,26 +10105,9 @@ def test_disk_qcow2_to_raw(self, mock_execute):
def test_disk_resize_raw(self, mock_extend):
info = {'type': 'raw', 'path': '/test/disk'}
- self.flags(use_cow_images=False)
-
self.libvirtconnection._disk_resize(info, 50)
mock_extend.assert_called_once_with(info['path'], 50, use_cow=False)
- @mock.patch('nova.virt.disk.api.extend')
- def test_disk_resize_raw_use_cow_images(self, mock_extend):
- info = {'type': 'raw', 'path': '/test/disk'}
-
- self.flags(use_cow_images=True)
-
- with mock.patch.object(
- self.libvirtconnection, '_disk_raw_to_qcow2') as mock_convert:
-
- self.libvirtconnection._disk_resize(info, 50)
-
- mock_convert.assert_called_once_with(info['path'])
- mock_extend.assert_called_once_with(
- info['path'], 50, use_cow=False)
-
@mock.patch('nova.virt.disk.api.can_resize_image')
@mock.patch('nova.virt.disk.api.is_image_partitionless')
@mock.patch('nova.virt.disk.api.extend')
@@ -9321,34 +10115,6 @@ def test_disk_resize_qcow2(
self, mock_extend, mock_can_resize, mock_is_partitionless):
info = {'type': 'qcow2', 'path': '/test/disk'}
- self.flags(use_cow_images=False)
-
- with contextlib.nested(
- mock.patch.object(
- self.libvirtconnection, '_disk_qcow2_to_raw'),
- mock.patch.object(
- self.libvirtconnection, '_disk_raw_to_qcow2'))\
- as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):
-
- mock_can_resize.return_value = True
- mock_is_partitionless.return_value = True
-
- self.libvirtconnection._disk_resize(info, 50)
-
- mock_disk_qcow2_to_raw.assert_called_once_with(info['path'])
- mock_extend.assert_called_once_with(
- info['path'], 50, use_cow=False)
- self.assertFalse(mock_disk_raw_to_qcow2.called)
-
- @mock.patch('nova.virt.disk.api.can_resize_image')
- @mock.patch('nova.virt.disk.api.is_image_partitionless')
- @mock.patch('nova.virt.disk.api.extend')
- def test_disk_resize_qcow2_use_cow_images(
- self, mock_extend, mock_can_resize, mock_is_partitionless):
- info = {'type': 'qcow2', 'path': '/test/disk'}
-
- self.flags(use_cow_images=True)
-
with contextlib.nested(
mock.patch.object(
self.libvirtconnection, '_disk_qcow2_to_raw'),
@@ -9366,7 +10132,7 @@ def test_disk_resize_qcow2_use_cow_images(
info['path'], 50, use_cow=False)
mock_disk_raw_to_qcow2.assert_called_once_with(info['path'])
- def _test_finish_migration(self, power_on):
+ def _test_finish_migration(self, power_on, resize_instance=False):
"""Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
.finish_migration.
"""
@@ -9378,12 +10144,7 @@ def _test_finish_migration(self, power_on):
disk_info_text = jsonutils.dumps(disk_info)
powered_on = power_on
self.fake_create_domain_called = False
-
- def fake_can_resize_image(path, size):
- return False
-
- def fake_extend(path, size, use_cow=False):
- pass
+ self.fake_disk_resize_called = False
def fake_to_xml(context, instance, network_info, disk_info,
image_meta=None, rescue=None,
@@ -9417,10 +10178,12 @@ def fake_get_info(instance):
else:
return {'state': power_state.SHUTDOWN}
+ def fake_disk_resize(info, size):
+ self.fake_disk_resize_called = True
+
self.flags(use_cow_images=True)
- self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
- self.stubs.Set(libvirt_driver.disk, 'can_resize_image',
- fake_can_resize_image)
+ self.stubs.Set(self.libvirtconnection, '_disk_resize',
+ fake_disk_resize)
self.stubs.Set(self.libvirtconnection, '_get_guest_xml', fake_to_xml)
self.stubs.Set(self.libvirtconnection, 'plug_vifs', fake_plug_vifs)
self.stubs.Set(self.libvirtconnection, '_create_image',
@@ -9439,8 +10202,14 @@ def fake_get_info(instance):
self.libvirtconnection.finish_migration(
context.get_admin_context(), None, ins_ref,
- disk_info_text, [], None, None, None, power_on)
+ disk_info_text, [], None,
+ resize_instance, None, power_on)
self.assertTrue(self.fake_create_domain_called)
+ self.assertEqual(
+ resize_instance, self.fake_disk_resize_called)
+
+ def test_finish_migration_resize(self):
+ self._test_finish_migration(True, resize_instance=True)
def test_finish_migration_power_on(self):
self._test_finish_migration(True)
@@ -9839,21 +10608,23 @@ def _test_attach_detach_interface(self, method, power_state,
self.libvirtconnection.firewall_driver.setup_basic_filtering(
instance, [network_info[0]])
- fake_flavor = flavor_obj.Flavor.get_by_id(
+ fake_flavor = objects.Flavor.get_by_id(
self.context, instance['instance_type_id'])
if method == 'attach_interface':
fake_image_meta = {'id': instance['image_ref']}
elif method == 'detach_interface':
fake_image_meta = None
expected = self.libvirtconnection.vif_driver.get_config(
- instance, network_info[0], fake_image_meta, fake_flavor)
+ instance, network_info[0], fake_image_meta, fake_flavor,
+ CONF.libvirt.virt_type)
self.mox.StubOutWithMock(self.libvirtconnection.vif_driver,
'get_config')
self.libvirtconnection.vif_driver.get_config(
instance, network_info[0],
fake_image_meta,
- mox.IsA(flavor_obj.Flavor)).AndReturn(expected)
+ mox.IsA(objects.Flavor),
+ CONF.libvirt.virt_type).AndReturn(expected)
domain.info().AndReturn([power_state])
if method == 'attach_interface':
domain.attachDeviceFlags(expected.to_xml(), expected_flags)
@@ -9903,6 +10674,303 @@ def test_detach_interface_with_shutdown_instance(self):
'detach_interface', power_state.SHUTDOWN,
expected_flags=(libvirt.VIR_DOMAIN_AFFECT_CONFIG))
+ def test_rescue(self):
+ instance = self._create_instance()
+ instance.config_drive = False
+ dummyxml = ("instance-0000000a"
+ ""
+ ""
+ ""
+ ""
+ ""
+ ""
+ ""
+ "")
+ network_info = _fake_network_info(self.stubs, 1)
+
+ self.mox.StubOutWithMock(self.libvirtconnection,
+ '_get_existing_domain_xml')
+ self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
+ self.mox.StubOutWithMock(imagebackend.Backend, 'image')
+ self.mox.StubOutWithMock(imagebackend.Image, 'cache')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')
+
+ self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
+ mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+ imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'disk.rescue', 'default'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ user_id=mox.IgnoreArg()).MultipleTimes()
+
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ size=None, user_id=mox.IgnoreArg())
+
+ image_meta = {'id': 'fake', 'name': 'fake'}
+ self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
+ network_info, mox.IgnoreArg(),
+ image_meta, rescue=mox.IgnoreArg(),
+ write_to_disk=mox.IgnoreArg()
+ ).AndReturn(dummyxml)
+
+ self.libvirtconnection._destroy(instance)
+ self.libvirtconnection._create_domain(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ rescue_password = 'fake_password'
+
+ self.libvirtconnection.rescue(self.context, instance,
+ network_info, image_meta, rescue_password)
+ self.mox.VerifyAll()
+
+ def test_rescue_config_drive(self):
+ instance = self._create_instance()
+ uuid = instance.uuid
+ configdrive_path = uuid + '/disk.config.rescue'
+ dummyxml = ("instance-0000000a"
+ ""
+ ""
+ ""
+ ""
+ ""
+ ""
+ ""
+ "")
+ network_info = _fake_network_info(self.stubs, 1)
+
+ self.mox.StubOutWithMock(self.libvirtconnection,
+ '_get_existing_domain_xml')
+ self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
+ self.mox.StubOutWithMock(imagebackend.Backend, 'image')
+ self.mox.StubOutWithMock(imagebackend.Image, 'cache')
+ self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
+ '__init__')
+ self.mox.StubOutWithMock(configdrive, 'ConfigDriveBuilder')
+ self.mox.StubOutWithMock(configdrive.ConfigDriveBuilder, 'make_drive')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_get_guest_xml')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_destroy')
+ self.mox.StubOutWithMock(self.libvirtconnection, '_create_domain')
+
+ self.libvirtconnection._get_existing_domain_xml(mox.IgnoreArg(),
+ mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
+ libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Backend.image(instance, 'disk.rescue', 'default'
+ ).AndReturn(fake_imagebackend.Raw())
+
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ user_id=mox.IgnoreArg()).MultipleTimes()
+
+ imagebackend.Image.cache(context=mox.IgnoreArg(),
+ fetch_func=mox.IgnoreArg(),
+ filename=mox.IgnoreArg(),
+ image_id=mox.IgnoreArg(),
+ project_id=mox.IgnoreArg(),
+ size=None, user_id=mox.IgnoreArg())
+
+ instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
+ content=mox.IgnoreArg(),
+ extra_md=mox.IgnoreArg(),
+ network_info=mox.IgnoreArg())
+ cdb = self.mox.CreateMockAnything()
+ m = configdrive.ConfigDriveBuilder(instance_md=mox.IgnoreArg())
+ m.AndReturn(cdb)
+ # __enter__ and __exit__ are required by "with"
+ cdb.__enter__().AndReturn(cdb)
+ cdb.make_drive(mox.Regex(configdrive_path))
+ cdb.__exit__(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()
+ ).AndReturn(None)
+
+ imagebackend.Backend.image(instance, 'disk.config.rescue', 'raw'
+ ).AndReturn(fake_imagebackend.Raw())
+ imagebackend.Image.cache(fetch_func=mox.IgnoreArg(),
+ context=mox.IgnoreArg(),
+ filename='disk.config.rescue')
+
+ image_meta = {'id': 'fake', 'name': 'fake'}
+ self.libvirtconnection._get_guest_xml(mox.IgnoreArg(), instance,
+ network_info, mox.IgnoreArg(),
+ image_meta, rescue=mox.IgnoreArg(),
+ write_to_disk=mox.IgnoreArg()
+ ).AndReturn(dummyxml)
+ self.libvirtconnection._destroy(instance)
+ self.libvirtconnection._create_domain(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+
+ rescue_password = 'fake_password'
+
+ self.libvirtconnection.rescue(self.context, instance, network_info,
+ image_meta, rescue_password)
+ self.mox.VerifyAll()
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files(self, get_instance_path, exists, exe,
+ shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ exe.assert_called_with('mv', '/path', '/path_del')
+ shutil.assert_called_with('/path_del')
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_resize(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = [Exception(), None]
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')]
+ self.assertEqual(expected, exe.mock_calls)
+ shutil.assert_called_with('/path_del')
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
+ shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ exists.side_effect = [False, False, True, True]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ exe.assert_called_with('mv', '/path', '/path_del')
+ shutil.assert_called_with('/path_del')
+ self.assertFalse(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = Exception()
+ exists.side_effect = [True, True]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')] * 2
+ self.assertEqual(expected, exe.mock_calls)
+ self.assertFalse(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_resume(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = Exception()
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')] * 2
+ self.assertEqual(expected, exe.mock_calls)
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_none(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = Exception()
+ exists.side_effect = [False, False, False, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')] * 2
+ self.assertEqual(expected, exe.mock_calls)
+ self.assertEqual(0, len(shutil.mock_calls))
+ self.assertTrue(result)
+
+ @mock.patch('shutil.rmtree')
+ @mock.patch('nova.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('nova.virt.libvirt.utils.get_instance_path')
+ def test_delete_instance_files_concurrent(self, get_instance_path, exists,
+ exe, shutil):
+ lv = self.libvirtconnection
+ get_instance_path.return_value = '/path'
+ instance = objects.Instance(uuid='fake-uuid', id=1)
+
+ nova.utils.execute.side_effect = [Exception(), Exception(), None]
+ exists.side_effect = [False, False, True, False]
+
+ result = lv.delete_instance_files(instance)
+ get_instance_path.assert_called_with(instance)
+ expected = [mock.call('mv', '/path', '/path_del'),
+ mock.call('mv', '/path_resize', '/path_del')]
+ expected.append(expected[0])
+ self.assertEqual(expected, exe.mock_calls)
+ shutil.assert_called_with('/path_del')
+ self.assertTrue(result)
+
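Reviewer note: the delete_instance_files tests script successive probes by assigning lists to side_effect; each call consumes the next item, and an Exception instance in the list is raised rather than returned. A minimal sketch of both behaviours:

    import mock

    exists = mock.Mock(side_effect=[False, False, True, False])
    assert [exists('/p') for _ in range(4)] == [False, False, True, False]

    execute = mock.Mock(side_effect=[Exception('busy'), None])
    try:
        execute('mv', '/path', '/path_del')        # first call raises
    except Exception:
        pass
    execute('mv', '/path_resize', '/path_del')     # second call returns None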
class LibvirtVolumeUsageTestCase(test.TestCase):
"""Test for LibvirtDriver.get_all_volume_usage."""
@@ -10029,6 +11097,40 @@ def setUp(self):
"""
+ # alternate domain info with network-backed snapshot chain
+ self.dom_netdisk_xml = """
+
+
+
+
+
+ 0e38683e-f0af-418f-a3f1-6b67eaffffff
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 0e38683e-f0af-418f-a3f1-6b67ea0f919d
+
+
+
+ """
+
self.create_info = {'type': 'qcow2',
'snapshot_id': '1234-5678',
'new_file': 'new-file'}
@@ -10044,6 +11146,10 @@ def setUp(self):
'file_to_merge': 'snap.img',
'merge_target_file': 'other-snap.img'}
+ self.delete_info_netdisk = {'type': 'qcow2',
+ 'file_to_merge': 'snap.img',
+ 'merge_target_file': 'root.img'}
+
self.delete_info_invalid_type = {'type': 'made_up_type',
'file_to_merge': 'some_file',
'merge_target_file':
@@ -10412,3 +11518,89 @@ def test_volume_snapshot_delete_invalid_type(self):
self.volume_uuid,
self.snapshot_id,
self.delete_info_invalid_type)
+
+ def test_volume_snapshot_delete_netdisk_1(self):
+ """Delete newest snapshot -- blockRebase for libgfapi/network disk."""
+
+ class FakeNetdiskDomain(FakeVirtDomain):
+ def __init__(self, *args, **kwargs):
+ super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
+
+ def XMLDesc(self, *args):
+ return self.dom_netdisk_xml
+
+ # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
+ self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
+
+ instance = db.instance_create(self.c, self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ domain.blockRebase('vdb', 'vdb[1]', 0, 0)
+
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id, self.delete_info_1)
+
+ self.mox.VerifyAll()
+
+ def test_volume_snapshot_delete_netdisk_2(self):
+ """Delete older snapshot -- blockCommit for libgfapi/network disk."""
+
+ class FakeNetdiskDomain(FakeVirtDomain):
+ def __init__(self, *args, **kwargs):
+ super(FakeNetdiskDomain, self).__init__(*args, **kwargs)
+
+ def XMLDesc(self, *args):
+ return self.dom_netdisk_xml
+
+ # Ensure the libvirt lib has VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
+ self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
+
+ instance = db.instance_create(self.c, self.inst)
+ snapshot_id = 'snapshot-1234'
+
+ domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
+ self.mox.StubOutWithMock(domain, 'XMLDesc')
+ domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)
+
+ self.mox.StubOutWithMock(self.conn, '_lookup_by_name')
+ self.mox.StubOutWithMock(self.conn, '_has_min_version')
+ self.mox.StubOutWithMock(domain, 'blockRebase')
+ self.mox.StubOutWithMock(domain, 'blockCommit')
+ self.mox.StubOutWithMock(domain, 'blockJobInfo')
+
+ self.conn._lookup_by_name('instance-%s' % instance['id']).\
+ AndReturn(domain)
+ self.conn._has_min_version(mox.IgnoreArg()).AndReturn(True)
+
+ domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
+ fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
+
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
+ domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})
+
+ self.mox.ReplayAll()
+
+ self.conn._volume_snapshot_delete(self.c, instance, self.volume_uuid,
+ snapshot_id,
+ self.delete_info_netdisk)
+
+ self.mox.VerifyAll()
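Reviewer note: the paired blockJobInfo expectations ({'cur': 1, 'end': 1000}, then {'cur': 1000, 'end': 1000}) model a block job that is polled until it completes. A hedged sketch of the kind of wait loop the driver runs (the real code also handles errors; the sleep interval here is illustrative):

    import time

    def wait_for_block_job(domain, disk, interval=0.5):
        # Poll until the job is reaped (empty status) or reports cur == end,
        # matching the two scripted blockJobInfo returns above.
        while True:
            status = domain.blockJobInfo(disk, 0)
            if not status:
                return
            if status.get('cur') == status.get('end'):
                return
            time.sleep(interval)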
diff --git a/nova/tests/virt/libvirt/test_imagebackend.py b/nova/tests/virt/libvirt/test_imagebackend.py
index 871427cf44..3a87c2e397 100644
--- a/nova/tests/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/virt/libvirt/test_imagebackend.py
@@ -18,7 +18,6 @@
import tempfile
import fixtures
-import mock
from oslo.config import cfg
import inspect
@@ -30,6 +29,7 @@
from nova.tests import fake_processutils
from nova.tests.virt.libvirt import fake_libvirt_utils
from nova.virt.libvirt import imagebackend
+from nova.virt.libvirt import rbd_utils
CONF = cfg.CONF
@@ -380,14 +380,14 @@ def test_create_image_with_size(self):
def test_create_image_too_small(self):
fn = self.prepare_mocks()
self.mox.StubOutWithMock(os.path, 'exists')
- self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
+ self.mox.StubOutWithMock(imagebackend.Qcow2, 'get_disk_size')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.DISK_INFO_PATH).AndReturn(False)
os.path.exists(self.INSTANCES_PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
- imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
- ).AndReturn(self.SIZE)
+ imagebackend.Qcow2.get_disk_size(self.TEMPLATE_PATH
+ ).AndReturn(self.SIZE)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
@@ -671,14 +671,8 @@ def setUp(self):
group='libvirt')
self.libvirt_utils = imagebackend.libvirt_utils
self.utils = imagebackend.utils
- self.rbd = self.mox.CreateMockAnything()
- self.rados = self.mox.CreateMockAnything()
-
- def prepare_mocks(self):
- fn = self.mox.CreateMockAnything()
- self.mox.StubOutWithMock(imagebackend, 'rbd')
- self.mox.StubOutWithMock(imagebackend, 'rados')
- return fn
+ self.mox.StubOutWithMock(rbd_utils, 'rbd')
+ self.mox.StubOutWithMock(rbd_utils, 'rados')
def test_cache(self):
image = self.image_class(self.INSTANCE, self.NAME)
@@ -699,6 +693,7 @@ def test_cache(self):
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
+ fn = self.mox.CreateMockAnything()
image = self.image_class(self.INSTANCE, self.NAME)
self.mox.StubOutWithMock(os.path, 'exists')
@@ -746,23 +741,77 @@ def test_cache_template_exists(self):
self.mox.VerifyAll()
def test_create_image(self):
- fn = self.prepare_mocks()
- fn(max_size=None, rbd=self.rbd, target=self.TEMPLATE_PATH)
+ fn = self.mox.CreateMockAnything()
+ fn(max_size=None, target=self.TEMPLATE_PATH)
+
+ rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
+
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ image.check_image_exists().AndReturn(False)
+ image.check_image_exists().AndReturn(False)
+ self.mox.ReplayAll()
+
+ image.create_image(fn, self.TEMPLATE_PATH, None)
+
+ rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
+ cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
+ rbd_name, '--new-format', '--id', self.USER,
+ '--conf', self.CONF)
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ [' '.join(cmd)])
+ self.mox.VerifyAll()
+
+ def test_create_image_resize(self):
+ fn = self.mox.CreateMockAnything()
+ full_size = self.SIZE * 2
+ fn(max_size=full_size, target=self.TEMPLATE_PATH)
- self.rbd.RBD_FEATURE_LAYERING = 1
+ rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
- self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
- imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
- ).AndReturn(self.SIZE)
- rbd_name = "%s/%s" % (self.INSTANCE['name'], self.NAME)
- cmd = ('--pool', self.POOL, self.TEMPLATE_PATH,
+ fake_processutils.fake_execute_clear_log()
+ fake_processutils.stub_out_processutils_execute(self.stubs)
+
+ image = self.image_class(self.INSTANCE, self.NAME)
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ image.check_image_exists().AndReturn(False)
+ image.check_image_exists().AndReturn(False)
+ rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
+ cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name, '--new-format', '--id', self.USER,
'--conf', self.CONF)
- self.libvirt_utils.import_rbd_image(self.TEMPLATE_PATH, *cmd)
+ self.mox.StubOutWithMock(image, 'get_disk_size')
+ image.get_disk_size(rbd_name).AndReturn(self.SIZE)
+ self.mox.StubOutWithMock(image.driver, 'resize')
+ image.driver.resize(rbd_name, full_size)
+
self.mox.ReplayAll()
+ image.create_image(fn, self.TEMPLATE_PATH, full_size)
+
+ self.assertEqual(fake_processutils.fake_execute_get_log(),
+ [' '.join(cmd)])
+ self.mox.VerifyAll()
+
+ def test_create_image_already_exists(self):
+ rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
+
image = self.image_class(self.INSTANCE, self.NAME)
- image.create_image(fn, self.TEMPLATE_PATH, None, rbd=self.rbd)
+ self.mox.StubOutWithMock(image, 'check_image_exists')
+ image.check_image_exists().AndReturn(True)
+ self.mox.StubOutWithMock(image, 'get_disk_size')
+ image.get_disk_size(self.TEMPLATE_PATH).AndReturn(self.SIZE)
+ image.check_image_exists().AndReturn(True)
+ rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
+ image.get_disk_size(rbd_name).AndReturn(self.SIZE)
+
+ self.mox.ReplayAll()
+
+ fn = self.mox.CreateMockAnything()
+ image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
@@ -771,8 +820,6 @@ def test_prealloc_image(self):
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
- self.mox.StubOutWithMock(imagebackend, 'rbd')
- self.mox.StubOutWithMock(imagebackend, 'rados')
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
@@ -807,16 +854,6 @@ def test_image_path(self):
self.assertEqual(image.path, rbd_path)
- def test_resize(self):
- image = self.image_class(self.INSTANCE, self.NAME)
- with mock.patch.object(imagebackend, "RBDVolumeProxy") as mock_proxy:
- volume_mock = mock.Mock()
- mock_proxy.side_effect = [mock_proxy]
- mock_proxy.__enter__.side_effect = [volume_mock]
-
- image._resize(image.rbd_name, self.SIZE)
- volume_mock.resize.assert_called_once_with(self.SIZE)
-
class BackendTestCase(test.NoDBTestCase):
INSTANCE = {'name': 'fake-instance',
@@ -859,6 +896,8 @@ def test_image_rbd(self):
pool = "FakePool"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
+ self.mox.StubOutWithMock(rbd_utils, 'rbd')
+ self.mox.StubOutWithMock(rbd_utils, 'rados')
self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
def test_image_default(self):
diff --git a/nova/tests/virt/libvirt/test_imagecache.py b/nova/tests/virt/libvirt/test_imagecache.py
index 7536ebc696..de238f5c11 100644
--- a/nova/tests/virt/libvirt/test_imagecache.py
+++ b/nova/tests/virt/libvirt/test_imagecache.py
@@ -17,7 +17,6 @@
import contextlib
import cStringIO
import hashlib
-import json
import os
import time
@@ -26,13 +25,14 @@
from nova import conductor
from nova import db
from nova.openstack.common import importutils
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import test
from nova.tests import fake_instance
from nova import utils
from nova.virt.libvirt import imagecache
-from nova.virt.libvirt import utils as virtutils
+from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
@@ -165,7 +165,7 @@ def test_list_backing_images_small(self):
'instance-00000002', 'instance-00000003'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('instance-') != -1)
- self.stubs.Set(virtutils, 'get_disk_backing_file',
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path,
@@ -187,7 +187,7 @@ def test_list_backing_images_resized(self):
'instance-00000002', 'instance-00000003'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('instance-') != -1)
- self.stubs.Set(virtutils, 'get_disk_backing_file',
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240'))
@@ -210,7 +210,7 @@ def test_list_backing_images_instancename(self):
lambda x: ['_base', 'banana-42-hamster'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('banana-42-hamster') != -1)
- self.stubs.Set(virtutils, 'get_disk_backing_file',
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path,
@@ -235,7 +235,7 @@ def test_list_backing_images_disk_notexist(self):
def fake_get_disk(disk_path):
raise processutils.ProcessExecutionError()
- self.stubs.Set(virtutils, 'get_disk_backing_file', fake_get_disk)
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = []
@@ -424,7 +424,7 @@ def test_handle_base_image_unused(self):
self.assertEqual(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_used(self):
- self.stubs.Set(virtutils, 'chown', lambda x, y: None)
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
img = '123'
with self._make_base_file() as fname:
@@ -440,7 +440,7 @@ def test_handle_base_image_used(self):
self.assertEqual(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_used_remotely(self):
- self.stubs.Set(virtutils, 'chown', lambda x, y: None)
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
img = '123'
with self._make_base_file() as fname:
@@ -491,7 +491,7 @@ def test_handle_base_image_used_missing(self):
def test_handle_base_image_checksum_fails(self):
self.flags(checksum_base_images=True, group='libvirt')
- self.stubs.Set(virtutils, 'chown', lambda x, y: None)
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
img = '123'
@@ -501,7 +501,7 @@ def test_handle_base_image_checksum_fails(self):
d = {'sha1': '21323454'}
with open('%s.info' % fname, 'w') as f:
- f.write(json.dumps(d))
+ f.write(jsonutils.dumps(d))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
@@ -569,7 +569,7 @@ def exists(path):
self.stubs.Set(os.path, 'exists', lambda x: exists(x))
- self.stubs.Set(virtutils, 'chown', lambda x, y: None)
+ self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
# We need to stub utime as well
self.stubs.Set(os, 'utime', lambda x, y: None)
@@ -609,21 +609,22 @@ def isfile(path):
self.stubs.Set(os.path, 'isfile', lambda x: isfile(x))
# Fake the database call which lists running instances
- all_instances = [{'image_ref': '1',
- 'host': CONF.host,
- 'name': 'instance-1',
- 'uuid': '123',
- 'vm_state': '',
- 'task_state': ''},
- {'image_ref': '1',
- 'kernel_id': '21',
- 'ramdisk_id': '22',
- 'host': CONF.host,
- 'name': 'instance-2',
- 'uuid': '456',
- 'vm_state': '',
- 'task_state': ''}]
-
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'instance-1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '1',
+ 'kernel_id': '21',
+ 'ramdisk_id': '22',
+ 'host': CONF.host,
+ 'name': 'instance-2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''}]
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
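+        # The image cache manager consumes Instance objects rather than bare
+        # dicts, hence the fake_instance conversion above.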
image_cache_manager = imagecache.ImageCacheManager()
# Fake the utils call which finds the backing image
@@ -633,7 +634,7 @@ def get_disk_backing_file(path):
return fq_path('%s_5368709120' % hashed_1)
self.fail('Unexpected backing file lookup: %s' % path)
- self.stubs.Set(virtutils, 'get_disk_backing_file',
+ self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: get_disk_backing_file(x))
# Fake out verifying checksums, as that is tested elsewhere
@@ -717,18 +718,23 @@ def test_configured_checksum_path(self):
os.mkdir(os.path.join(tmpdir, '_base'))
# Fake the database call which lists running instances
- all_instances = [{'image_ref': '1',
- 'host': CONF.host,
- 'name': 'instance-1',
- 'uuid': '123',
- 'vm_state': '',
- 'task_state': ''},
- {'image_ref': '1',
- 'host': CONF.host,
- 'name': 'instance-2',
- 'uuid': '456',
- 'vm_state': '',
- 'task_state': ''}]
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'instance-1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '1',
+ 'host': CONF.host,
+ 'name': 'instance-2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''}]
+
+ all_instances = []
+ for instance in instances:
+ all_instances.append(fake_instance.fake_instance_obj(
+ None, **instance))
def touch(filename):
f = open(filename, 'w')
diff --git a/nova/tests/virt/libvirt/test_rbd.py b/nova/tests/virt/libvirt/test_rbd.py
new file mode 100644
index 0000000000..bcbdc25f59
--- /dev/null
+++ b/nova/tests/virt/libvirt/test_rbd.py
@@ -0,0 +1,283 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import mock
+
+from nova import exception
+from nova.openstack.common import log as logging
+from nova import test
+from nova import utils
+from nova.virt.libvirt import rbd_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+CEPH_MON_DUMP = """dumped monmap epoch 1
+{ "epoch": 1,
+ "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa",
+ "modified": "2013-05-22 17:44:56.343618",
+ "created": "2013-05-22 17:44:56.343618",
+ "mons": [
+ { "rank": 0,
+ "name": "a",
+ "addr": "[::1]:6789\/0"},
+ { "rank": 1,
+ "name": "b",
+ "addr": "[::1]:6790\/0"},
+ { "rank": 2,
+ "name": "c",
+ "addr": "[::1]:6791\/0"},
+ { "rank": 3,
+ "name": "d",
+ "addr": "127.0.0.1:6792\/0"},
+ { "rank": 4,
+ "name": "e",
+ "addr": "example.com:6791\/0"}],
+ "quorum": [
+ 0,
+ 1,
+ 2]}
+"""
+
+
+class RbdTestCase(test.NoDBTestCase):
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def setUp(self, mock_rados, mock_rbd):
+ super(RbdTestCase, self).setUp()
+
+ self.mock_rados = mock_rados
+ self.mock_rados.Rados = mock.Mock
+ self.mock_rados.Rados.ioctx = mock.Mock()
+ self.mock_rados.Rados.connect = mock.Mock()
+ self.mock_rados.Rados.shutdown = mock.Mock()
+ self.mock_rados.Rados.open_ioctx = mock.Mock()
+ self.mock_rados.Rados.open_ioctx.return_value = \
+ self.mock_rados.Rados.ioctx
+ self.mock_rados.Error = Exception
+
+ self.mock_rbd = mock_rbd
+ self.mock_rbd.RBD = mock.Mock
+ self.mock_rbd.Image = mock.Mock
+ self.mock_rbd.Image.close = mock.Mock()
+ self.mock_rbd.RBD.Error = Exception
+
+ self.rbd_pool = 'rbd'
+ self.driver = rbd_utils.RBDDriver(self.rbd_pool, None, None)
+
+ self.volume_name = u'volume-00000001'
+
+ def tearDown(self):
+ super(RbdTestCase, self).tearDown()
+
+ def test_good_locations(self):
+ locations = ['rbd://fsid/pool/image/snap',
+ 'rbd://%2F/%2F/%2F/%2F', ]
+ map(self.driver.parse_url, locations)
+
+ def test_bad_locations(self):
+ locations = ['rbd://image',
+ 'http://path/to/somewhere/else',
+ 'rbd://image/extra',
+ 'rbd://image/',
+ 'rbd://fsid/pool/image/',
+ 'rbd://fsid/pool/image/snap/',
+ 'rbd://///', ]
+ for loc in locations:
+ self.assertRaises(exception.ImageUnacceptable,
+ self.driver.parse_url, loc)
+ self.assertFalse(self.driver.is_cloneable({'url': loc},
+ {'disk_format': 'raw'}))
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_cloneable(self, mock_rados, mock_rbd, mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://abc/pool/image/snap'}
+ info = {'disk_format': 'raw'}
+ self.assertTrue(self.driver.is_cloneable(location, info))
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ def test_uncloneable_different_fsid(self, mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://def/pool/image/snap'}
+ self.assertFalse(
+ self.driver.is_cloneable(location, {'disk_format': 'raw'}))
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_uncloneable_unreadable(self, mock_rados, mock_rbd, mock_proxy,
+ mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://abc/pool/image/snap'}
+
+ mock_proxy.side_effect = mock_rbd.Error
+
+ self.assertFalse(
+ self.driver.is_cloneable(location, {'disk_format': 'raw'}))
+ mock_proxy.assert_called_once_with(self.driver, 'image', pool='pool',
+ snapshot='snap', read_only=True)
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ def test_uncloneable_bad_format(self, mock_get_fsid):
+ mock_get_fsid.return_value = 'abc'
+ location = {'url': 'rbd://abc/pool/image/snap'}
+ formats = ['qcow2', 'vmdk', 'vdi']
+ for f in formats:
+ self.assertFalse(
+ self.driver.is_cloneable(location, {'disk_format': f}))
+ self.assertTrue(mock_get_fsid.called)
+
+ @mock.patch.object(utils, 'execute')
+ def test_get_mon_addrs(self, mock_execute):
+ mock_execute.return_value = (CEPH_MON_DUMP, '')
+ hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com']
+ ports = ['6789', '6790', '6791', '6792', '6791']
+ self.assertEqual((hosts, ports), self.driver.get_mon_addrs())
+
+ @mock.patch.object(rbd_utils, 'RADOSClient')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_clone(self, mock_rados, mock_rbd, mock_client):
+ pool = u'images'
+ image = u'image-name'
+ snap = u'snapshot-name'
+ location = {'url': u'rbd://fsid/%s/%s/%s' % (pool, image, snap)}
+
+ client_stack = []
+
+ def mock__enter__(inst):
+ def _inner():
+ client_stack.append(inst)
+ return inst
+ return _inner
+
+ client = mock_client.return_value
+        # Capture both RADOS clients (source and destination) used to
+        # perform the clone so the ioctx arguments can be checked below.
+ client.__enter__.side_effect = mock__enter__(client)
+
+ rbd = mock_rbd.RBD.return_value
+
+ self.driver.clone(location, self.volume_name)
+
+ args = [client_stack[0].ioctx, str(image), str(snap),
+ client_stack[1].ioctx, str(self.volume_name)]
+ kwargs = {'features': mock_rbd.RBD_FEATURE_LAYERING}
+ rbd.clone.assert_called_once_with(*args, **kwargs)
+ self.assertEqual(client.__enter__.call_count, 2)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_resize(self, mock_proxy):
+ size = 1024
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ self.driver.resize(self.volume_name, size)
+ proxy.resize.assert_called_once_with(size)
+
+ @mock.patch.object(rbd_utils.RBDDriver, '_disconnect_from_rados')
+ @mock.patch.object(rbd_utils.RBDDriver, '_connect_to_rados')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_rbd_volume_proxy_init(self, mock_rados, mock_rbd,
+ mock_connect_from_rados,
+ mock_disconnect_from_rados):
+ mock_connect_from_rados.return_value = (None, None)
+ mock_disconnect_from_rados.return_value = (None, None)
+
+ with rbd_utils.RBDVolumeProxy(self.driver, self.volume_name):
+ mock_connect_from_rados.assert_called_once_with(None)
+ self.assertFalse(mock_disconnect_from_rados.called)
+
+ mock_disconnect_from_rados.assert_called_once_with(None, None)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_connect_to_rados_default(self, mock_rados, mock_rbd):
+ ret = self.driver._connect_to_rados()
+ self.assertTrue(self.mock_rados.Rados.connect.called)
+ self.assertTrue(self.mock_rados.Rados.open_ioctx.called)
+ self.assertIsInstance(ret[0], self.mock_rados.Rados)
+ self.assertEqual(ret[1], self.mock_rados.Rados.ioctx)
+ self.mock_rados.Rados.open_ioctx.assert_called_with(self.rbd_pool)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_connect_to_rados_different_pool(self, mock_rados, mock_rbd):
+ ret = self.driver._connect_to_rados('alt_pool')
+ self.assertTrue(self.mock_rados.Rados.connect.called)
+ self.assertTrue(self.mock_rados.Rados.open_ioctx.called)
+ self.assertIsInstance(ret[0], self.mock_rados.Rados)
+ self.assertEqual(ret[1], self.mock_rados.Rados.ioctx)
+ self.mock_rados.Rados.open_ioctx.assert_called_with('alt_pool')
+
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_connect_to_rados_error(self, mock_rados):
+ mock_rados.Rados.open_ioctx.side_effect = mock_rados.Error
+ self.assertRaises(mock_rados.Error, self.driver._connect_to_rados)
+ mock_rados.Rados.open_ioctx.assert_called_once_with(self.rbd_pool)
+ mock_rados.Rados.shutdown.assert_called_once_with()
+
+ def test_ceph_args_none(self):
+ self.driver.rbd_user = None
+ self.driver.ceph_conf = None
+ self.assertEqual([], self.driver.ceph_args())
+
+ def test_ceph_args_rbd_user(self):
+ self.driver.rbd_user = 'foo'
+ self.driver.ceph_conf = None
+ self.assertEqual(['--id', 'foo'], self.driver.ceph_args())
+
+ def test_ceph_args_ceph_conf(self):
+ self.driver.rbd_user = None
+ self.driver.ceph_conf = '/path/bar.conf'
+ self.assertEqual(['--conf', '/path/bar.conf'],
+ self.driver.ceph_args())
+
+ def test_ceph_args_rbd_user_and_ceph_conf(self):
+ self.driver.rbd_user = 'foo'
+ self.driver.ceph_conf = '/path/bar.conf'
+ self.assertEqual(['--id', 'foo', '--conf', '/path/bar.conf'],
+ self.driver.ceph_args())
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_exists(self, mock_proxy):
+ snapshot = 'snap'
+ proxy = mock_proxy.return_value
+ self.assertTrue(self.driver.exists(self.volume_name,
+ self.rbd_pool,
+ snapshot))
+ proxy.__enter__.assert_called_once_with()
+ proxy.__exit__.assert_called_once_with(None, None, None)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ @mock.patch.object(rbd_utils, 'RADOSClient')
+ def test_cleanup_volumes(self, mock_client, mock_rados, mock_rbd):
+ instance = {'uuid': '12345'}
+
+ rbd = mock_rbd.RBD.return_value
+ rbd.list.return_value = ['12345_test', '111_test']
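+        # Only volumes whose names are prefixed with the instance uuid
+        # should be removed.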
+
+ client = mock_client.return_value
+ self.driver.cleanup_volumes(instance)
+ rbd.remove.assert_called_once_with(client.ioctx, '12345_test')
+ client.__enter__.assert_called_once_with()
+ client.__exit__.assert_called_once_with(None, None, None)
diff --git a/nova/tests/virt/libvirt/test_utils.py b/nova/tests/virt/libvirt/test_utils.py
index 827b2cf89c..484919f18a 100644
--- a/nova/tests/virt/libvirt/test_utils.py
+++ b/nova/tests/virt/libvirt/test_utils.py
@@ -47,46 +47,6 @@ def test_get_disk_type(self):
disk_type = libvirt_utils.get_disk_type(path)
self.assertEqual(disk_type, 'raw')
- def test_list_rbd_volumes(self):
- conf = '/etc/ceph/fake_ceph.conf'
- pool = 'fake_pool'
- user = 'user'
- self.flags(images_rbd_ceph_conf=conf, group='libvirt')
- self.flags(rbd_user=user, group='libvirt')
- self.mox.StubOutWithMock(libvirt_utils.utils,
- 'execute')
- libvirt_utils.utils.execute('rbd', '-p', pool, 'ls', '--id',
- user,
- '--conf', conf).AndReturn(("Out", "Error"))
- self.mox.ReplayAll()
-
- libvirt_utils.list_rbd_volumes(pool)
-
- self.mox.VerifyAll()
-
- def test_remove_rbd_volumes(self):
- conf = '/etc/ceph/fake_ceph.conf'
- pool = 'fake_pool'
- user = 'user'
- names = ['volume1', 'volume2', 'volume3']
- self.flags(images_rbd_ceph_conf=conf, group='libvirt')
- self.flags(rbd_user=user, group='libvirt')
- self.mox.StubOutWithMock(libvirt_utils.utils, 'execute')
- libvirt_utils.utils.execute('rbd', 'rm', os.path.join(pool, 'volume1'),
- '--id', user, '--conf', conf, attempts=3,
- run_as_root=True)
- libvirt_utils.utils.execute('rbd', 'rm', os.path.join(pool, 'volume2'),
- '--id', user, '--conf', conf, attempts=3,
- run_as_root=True)
- libvirt_utils.utils.execute('rbd', 'rm', os.path.join(pool, 'volume3'),
- '--id', user, '--conf', conf, attempts=3,
- run_as_root=True)
- self.mox.ReplayAll()
-
- libvirt_utils.remove_rbd_volumes(pool, *names)
-
- self.mox.VerifyAll()
-
@mock.patch('nova.utils.execute')
def test_copy_image_local_cp(self, mock_execute):
libvirt_utils.copy_image('src', 'dest')
diff --git a/nova/tests/virt/libvirt/test_vif.py b/nova/tests/virt/libvirt/test_vif.py
index 68ba269a68..2d33f618d2 100644
--- a/nova/tests/virt/libvirt/test_vif.py
+++ b/nova/tests/virt/libvirt/test_vif.py
@@ -214,12 +214,34 @@ class LibvirtVifTestCase(test.TestCase):
type=network_model.VIF_TYPE_MLNX_DIRECT,
devname='tap-xxx-yyy-zzz')
+ vif_mlnx_net = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_mlnx,
+ type=network_model.VIF_TYPE_MLNX_DIRECT,
+ details={'physical_network':
+ 'fake_phy_network'},
+ devname='tap-xxx-yyy-zzz')
+
vif_midonet = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_midonet,
type=network_model.VIF_TYPE_MIDONET,
devname='tap-xxx-yyy-zzz')
+    vif_vhostuser_defpath = network_model.VIF(
+        id='vif-xxx-yyy-zzz',
+        address='ca:fe:de:ad:be:ef',
+        type=network_model.VIF_TYPE_VHOSTUSER,
+        vhostuser_mode='server')
+
+    vif_vhostuser_custpath = network_model.VIF(
+        id='vif-xxx-yyy-zzz',
+        address='ca:fe:de:ad:be:ef',
+        type=network_model.VIF_TYPE_VHOSTUSER,
+        vhostuser_path='/tmp/custompath.sock',
+        vhostuser_mode='server')
+
vif_iovisor = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
@@ -318,7 +340,7 @@ def _get_instance_xml(self, driver, vif, image_meta=None):
default_inst_type['extra_specs'] = dict(extra_specs + quota_bandwidth)
conf = self._get_conf()
nic = driver.get_config(self.instance, vif, image_meta,
- default_inst_type)
+ default_inst_type, CONF.libvirt.virt_type)
conf.add_device(nic)
return conf.to_xml()
@@ -374,20 +396,25 @@ def test_model_kvm(self):
d = vif.LibvirtGenericVIFDriver(self._get_conn())
xml = self._get_instance_xml(d, self.vif_bridge)
-
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
- def test_model_kvm_custom(self):
- self.flags(use_virtio_for_bridges=True,
- virt_type='kvm',
- group='libvirt')
+ def test_model_kvm_qemu_custom(self):
+ for virt in ('kvm', 'qemu'):
+ self.flags(use_virtio_for_bridges=True,
+ virt_type=virt,
+ group='libvirt')
- d = vif.LibvirtGenericVIFDriver(self._get_conn())
- image_meta = {'properties': {'hw_vif_model':
- network_model.VIF_MODEL_E1000}}
- xml = self._get_instance_xml(d, self.vif_bridge,
- image_meta)
- self._assertModel(xml, network_model.VIF_MODEL_E1000)
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ supported = (network_model.VIF_MODEL_NE2K_PCI,
+ network_model.VIF_MODEL_PCNET,
+ network_model.VIF_MODEL_RTL8139,
+ network_model.VIF_MODEL_E1000,
+ network_model.VIF_MODEL_SPAPR_VLAN)
+ for model in supported:
+ image_meta = {'properties': {'hw_vif_model': model}}
+ xml = self._get_instance_xml(d, self.vif_bridge,
+ image_meta)
+ self._assertModel(xml, model)
def test_model_kvm_bogus(self):
self.flags(use_virtio_for_bridges=True,
@@ -446,9 +473,8 @@ def test_model_qemu_no_firewall(self):
self.vif_8021qbg,
self.vif_iovisor,
self.vif_mlnx,
+ self.vif_ovs,
)
- self._test_model_qemu(self.vif_ovs,
- libvirt_version=vif.LIBVIRT_OVS_VPORT_VERSION)
def test_model_qemu_iptables(self):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
@@ -499,23 +525,8 @@ def _check_ivs_ethernet_driver(self, d, vif, dev_prefix):
script = node.find("script").get("path")
self.assertEqual(script, "")
- def _check_ovs_ethernet_driver(self, d, vif, dev_prefix):
- self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
- xml = self._get_instance_xml(d, vif)
- node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
- self.vif_ovs, prefix=dev_prefix)
- script = node.find("script").get("path")
- self.assertEqual(script, "")
-
- def test_ovs_ethernet_driver(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
- self._check_ovs_ethernet_driver(d,
- self.vif_ovs,
- "tap")
-
def test_unplug_ivs_ethernet(self):
- d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
with mock.patch.object(linux_net, 'delete_ivs_vif_port') as delete:
delete.side_effect = processutils.ProcessExecutionError
d.unplug_ivs_ethernet(None, self.vif_ovs)
@@ -684,6 +695,37 @@ def test_plug_iovisor(self, device_exists):
}
d.plug_iovisor(instance, self.vif_ivs)
+ def test_unplug_mlnx_with_details(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ execute.side_effect = processutils.ProcessExecutionError
+ d.unplug_mlnx_direct(None, self.vif_mlnx_net)
+ execute.assert_called_once_with('ebrctl', 'del-port',
+ 'fake_phy_network',
+ 'ca:fe:de:ad:be:ef',
+ run_as_root=True)
+
+ def test_plug_mlnx_with_details(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ d.plug_mlnx_direct(self.instance, self.vif_mlnx_net)
+ execute.assert_called_once_with('ebrctl', 'add-port',
+ 'ca:fe:de:ad:be:ef',
+ 'instance-uuid',
+ 'fake_phy_network',
+ 'mlnx_direct',
+ 'eth-xxx-yyy-zzz',
+ run_as_root=True)
+
+ def test_plug_mlnx_no_physical_network(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
+ with mock.patch.object(utils, 'execute') as execute:
+ self.assertRaises(exception.NovaException,
+ d.plug_mlnx_direct,
+ self.instance,
+ self.vif_mlnx)
+ self.assertEqual(0, execute.call_count)
+
def test_ivs_ethernet_driver(self):
d = vif.LibvirtGenericVIFDriver(self._get_conn(ver=9010))
self._check_ivs_ethernet_driver(d,
@@ -771,7 +813,7 @@ def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
node = self._get_node(xml)
- self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
+ self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
self.vif_ovs_filter_cap, br_want)
def _check_neutron_hybrid_driver(self, d, vif, br_want):
@@ -819,6 +861,32 @@ def test_midonet_ethernet_vif_driver(self):
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_midonet, br_want)
+ def test_vhostuser_defpath_vif_driver(self):
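+        # With no vhostuser_path supplied in the VIF, the driver should
+        # fall back to the default socket path asserted below.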
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_vhostuser_defpath)
+ node = self._get_node(xml)
+ self._assertTypeEquals(node, "vhostuser", "source", "type",
+ "unix")
+ self._assertTypeEquals(node, "vhostuser", "source", "path",
+ "/var/lib/libvirt/qemu/vhostuser")
+ self._assertTypeEquals(node, "vhostuser", "source", "mode",
+ "server")
+ self._assertMacEquals(node, self.vif_vhostuser_defpath)
+ self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
+
+ def test_vhostuser_custpath_vif_driver(self):
+ d = vif.LibvirtGenericVIFDriver(self._get_conn())
+ xml = self._get_instance_xml(d, self.vif_vhostuser_custpath)
+ node = self._get_node(xml)
+ self._assertTypeEquals(node, "vhostuser", "source", "type",
+ "unix")
+ self._assertTypeEquals(node, "vhostuser", "source", "path",
+ "/tmp/custompath.sock")
+ self._assertTypeEquals(node, "vhostuser", "source", "mode",
+ "server")
+ self._assertMacEquals(node, self.vif_vhostuser_custpath)
+ self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
+
def test_generic_8021qbh_driver(self):
d = vif.LibvirtGenericVIFDriver(self._get_conn())
xml = self._get_instance_xml(d, self.vif_8021qbh)
diff --git a/nova/tests/virt/libvirt/test_volume.py b/nova/tests/virt/libvirt/test_volume.py
index d86b90d74c..bbd7cef3da 100644
--- a/nova/tests/virt/libvirt/test_volume.py
+++ b/nova/tests/virt/libvirt/test_volume.py
@@ -343,6 +343,26 @@ def test_libvirt_iscsi_driver_disconnect_multipath_error(self):
['-f', 'fake-multipath-devname'],
check_exit_code=[0, 1])
+ def test_sanitize_log_run_iscsiadm(self):
+ # Tests that the parameters to the _run_iscsiadm function are sanitized
+ # for passwords when logged.
+ def fake_debug(*args, **kwargs):
+ self.assertIn('node.session.auth.password', args[0])
+ self.assertNotIn('scrubme', args[0])
+
+ libvirt_driver = volume.LibvirtISCSIVolumeDriver(self.fake_conn)
+ connection_info = self.iscsi_connection(self.vol, self.location,
+ self.iqn)
+ iscsi_properties = connection_info['data']
+ with mock.patch.object(volume.LOG, 'debug',
+ side_effect=fake_debug) as debug_mock:
+ libvirt_driver._iscsiadm_update(iscsi_properties,
+ 'node.session.auth.password',
+ 'scrubme')
+        # We don't care what the log message is; we just want to make sure
+        # our stub method is called, since it asserts the password is
+        # scrubbed.
+ self.assertTrue(debug_mock.called)
+
def iser_connection(self, volume, location, iqn):
return {
'driver_volume_type': 'iser',
@@ -931,14 +951,22 @@ def test_libvirt_fibrechan_driver(self):
mount_device = "vde"
conf = libvirt_driver.connect_volume(connection_info,
self.disk_info)
+ self.assertEqual('1234567890',
+ connection_info['data']['multipath_id'])
tree = conf.format_dom()
- self.assertEqual(tree.get('type'), 'block')
- self.assertEqual(tree.find('./source').get('dev'),
- multipath_devname)
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(multipath_devname,
+ tree.find('./source').get('dev'))
+ # Test the scenario where multipath_id is returned
+ libvirt_driver.disconnect_volume(connection_info, mount_device)
+ expected_commands = []
+ self.assertEqual(expected_commands, self.executes)
+ # Test the scenario where multipath_id is not returned
connection_info["data"]["devices"] = devices["devices"]
+ del connection_info["data"]["multipath_id"]
libvirt_driver.disconnect_volume(connection_info, mount_device)
expected_commands = []
- self.assertEqual(self.executes, expected_commands)
+ self.assertEqual(expected_commands, self.executes)
# Should not work for anything other than string, unicode, and list
connection_info = self.fibrechan_connection(self.vol,
diff --git a/nova/tests/virt/test_block_device.py b/nova/tests/virt/test_block_device.py
index ae6d16ed1e..a16ade1e4f 100644
--- a/nova/tests/virt/test_block_device.py
+++ b/nova/tests/virt/test_block_device.py
@@ -12,12 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+import contextlib
+
import mock
from nova import block_device
from nova import context
from nova.openstack.common import jsonutils
from nova import test
+from nova.tests import fake_instance
from nova.tests import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
@@ -31,7 +34,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'snapshot': driver_block_device.DriverSnapshotBlockDevice,
- 'image': driver_block_device.DriverImageBlockDevice
+ 'image': driver_block_device.DriverImageBlockDevice,
+ 'blank': driver_block_device.DriverBlankBlockDevice
}
swap_bdm = block_device.BlockDeviceDict(
@@ -163,6 +167,34 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
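+    # A 'blank' source_type with a 'volume' destination has no backing
+    # source; attach() is expected to create a fresh volume for it (see
+    # test_blank_attach_volume below).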
+ blank_bdm = block_device.BlockDeviceDict(
+ {'id': 6, 'instance_uuid': 'fake-instance',
+ 'device_name': '/dev/sda2',
+ 'delete_on_termination': True,
+ 'volume_size': 3,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'source_type': 'blank',
+ 'destination_type': 'volume',
+ 'connection_info': '{"fake": "connection_info"}',
+ 'snapshot_id': 'fake-snapshot-id-1',
+ 'volume_id': 'fake-volume-id-2',
+ 'boot_index': -1})
+
+ blank_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True,
+ 'disk_bus': 'scsi',
+ 'device_type': 'disk',
+ 'guest_format': None,
+ 'boot_index': -1}
+
+ blank_legacy_driver_bdm = {
+ 'mount_device': '/dev/sda2',
+ 'connection_info': {"fake": "connection_info"},
+ 'delete_on_termination': True}
+
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
@@ -195,8 +227,6 @@ def _test_driver_device(self, name):
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
- for no_pass in set(db_bdm.keys()) - test_bdm._proxy_as_attr:
- self.assertRaises(AttributeError, getattr, test_bdm, no_pass)
# Make sure that all others raise _invalidType
for other_name, cls in self.driver_classes.iteritems():
@@ -215,6 +245,11 @@ def _test_driver_device(self, name):
save_mock.assert_called_once_with(self.context)
+ # Test the save method with no context passed
+ with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
+ test_bdm.save()
+ save_mock.assert_called_once_with()
+
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm" % name).copy()
@@ -277,10 +312,20 @@ def test_driver_image_block_device_destination_local(self):
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['image'], bdm)
+ def test_driver_blank_block_device(self):
+ self._test_driver_device('blank')
+
+ test_bdm = self.driver_classes['blank'](
+ self.blank_bdm)
+ self.assertEqual(6, test_bdm._bdm_obj.id)
+ self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
+ self.assertEqual(3, test_bdm.volume_size)
+
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, check_attach=True,
fail_check_attach=False, driver_attach=False,
- fail_driver_attach=False, access_mode='rw'):
+ fail_driver_attach=False, volume_attach=True,
+ access_mode='rw'):
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
@@ -332,16 +377,18 @@ def _test_volume_attach(self, driver_bdm, bdm_dict,
expected_conn_info).AndReturn(None)
return instance, expected_conn_info
- self.volume_api.attach(elevated_context, fake_volume['id'],
- 'fake_uuid', bdm_dict['device_name'],
- mode=access_mode).AndReturn(None)
+ if volume_attach:
+ self.volume_api.attach(elevated_context, fake_volume['id'],
+ 'fake_uuid', bdm_dict['device_name'],
+ mode=access_mode).AndReturn(None)
driver_bdm._bdm_obj.save(self.context).AndReturn(None)
return instance, expected_conn_info
def test_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
- volume = {'id': 'fake-volume-id-1'}
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
@@ -355,7 +402,8 @@ def test_volume_attach(self):
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
- volume = {'id': 'fake-volume-id-1'}
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
@@ -379,10 +427,29 @@ def check_volume_attach_check_attach_fails(self):
        self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
+ def test_volume_no_volume_attach(self):
+ test_bdm = self.driver_classes['volume'](
+ self.volume_bdm)
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
+
+ instance, expected_conn_info = self._test_volume_attach(
+ test_bdm, self.volume_bdm, volume, check_attach=False,
+ driver_attach=False)
+
+ self.mox.ReplayAll()
+
+ test_bdm.attach(self.context, instance,
+ self.volume_api, self.virt_driver,
+ do_check_attach=False, do_driver_attach=False)
+ self.assertThat(test_bdm['connection_info'],
+ matchers.DictMatches(expected_conn_info))
+
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
- volume = {'id': 'fake-volume-id-1'}
+ volume = {'id': 'fake-volume-id-1',
+ 'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, check_attach=False,
@@ -415,8 +482,8 @@ def test_refresh_connection(self):
instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
- connection_info = {'data': {}}
- expected_conn_info = {'data': {},
+ connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
+ expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
@@ -439,8 +506,10 @@ def test_snapshot_attach_no_volume(self):
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)
- snapshot = {'id': 'fake-snapshot-id-1'}
- volume = {'id': 'fake-volume-id-2'}
+        snapshot = {'id': 'fake-snapshot-id-1',
+ 'attach_status': 'detached'}
+ volume = {'id': 'fake-volume-id-2',
+ 'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
@@ -471,7 +540,8 @@ def test_snapshot_attach_volume(self):
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
- self.virt_driver).AndReturn(None)
+ self.virt_driver, do_check_attach=True
+ ).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
@@ -484,7 +554,8 @@ def test_image_attach_no_volume(self):
test_bdm = self.driver_classes['image'](no_volume_image)
image = {'id': 'fake-image-id-1'}
- volume = {'id': 'fake-volume-id-2'}
+ volume = {'id': 'fake-volume-id-2',
+ 'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
@@ -513,13 +584,41 @@ def test_image_attach_volume(self):
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
- self.virt_driver).AndReturn(None)
+ self.virt_driver, do_check_attach=True
+ ).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
+ def test_blank_attach_volume(self):
+ no_blank_volume = self.blank_bdm.copy()
+ no_blank_volume['volume_id'] = None
+ test_bdm = self.driver_classes['blank'](no_blank_volume)
+ instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
+ **{'uuid': 'fake-uuid'})
+ volume_class = self.driver_classes['volume']
+ volume = {'id': 'fake-volume-id-2',
+ 'display_name': 'fake-uuid-blank-vol'}
+
+ with contextlib.nested(
+ mock.patch.object(self.volume_api, 'create', return_value=volume),
+ mock.patch.object(volume_class, 'attach')
+ ) as (vol_create, vol_attach):
+ test_bdm.attach(self.context, instance, self.volume_api,
+ self.virt_driver)
+
+ vol_create.assert_called_once_with(self.context,
+ test_bdm.volume_size,
+ 'fake-uuid-blank-vol',
+ '')
+ vol_attach.assert_called_once_with(self.context, instance,
+ self.volume_api,
+ self.virt_driver,
+ do_check_attach=True)
+ self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
+
def test_convert_block_devices(self):
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'],
@@ -567,3 +666,19 @@ def test_is_implemented(self):
local_image = self.image_bdm.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(local_image))
+
+ def test_is_block_device_mapping(self):
+ test_swap = self.driver_classes['swap'](self.swap_bdm)
+ test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
+ test_image = self.driver_classes['image'](self.image_bdm)
+ test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
+ test_volume = self.driver_classes['volume'](self.volume_bdm)
+ test_blank = self.driver_classes['blank'](self.blank_bdm)
+
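+        # Volume-backed types (image, snapshot, volume, blank) count as
+        # block device mappings; swap and ephemeral do not.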
+ for bdm in (test_image, test_snapshot, test_volume, test_blank):
+ self.assertTrue(driver_block_device.is_block_device_mapping(
+ bdm._bdm_obj))
+
+ for bdm in (test_swap, test_ephemeral):
+ self.assertFalse(driver_block_device.is_block_device_mapping(
+ bdm._bdm_obj))
diff --git a/nova/tests/virt/test_diagnostics.py b/nova/tests/virt/test_diagnostics.py
index 3f0b5b3ca4..f3969fc09f 100644
--- a/nova/tests/virt/test_diagnostics.py
+++ b/nova/tests/virt/test_diagnostics.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import exception
from nova import test
from nova.virt import diagnostics
@@ -208,3 +209,23 @@ def test_diagnostics_serialize(self):
'version': '1.0'}
result = diags.serialize()
self.assertEqual(expected, result)
+
+ def test_diagnostics_invalid_input(self):
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ cpu_details='invalid type')
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ cpu_details=['invalid entry'])
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ nic_details='invalid type')
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ nic_details=['invalid entry'])
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ disk_details='invalid type')
+ self.assertRaises(exception.InvalidInput,
+ diagnostics.Diagnostics,
+ disk_details=['invalid entry'])
diff --git a/nova/tests/virt/test_hardware.py b/nova/tests/virt/test_hardware.py
index 9a08cd6dfc..754b96dd2b 100644
--- a/nova/tests/virt/test_hardware.py
+++ b/nova/tests/virt/test_hardware.py
@@ -14,14 +14,28 @@
from nova import exception
from nova import test
+from nova.tests import matchers
from nova.virt import hardware as hw
class FakeFlavor():
- def __init__(self, vcpus, extra_specs):
+ def __init__(self, vcpus, memory, extra_specs):
self.vcpus = vcpus
+ self.memory_mb = memory
self.extra_specs = extra_specs
+ def __getitem__(self, item):
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ raise KeyError(item)
+
+ def get(self, item, default=None):
+ try:
+ return getattr(self, item)
+ except AttributeError:
+ return default
+
class CpuSetTestCase(test.NoDBTestCase):
def test_get_vcpu_pin_set(self):
@@ -169,7 +183,7 @@ class VCPUTopologyTest(test.NoDBTestCase):
def test_validate_config(self):
testdata = [
{ # Flavor sets preferred topology only
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
@@ -182,7 +196,7 @@ def test_validate_config(self):
)
},
{ # Image topology overrides flavor
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
@@ -200,7 +214,7 @@ def test_validate_config(self):
)
},
{ # Partial image topology overrides flavor
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
@@ -215,7 +229,7 @@ def test_validate_config(self):
)
},
{ # Restrict use of threads
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_max_threads": "2",
}),
"image": {
@@ -228,7 +242,7 @@ def test_validate_config(self):
)
},
{ # Force use of at least two sockets
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
@@ -240,7 +254,7 @@ def test_validate_config(self):
)
},
{ # Image limits reduce flavor
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
@@ -254,7 +268,7 @@ def test_validate_config(self):
)
},
{ # Image limits kill flavor preferred
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_sockets": "2",
"hw:cpu_cores": "8",
"hw:cpu_threads": "1",
@@ -269,7 +283,7 @@ def test_validate_config(self):
)
},
{ # Image limits cannot exceed flavor
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
@@ -281,7 +295,7 @@ def test_validate_config(self):
"expect": exception.ImageVCPULimitsRangeExceeded,
},
{ # Image preferred cannot exceed flavor
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
@@ -294,24 +308,24 @@ def test_validate_config(self):
},
]
- for test in testdata:
- if type(test["expect"]) == tuple:
+ for topo_test in testdata:
+ if type(topo_test["expect"]) == tuple:
(preferred,
maximum) = hw.VirtCPUTopology.get_topology_constraints(
- test["flavor"],
- test["image"])
-
- self.assertEqual(test["expect"][0], preferred.sockets)
- self.assertEqual(test["expect"][1], preferred.cores)
- self.assertEqual(test["expect"][2], preferred.threads)
- self.assertEqual(test["expect"][3], maximum.sockets)
- self.assertEqual(test["expect"][4], maximum.cores)
- self.assertEqual(test["expect"][5], maximum.threads)
+ topo_test["flavor"],
+ topo_test["image"])
+
+ self.assertEqual(topo_test["expect"][0], preferred.sockets)
+ self.assertEqual(topo_test["expect"][1], preferred.cores)
+ self.assertEqual(topo_test["expect"][2], preferred.threads)
+ self.assertEqual(topo_test["expect"][3], maximum.sockets)
+ self.assertEqual(topo_test["expect"][4], maximum.cores)
+ self.assertEqual(topo_test["expect"][5], maximum.threads)
else:
- self.assertRaises(test["expect"],
+ self.assertRaises(topo_test["expect"],
hw.VirtCPUTopology.get_topology_constraints,
- test["flavor"],
- test["image"])
+ topo_test["flavor"],
+ topo_test["image"])
def test_possible_configs(self):
testdata = [
@@ -400,28 +414,28 @@ def test_possible_configs(self):
},
]
- for test in testdata:
- if type(test["expect"]) == list:
+ for topo_test in testdata:
+ if type(topo_test["expect"]) == list:
actual = []
for topology in hw.VirtCPUTopology.get_possible_topologies(
- test["vcpus"],
- hw.VirtCPUTopology(test["maxsockets"],
- test["maxcores"],
- test["maxthreads"]),
- test["allow_threads"]):
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"]):
actual.append([topology.sockets,
topology.cores,
topology.threads])
- self.assertEqual(test["expect"], actual)
+ self.assertEqual(topo_test["expect"], actual)
else:
- self.assertRaises(test["expect"],
+ self.assertRaises(topo_test["expect"],
hw.VirtCPUTopology.get_possible_topologies,
- test["vcpus"],
- hw.VirtCPUTopology(test["maxsockets"],
- test["maxcores"],
- test["maxthreads"]),
- test["allow_threads"])
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"])
def test_sorting_configs(self):
testdata = [
@@ -492,32 +506,32 @@ def test_sorting_configs(self):
},
]
- for test in testdata:
+ for topo_test in testdata:
actual = []
possible = hw.VirtCPUTopology.get_possible_topologies(
- test["vcpus"],
- hw.VirtCPUTopology(test["maxsockets"],
- test["maxcores"],
- test["maxthreads"]),
- test["allow_threads"])
+ topo_test["vcpus"],
+ hw.VirtCPUTopology(topo_test["maxsockets"],
+ topo_test["maxcores"],
+ topo_test["maxthreads"]),
+ topo_test["allow_threads"])
tops = hw.VirtCPUTopology.sort_possible_topologies(
possible,
- hw.VirtCPUTopology(test["sockets"],
- test["cores"],
- test["threads"]))
+ hw.VirtCPUTopology(topo_test["sockets"],
+ topo_test["cores"],
+ topo_test["threads"]))
for topology in tops:
actual.append([topology.sockets,
topology.cores,
topology.threads])
- self.assertEqual(test["expect"], actual)
+ self.assertEqual(topo_test["expect"], actual)
def test_best_config(self):
testdata = [
{ # Flavor sets preferred topology only
"allow_threads": True,
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1"
@@ -529,7 +543,7 @@ def test_best_config(self):
},
{ # Image topology overrides flavor
"allow_threads": True,
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
@@ -546,7 +560,7 @@ def test_best_config(self):
},
{ # Image topology overrides flavor
"allow_threads": False,
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1",
@@ -563,7 +577,7 @@ def test_best_config(self):
},
{ # Partial image topology overrides flavor
"allow_threads": True,
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_sockets": "8",
"hw:cpu_cores": "2",
"hw:cpu_threads": "1"
@@ -577,7 +591,7 @@ def test_best_config(self):
},
{ # Restrict use of threads
"allow_threads": True,
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_max_threads": "1"
}),
"image": {
@@ -587,7 +601,7 @@ def test_best_config(self):
},
{ # Force use of at least two sockets
"allow_threads": True,
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
}),
@@ -598,7 +612,7 @@ def test_best_config(self):
},
{ # Image limits reduce flavor
"allow_threads": True,
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_max_sockets": "8",
"hw:cpu_max_cores": "8",
"hw:cpu_max_threads": "1",
@@ -612,7 +626,7 @@ def test_best_config(self):
},
{ # Image limits kill flavor preferred
"allow_threads": True,
- "flavor": FakeFlavor(16, {
+ "flavor": FakeFlavor(16, 2048, {
"hw:cpu_sockets": "2",
"hw:cpu_cores": "8",
"hw:cpu_threads": "1",
@@ -626,12 +640,440 @@ def test_best_config(self):
},
]
- for test in testdata:
+ for topo_test in testdata:
topology = hw.VirtCPUTopology.get_desirable_configs(
- test["flavor"],
- test["image"],
- test["allow_threads"])[0]
+ topo_test["flavor"],
+ topo_test["image"],
+ topo_test["allow_threads"])[0]
+
+ self.assertEqual(topo_test["expect"][0], topology.sockets)
+ self.assertEqual(topo_test["expect"][1], topology.cores)
+ self.assertEqual(topo_test["expect"][2], topology.threads)
+
+
+class NUMATopologyTest(test.NoDBTestCase):
+
+ def test_topology_constraints(self):
+ testdata = [
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ }),
+ "image": {
+ },
+ "expect": None,
+ },
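+            # A bare hw:numa_nodes=2 with no explicit cpu/mem mapping is
+            # expected to split the 8 vCPUs and 2048MB evenly across the
+            # two cells.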
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2
+ }),
+ "image": {
+ },
+ "expect": hw.VirtNUMAInstanceTopology(
+ [
+ hw.VirtNUMATopologyCell(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCell(1, set([4, 5, 6, 7]), 1024),
+ ]),
+ },
+ {
+                # vcpus is not a multiple of nodes, so it is an error not
+                # to provide an explicit cpu/mem mapping
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 3
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyAsymmetric,
+ },
+ {
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 3,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "4,6",
+ "hw:numa_mem.1": "512",
+ "hw:numa_cpus.2": "5,7",
+ "hw:numa_mem.2": "512",
+ }),
+ "image": {
+ },
+ "expect": hw.VirtNUMAInstanceTopology(
+ [
+ hw.VirtNUMATopologyCell(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCell(1, set([4, 6]), 512),
+ hw.VirtNUMATopologyCell(2, set([5, 7]), 512),
+ ]),
+ },
+ {
+ # Request a CPU that is out of range
+ # wrt vCPU count
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 1,
+ "hw:numa_cpus.0": "0-16",
+ "hw:numa_mem.0": "2048",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUOutOfRange,
+ },
+ {
+ # Request the same CPU in two nodes
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-7",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "0-7",
+ "hw:numa_mem.1": "1024",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUDuplicates,
+ },
+ {
+ # Request with some CPUs not assigned
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-2",
+ "hw:numa_mem.0": "1024",
+ "hw:numa_cpus.1": "3-4",
+ "hw:numa_mem.1": "1024",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyCPUsUnassigned,
+ },
+ {
+ # Request too little memory vs flavor total
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "512",
+ "hw:numa_cpus.1": "4-7",
+ "hw:numa_mem.1": "512",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyMemoryOutOfRange,
+ },
+ {
+ # Request too much memory vs flavor total
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.0": "1576",
+ "hw:numa_cpus.1": "4-7",
+ "hw:numa_mem.1": "1576",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyMemoryOutOfRange,
+ },
+ {
+ # Request missing mem.0
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_cpus.0": "0-3",
+ "hw:numa_mem.1": "1576",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyIncomplete,
+ },
+ {
+ # Request missing cpu.0
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ "hw:numa_mem.0": "1576",
+ "hw:numa_cpus.1": "4-7",
+ }),
+ "image": {
+ },
+ "expect": exception.ImageNUMATopologyIncomplete,
+ },
+ {
+ # Image attempts to override flavor
+ "flavor": FakeFlavor(8, 2048, {
+ "hw:numa_nodes": 2,
+ }),
+ "image": {
+ "hw_numa_nodes": 4,
+ },
+ "expect": exception.ImageNUMATopologyForbidden,
+ },
+ ]
+
+ for testitem in testdata:
+ if testitem["expect"] is None:
+ topology = hw.VirtNUMAInstanceTopology.get_constraints(
+ testitem["flavor"], testitem["image"])
+ self.assertIsNone(topology)
+ elif type(testitem["expect"]) == type:
+ self.assertRaises(testitem["expect"],
+ hw.VirtNUMAInstanceTopology.get_constraints,
+ testitem["flavor"],
+ testitem["image"])
+ else:
+ topology = hw.VirtNUMAInstanceTopology.get_constraints(
+ testitem["flavor"], testitem["image"])
+ self.assertEqual(len(testitem["expect"].cells),
+ len(topology.cells))
+ for i in range(len(topology.cells)):
+ self.assertEqual(testitem["expect"].cells[i].cpuset,
+ topology.cells[i].cpuset)
+ self.assertEqual(testitem["expect"].cells[i].memory,
+ topology.cells[i].memory)
+
+ def test_host_usage_contiguous(self):
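+        # usage_from_instances() should accumulate per-cell vCPU and memory
+        # usage across all instances; cells no instance lands on stay at
+        # zero usage.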
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(1, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCell(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCell(1, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCell(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCell(1, set([5, 7]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1, instance2])
+
+ self.assertEqual(len(hosttopo), len(hostusage))
+
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[0].cpuset,
+ hostusage.cells[0].cpuset)
+ self.assertEqual(hosttopo.cells[0].memory,
+ hostusage.cells[0].memory)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[1].cpuset,
+ hostusage.cells[1].cpuset)
+ self.assertEqual(hosttopo.cells[1].memory,
+ hostusage.cells[1].memory)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 3)
+ self.assertEqual(hostusage.cells[1].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[2].cpuset,
+ hostusage.cells[2].cpuset)
+ self.assertEqual(hosttopo.cells[2].memory,
+ hostusage.cells[2].memory)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[2].memory_usage, 0)
+
+ def test_host_usage_sparse(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1, 2, 3]), 1024),
+ hw.VirtNUMATopologyCellUsage(5, set([4, 6]), 512),
+ hw.VirtNUMATopologyCellUsage(6, set([5, 7]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCell(0, set([0, 1, 2]), 256),
+ hw.VirtNUMATopologyCell(6, set([4]), 256),
+ ])
+ instance2 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCell(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCell(5, set([5, 7]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1, instance2])
+
+ self.assertEqual(len(hosttopo), len(hostusage))
+
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[0].id,
+ hostusage.cells[0].id)
+ self.assertEqual(hosttopo.cells[0].cpuset,
+ hostusage.cells[0].cpuset)
+ self.assertEqual(hosttopo.cells[0].memory,
+ hostusage.cells[0].memory)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[1].id,
+ hostusage.cells[1].id)
+ self.assertEqual(hosttopo.cells[1].cpuset,
+ hostusage.cells[1].cpuset)
+ self.assertEqual(hosttopo.cells[1].memory,
+ hostusage.cells[1].memory)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[1].memory_usage, 256)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hosttopo.cells[2].cpuset,
+ hostusage.cells[2].cpuset)
+ self.assertEqual(hosttopo.cells[2].memory,
+ hostusage.cells[2].memory)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[2].memory_usage, 256)
+
+    def test_host_usage_cumulative_with_free(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(
+ 0, set([0, 1, 2, 3]), 1024, cpu_usage=2, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([4, 6]), 512, cpu_usage=1, memory_usage=512),
+ hw.VirtNUMATopologyCellUsage(2, set([5, 7]), 256),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCell(0, set([0, 1, 2]), 512),
+ hw.VirtNUMATopologyCell(1, set([3]), 256),
+ hw.VirtNUMATopologyCell(2, set([4]), 256)])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [instance1])
+ self.assertIsInstance(hostusage.cells[0],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 5)
+ self.assertEqual(hostusage.cells[0].memory_usage, 1024)
+
+ self.assertIsInstance(hostusage.cells[1],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[1].memory_usage, 768)
+
+ self.assertIsInstance(hostusage.cells[2],
+ hw.VirtNUMATopologyCellUsage)
+ self.assertEqual(hostusage.cells[2].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[2].memory_usage, 256)
+
+ # Test freeing of resources
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hostusage, [instance1], free=True)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 2)
+ self.assertEqual(hostusage.cells[0].memory_usage, 512)
+
+ self.assertEqual(hostusage.cells[1].cpu_usage, 1)
+ self.assertEqual(hostusage.cells[1].memory_usage, 512)
+
+ self.assertEqual(hostusage.cells[2].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[2].memory_usage, 0)
+
+ def test_topo_usage_none(self):
+ hosttopo = hw.VirtNUMAHostTopology([
+ hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
+ hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512),
+ ])
+ instance1 = hw.VirtNUMAInstanceTopology([
+ hw.VirtNUMATopologyCell(0, set([0, 1]), 256),
+ hw.VirtNUMATopologyCell(2, set([2]), 256),
+ ])
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ None, [instance1])
+ self.assertIsNone(hostusage)
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, [])
+ self.assertEqual(hostusage.cells[0].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[0].memory_usage, 0)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[1].memory_usage, 0)
+
+ hostusage = hw.VirtNUMAHostTopology.usage_from_instances(
+ hosttopo, None)
+ self.assertEqual(hostusage.cells[0].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[0].memory_usage, 0)
+ self.assertEqual(hostusage.cells[1].cpu_usage, 0)
+ self.assertEqual(hostusage.cells[1].memory_usage, 0)
+
+ def _test_to_dict(self, cell_or_topo, expected):
+ got = cell_or_topo._to_dict()
+ self.assertThat(expected, matchers.DictMatches(got))
+
+ def assertNUMACellMatches(self, expected_cell, got_cell):
+ attrs = ('cpuset', 'memory', 'id')
+        if isinstance(expected_cell, hw.VirtNUMATopologyCellUsage):
+ attrs += ('cpu_usage', 'memory_usage')
+
+ for attr in attrs:
+ self.assertEqual(getattr(expected_cell, attr),
+ getattr(got_cell, attr))
+
+ def _test_cell_from_dict(self, data_dict, expected_cell):
+ cell_class = expected_cell.__class__
+ got_cell = cell_class._from_dict(data_dict)
+ self.assertNUMACellMatches(expected_cell, got_cell)
+
+ def _test_topo_from_dict(self, data_dict, expected_topo, with_usage=False):
+ topology_class = (
+ hw.VirtNUMAHostTopology
+ if with_usage else hw.VirtNUMAInstanceTopology)
+ got_topo = topology_class._from_dict(
+ data_dict)
+ for got_cell, expected_cell in zip(
+ got_topo.cells, expected_topo.cells):
+ self.assertNUMACellMatches(expected_cell, got_cell)
+
+ def test_numa_cell_dict(self):
+ cell = hw.VirtNUMATopologyCell(1, set([1, 2]), 512)
+ cell_dict = {'cpus': '1,2',
+ 'mem': {'total': 512},
+ 'id': 1}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_cell_usage_dict(self):
+ cell = hw.VirtNUMATopologyCellUsage(1, set([1, 2]), 512)
+ cell_dict = {'cpus': '1,2', 'cpu_usage': 0,
+ 'mem': {'total': 512, 'used': 0},
+ 'id': 1}
+ self._test_to_dict(cell, cell_dict)
+ self._test_cell_from_dict(cell_dict, cell)
+
+ def test_numa_instance_topo_dict(self):
+ topo = hw.VirtNUMAInstanceTopology(
+ cells=[
+ hw.VirtNUMATopologyCell(1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCell(2, set([3, 4]), 1024)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2',
+ 'mem': {'total': 1024},
+ 'id': 1},
+ {'cpus': '3,4',
+ 'mem': {'total': 1024},
+ 'id': 2}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo, with_usage=False)
+
+ def test_numa_topo_dict_with_usage(self):
+ topo = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+ topo_dict = {'cells': [
+ {'cpus': '1,2', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 1},
+ {'cpus': '3,4', 'cpu_usage': 0,
+ 'mem': {'total': 1024, 'used': 0},
+ 'id': 2}]}
+ self._test_to_dict(topo, topo_dict)
+ self._test_topo_from_dict(topo_dict, topo, with_usage=True)
+
+ def test_json(self):
+ expected = hw.VirtNUMAHostTopology(
+ cells=[
+ hw.VirtNUMATopologyCellUsage(
+ 1, set([1, 2]), 1024),
+ hw.VirtNUMATopologyCellUsage(
+ 2, set([3, 4]), 1024)])
+ got = hw.VirtNUMAHostTopology.from_json(expected.to_json())
- self.assertEqual(test["expect"][0], topology.sockets)
- self.assertEqual(test["expect"][1], topology.cores)
- self.assertEqual(test["expect"][2], topology.threads)
+ for exp_cell, got_cell in zip(expected.cells, got.cells):
+ self.assertNUMACellMatches(exp_cell, got_cell)
diff --git a/nova/tests/virt/test_imagecache.py b/nova/tests/virt/test_imagecache.py
index 42058e9124..693b0625d6 100644
--- a/nova/tests/virt/test_imagecache.py
+++ b/nova/tests/virt/test_imagecache.py
@@ -16,6 +16,7 @@
from nova.compute import vm_states
from nova import test
+from nova.tests import fake_instance
from nova.virt import imagecache
CONF = cfg.CONF
@@ -24,13 +25,11 @@
class ImageCacheManagerTests(test.NoDBTestCase):
def test_configurationi_defaults(self):
- self.assertEqual(CONF.image_cache_manager_interval,
- 2400)
- self.assertEqual(CONF.image_cache_subdirectory_name,
- '_base')
+ self.assertEqual(2400, CONF.image_cache_manager_interval)
+ self.assertEqual('_base', CONF.image_cache_subdirectory_name)
self.assertTrue(CONF.remove_unused_base_images)
- self.assertEqual(CONF.remove_unused_original_minimum_age_seconds,
- 24 * 3600)
+ self.assertEqual(24 * 3600,
+ CONF.remove_unused_original_minimum_age_seconds)
def test_cache_manager(self):
cache_manager = imagecache.ImageCacheManager()
@@ -40,33 +39,36 @@ def test_cache_manager(self):
self.assertRaises(NotImplementedError,
cache_manager._get_base)
base_images = cache_manager._list_base_images(None)
- self.assertEqual(base_images['unexplained_images'], [])
- self.assertEqual(base_images['originals'], [])
+ self.assertEqual([], base_images['unexplained_images'])
+ self.assertEqual([], base_images['originals'])
self.assertRaises(NotImplementedError,
cache_manager._age_and_verify_cached_images,
None, [], None)
def test_list_running_instances(self):
- all_instances = [{'image_ref': '1',
- 'host': CONF.host,
- 'name': 'inst-1',
- 'uuid': '123',
- 'vm_state': '',
- 'task_state': ''},
- {'image_ref': '2',
- 'host': CONF.host,
- 'name': 'inst-2',
- 'uuid': '456',
- 'vm_state': '',
- 'task_state': ''},
- {'image_ref': '2',
- 'kernel_id': '21',
- 'ramdisk_id': '22',
- 'host': 'remotehost',
- 'name': 'inst-3',
- 'uuid': '789',
- 'vm_state': '',
- 'task_state': ''}]
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'id': '1',
+ 'uuid': '123',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'host': CONF.host,
+ 'id': '2',
+ 'uuid': '456',
+ 'vm_state': '',
+ 'task_state': ''},
+ {'image_ref': '2',
+ 'kernel_id': '21',
+ 'ramdisk_id': '22',
+ 'host': 'remotehost',
+ 'id': '3',
+ 'uuid': '789',
+ 'vm_state': '',
+ 'task_state': ''}]
+
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
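+
+        # The Instance object derives its name from its id via the
+        # instance_name_template, so id '1' becomes 'instance-00000001'.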
image_cache_manager = imagecache.ImageCacheManager()
@@ -74,38 +76,47 @@ def test_list_running_instances(self):
running = image_cache_manager._list_running_instances(None,
all_instances)
- self.assertEqual(len(running['used_images']), 4)
- self.assertEqual(running['used_images']['1'], (1, 0, ['inst-1']))
- self.assertEqual(running['used_images']['2'], (1, 1, ['inst-2',
- 'inst-3']))
- self.assertEqual(running['used_images']['21'], (0, 1, ['inst-3']))
- self.assertEqual(running['used_images']['22'], (0, 1, ['inst-3']))
-
- self.assertIn('inst-1', running['instance_names'])
+ self.assertEqual(4, len(running['used_images']))
+ self.assertEqual((1, 0, ['instance-00000001']),
+ running['used_images']['1'])
+ self.assertEqual((1, 1, ['instance-00000002',
+ 'instance-00000003']),
+ running['used_images']['2'])
+ self.assertEqual((0, 1, ['instance-00000003']),
+ running['used_images']['21'])
+ self.assertEqual((0, 1, ['instance-00000003']),
+ running['used_images']['22'])
+
+ self.assertIn('instance-00000001', running['instance_names'])
self.assertIn('123', running['instance_names'])
- self.assertEqual(len(running['image_popularity']), 4)
- self.assertEqual(running['image_popularity']['1'], 1)
- self.assertEqual(running['image_popularity']['2'], 2)
- self.assertEqual(running['image_popularity']['21'], 1)
- self.assertEqual(running['image_popularity']['22'], 1)
+ self.assertEqual(4, len(running['image_popularity']))
+ self.assertEqual(1, running['image_popularity']['1'])
+ self.assertEqual(2, running['image_popularity']['2'])
+ self.assertEqual(1, running['image_popularity']['21'])
+ self.assertEqual(1, running['image_popularity']['22'])
def test_list_resizing_instances(self):
- all_instances = [{'image_ref': '1',
- 'host': CONF.host,
- 'name': 'inst-1',
- 'uuid': '123',
- 'vm_state': vm_states.RESIZED,
- 'task_state': None}]
+ instances = [{'image_ref': '1',
+ 'host': CONF.host,
+ 'id': '1',
+ 'uuid': '123',
+ 'vm_state': vm_states.RESIZED,
+ 'task_state': None}]
+
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
image_cache_manager = imagecache.ImageCacheManager()
running = image_cache_manager._list_running_instances(None,
all_instances)
- self.assertEqual(len(running['used_images']), 1)
- self.assertEqual((1, 0, ['inst-1']), running['used_images']['1'])
- self.assertEqual(set(['inst-1', '123', 'inst-1_resize', '123_resize']),
+ self.assertEqual(1, len(running['used_images']))
+ self.assertEqual((1, 0, ['instance-00000001']),
+ running['used_images']['1'])
+ self.assertEqual(set(['instance-00000001', '123',
+ 'instance-00000001_resize', '123_resize']),
running['instance_names'])
- self.assertEqual(len(running['image_popularity']), 1)
- self.assertEqual(running['image_popularity']['1'], 1)
+ self.assertEqual(1, len(running['image_popularity']))
+ self.assertEqual(1, running['image_popularity']['1'])
diff --git a/nova/tests/virt/test_ironic_api_contracts.py b/nova/tests/virt/test_ironic_api_contracts.py
index b63a8dd632..730ba942a2 100644
--- a/nova/tests/virt/test_ironic_api_contracts.py
+++ b/nova/tests/virt/test_ironic_api_contracts.py
@@ -103,7 +103,7 @@ def test_ComputeDriver_signatures(self):
self._check_method(driver.ComputeDriver.power_off,
"ComputeDriver.power_off",
- ['self', 'instance'])
+ ['self', 'instance', 'timeout', 'retry_interval'])
self._check_method(driver.ComputeDriver.power_on,
"ComputeDriver.power_on",
diff --git a/nova/tests/virt/test_virt_drivers.py b/nova/tests/virt/test_virt_drivers.py
index 183282db6d..409ff86fc6 100644
--- a/nova/tests/virt/test_virt_drivers.py
+++ b/nova/tests/virt/test_virt_drivers.py
@@ -23,16 +23,20 @@
from nova.compute import manager
from nova import exception
+from nova import objects
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
+from nova.openstack.common import timeutils
from nova import test
+from nova.tests import fake_block_device
from nova.tests.image import fake as fake_image
from nova.tests import utils as test_utils
from nova.tests.virt.libvirt import fake_libvirt_utils
-from nova.tests.virt.libvirt import test_driver
+from nova.virt import block_device as driver_block_device
from nova.virt import event as virtevent
from nova.virt import fake
+from nova.virt import libvirt
from nova.virt.libvirt import imagebackend
LOG = logging.getLogger(__name__)
@@ -260,6 +264,12 @@ def test_snapshot_running(self):
self.connection.snapshot(self.ctxt, instance_ref, img_ref['id'],
lambda *args, **kwargs: None)
+ @catch_notimplementederror
+ def test_post_interrupted_snapshot_cleanup(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.post_interrupted_snapshot_cleanup(self.ctxt,
+ instance_ref)
+
@catch_notimplementederror
def test_reboot(self):
reboot_type = "SOFT"
@@ -437,7 +447,7 @@ def test_swap_volume(self):
self.connection.swap_volume({'driver_volume_type': 'fake'},
{'driver_volume_type': 'fake'},
instance_ref,
- '/dev/sda'))
+ '/dev/sda', 2))
@catch_notimplementederror
def test_attach_detach_different_power_states(self):
@@ -454,24 +464,29 @@ def test_attach_detach_different_power_states(self):
'root_device_name': None,
'swap': None,
'ephemerals': [],
- 'block_device_mapping': [
- test_driver.mocked_bdm(1, {
- 'instance_uuid': instance_ref['uuid'],
- 'connection_info': {'driver_volume_type': 'fake'},
- 'mount_device': '/dev/sda',
+ 'block_device_mapping': driver_block_device.convert_volumes([
+ fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 1, 'instance_uuid': instance_ref['uuid'],
+ 'device_name': '/dev/sda',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
'delete_on_termination': False,
- 'virtual_name': None,
'snapshot_id': None,
'volume_id': 'abcdedf',
'volume_size': None,
'no_device': None
}),
- ]
+ ])
}
- self.connection.power_on(self.ctxt, instance_ref, network_info, bdm)
- self.connection.detach_volume(connection_info,
- instance_ref,
- '/dev/sda')
+ bdm['block_device_mapping'][0]['connection_info'] = (
+ {'driver_volume_type': 'fake'})
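+        # power_on/detach_volume call save() on the driver BDM; stub it
+        # out so the fake BDM is not persisted to a database.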
+ with mock.patch.object(
+ driver_block_device.DriverVolumeBlockDevice, 'save'):
+ self.connection.power_on(
+ self.ctxt, instance_ref, network_info, bdm)
+ self.connection.detach_volume(connection_info,
+ instance_ref,
+ '/dev/sda')
@catch_notimplementederror
def test_get_info(self):
@@ -497,6 +512,7 @@ def test_get_diagnostics(self):
@catch_notimplementederror
def test_get_instance_diagnostics(self):
instance_ref, network_info = self._get_running_instance(obj=True)
+ instance_ref['launched_at'] = timeutils.utcnow()
self.connection.get_instance_diagnostics(instance_ref)
@catch_notimplementederror
@@ -603,12 +619,12 @@ def _check_available_resource_fields(self, host_status):
'supported_instances']
for key in keys:
self.assertIn(key, host_status)
+ self.assertIsInstance(host_status['hypervisor_version'], int)
@catch_notimplementederror
def test_get_host_stats(self):
host_status = self.connection.get_host_stats()
self._check_available_resource_fields(host_status)
- self.assertIsInstance(host_status['hypervisor_version'], int)
@catch_notimplementederror
def test_get_available_resource(self):
@@ -784,8 +800,7 @@ def test_internal_set_host_enabled(self):
# Previous status of the service: disabled: False
service_mock.configure_mock(disabled_reason='None',
disabled=False)
- from nova.objects import service as service_obj
- with mock.patch.object(service_obj.Service, "get_by_compute_host",
+ with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(False, 'ERROR!')
self.assertTrue(service_mock.disabled)
@@ -798,8 +813,7 @@ def test_set_host_enabled_when_auto_disabled(self):
# Previous status of the service: disabled: True, 'AUTO: ERROR'
service_mock.configure_mock(disabled_reason='AUTO: ERROR',
disabled=True)
- from nova.objects import service as service_obj
- with mock.patch.object(service_obj.Service, "get_by_compute_host",
+ with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(True)
self.assertFalse(service_mock.disabled)
@@ -812,8 +826,7 @@ def test_set_host_enabled_when_manually_disabled(self):
# Previous status of the service: disabled: True, 'Manually disabled'
service_mock.configure_mock(disabled_reason='Manually disabled',
disabled=True)
- from nova.objects import service as service_obj
- with mock.patch.object(service_obj.Service, "get_by_compute_host",
+ with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(True)
self.assertTrue(service_mock.disabled)
@@ -826,9 +839,26 @@ def test_set_host_enabled_dont_override_manually_disabled(self):
# Previous status of the service: disabled: True, 'Manually disabled'
service_mock.configure_mock(disabled_reason='Manually disabled',
disabled=True)
- from nova.objects import service as service_obj
- with mock.patch.object(service_obj.Service, "get_by_compute_host",
+ with mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock):
self.connection._set_host_enabled(False, 'ERROR!')
self.assertTrue(service_mock.disabled)
self.assertEqual(service_mock.disabled_reason, 'Manually disabled')
+
+ @catch_notimplementederror
+ @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
+ def test_unplug_vifs_with_destroy_vifs_false(self, unplug_vifs_mock):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.cleanup(self.ctxt, instance_ref, network_info,
+ destroy_vifs=False)
+ self.assertEqual(unplug_vifs_mock.call_count, 0)
+
+ @catch_notimplementederror
+ @mock.patch.object(libvirt.driver.LibvirtDriver, '_unplug_vifs')
+ def test_unplug_vifs_with_destroy_vifs_true(self, unplug_vifs_mock):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.cleanup(self.ctxt, instance_ref, network_info,
+ destroy_vifs=True)
+ self.assertEqual(unplug_vifs_mock.call_count, 1)
+ unplug_vifs_mock.assert_called_once_with(instance_ref,
+ network_info, True)
diff --git a/nova/tests/virt/vmwareapi/fake.py b/nova/tests/virt/vmwareapi/fake.py
index 5f46abb581..1c7032320c 100644
--- a/nova/tests/virt/vmwareapi/fake.py
+++ b/nova/tests/virt/vmwareapi/fake.py
@@ -23,12 +23,13 @@
import pprint
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
@@ -38,6 +39,7 @@
_FAKE_FILE_SIZE = 1024
_db_content = {}
+_array_types = {}
LOG = logging.getLogger(__name__)
@@ -48,7 +50,7 @@ def log_db_contents(msg=None):
{'text': msg or "", 'content': pprint.pformat(_db_content)})
-def reset(vc=False):
+def reset():
"""Resets the db contents."""
cleanup()
create_network()
@@ -56,16 +58,13 @@ def reset(vc=False):
create_host_storage_system()
ds_ref1 = create_datastore('ds1', 1024, 500)
create_host(ds_ref=ds_ref1)
- if vc:
- ds_ref2 = create_datastore('ds2', 1024, 500)
- create_host(ds_ref=ds_ref2)
+ ds_ref2 = create_datastore('ds2', 1024, 500)
+ create_host(ds_ref=ds_ref2)
create_datacenter('dc1', ds_ref1)
- if vc:
- create_datacenter('dc2', ds_ref2)
+ create_datacenter('dc2', ds_ref2)
create_res_pool()
- if vc:
- create_cluster('test_cluster', ds_ref1)
- create_cluster('test_cluster2', ds_ref2)
+ create_cluster('test_cluster', ds_ref1)
+ create_cluster('test_cluster2', ds_ref2)
def cleanup():
@@ -111,6 +110,24 @@ def _convert_to_array_of_opt_val(optvals):
return array_of_optv
+def _create_array_of_type(t):
+ """Returns an array to contain objects of type t."""
+ if t in _array_types:
+ return _array_types[t]()
+
+ array_type_name = 'ArrayOf%s' % t
+ array_type = type(array_type_name, (DataObject,), {})
+
+ def __init__(self):
+ super(array_type, self).__init__(array_type_name)
+ setattr(self, t, [])
+
+ setattr(array_type, '__init__', __init__)
+
+ _array_types[t] = array_type
+ return array_type()
+
+
class FakeRetrieveResult(object):
"""Object to retrieve a ObjectContent list."""
@@ -397,7 +414,11 @@ def __init__(self, **kwargs):
self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
self.set("summary.config.instanceUuid", kwargs.get("instanceUuid"))
- self.set("config.hardware.device", kwargs.get("virtual_device", None))
+
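+        # Hardware devices are exposed through an ArrayOfVirtualDevice
+        # wrapper, matching what the real API returns.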
+ devices = _create_array_of_type('VirtualDevice')
+ devices.VirtualDevice = kwargs.get("virtual_device", [])
+ self.set("config.hardware.device", devices)
+
exconfig_do = kwargs.get("extra_config", None)
self.set("config.extraConfig",
_convert_to_array_of_opt_val(exconfig_do))
@@ -405,7 +426,7 @@ def __init__(self, **kwargs):
for optval in exconfig_do:
self.set('config.extraConfig["%s"]' % optval.key, optval)
self.set('runtime.host', kwargs.get("runtime_host", None))
- self.device = kwargs.get("virtual_device")
+ self.device = kwargs.get("virtual_device", [])
# Sample of diagnostics data is below.
config = [
('template', False),
@@ -439,6 +460,20 @@ def __init__(self, **kwargs):
('featureRequirement', [key1, key2])]
self.set("summary.runtime", runtime)
+ def _update_extra_config(self, extra):
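+        # Update an existing OptionValue in place when the key matches;
+        # otherwise append a new key/value pair.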
+ extra_config = self.get("config.extraConfig")
+ values = extra_config.OptionValue
+ for value in values:
+ if value.key == extra.key:
+ value.value = extra.value
+ return
+ kv = DataObject()
+ kv.key = extra.key
+ kv.value = extra.value
+ extra_config.OptionValue.append(kv)
+ self.set("config.extraConfig", extra_config)
+
def reconfig(self, factory, val):
"""Called to reconfigure the VM. Actually customizes the property
setting of the Virtual Machine object.
@@ -462,6 +497,11 @@ def reconfig(self, factory, val):
if not hasattr(val, 'deviceChange'):
return
+ if hasattr(val, 'extraConfig'):
+            # Each entry either updates an existing key or adds a new one.
+ for extra in val.extraConfig:
+ self._update_extra_config(extra)
+
if len(val.deviceChange) < 2:
return
@@ -480,8 +520,9 @@ def reconfig(self, factory, val):
controller = VirtualLsiLogicController()
controller.key = controller_key
- self.set("config.hardware.device", [disk, controller,
- self.device[0]])
+ devices = _create_array_of_type('VirtualDevice')
+ devices.VirtualDevice = [disk, controller, self.device[0]]
+ self.set("config.hardware.device", devices)
except AttributeError:
pass
@@ -620,13 +661,15 @@ def _update_summary(self):
class Datastore(ManagedObject):
"""Datastore class."""
- def __init__(self, name="fake-ds", capacity=1024, free=500):
+ def __init__(self, name="fake-ds", capacity=1024, free=500,
+ accessible=True, maintenance_mode="normal"):
super(Datastore, self).__init__("ds")
self.set("summary.type", "VMFS")
self.set("summary.name", name)
self.set("summary.capacity", capacity * units.Gi)
self.set("summary.freeSpace", free * units.Gi)
- self.set("summary.accessible", True)
+ self.set("summary.accessible", accessible)
+ self.set("summary.maintenanceMode", maintenance_mode)
self.set("browser", "")
@@ -956,13 +999,6 @@ def fake_upload_image(context, image, instance, **kwargs):
pass
-def fake_get_vmdk_size_and_properties(context, image_id, instance):
- """Fakes the file size and properties fetch for the image file."""
- props = {"vmware_ostype": constants.DEFAULT_OS_TYPE,
- "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE}
- return _FAKE_FILE_SIZE, props
-
-
def _get_vm_mdo(vm_ref):
"""Gets the Virtual Machine with the ref from the db."""
if _db_content.get("VirtualMachine", None) is None:
@@ -994,6 +1030,21 @@ def create(self, obj_name):
return DataObject(obj_name)
+class FakeService(DataObject):
+ """Fake service class."""
+
+ def Logout(self, session_manager):
+ pass
+
+
+class FakeClient(DataObject):
+ """Fake client class."""
+
+ def __init__(self):
+ """Creates a namespace object."""
+ self.service = FakeService()
+
+
class FakeSession(object):
"""Fake Session Class."""
@@ -1036,7 +1087,7 @@ def __init__(self, protocol="https", host="localhost", trace=None):
contents and the cookies for the session.
"""
self._session = None
- self.client = DataObject()
+ self.client = FakeClient()
self.client.factory = FakeFactory()
transport = DataObject()
@@ -1106,7 +1157,20 @@ def _create_vm(self, method, *args, **kwargs):
"""Creates and registers a VM object with the Host System."""
config_spec = kwargs.get("config")
pool = kwargs.get('pool')
- ds = _db_content["Datastore"].keys()[0]
+
+ vm_path = ds_util.DatastorePath.parse(config_spec.files.vmPathName)
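+        # Find the datastore named in the VM path; if the fake db does not
+        # know it yet, register a new datastore under that name.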
+ for key, value in _db_content["Datastore"].iteritems():
+ if value.get('summary.name') == vm_path.datastore:
+ ds = key
+ break
+ else:
+ ds = create_datastore(vm_path.datastore, 1024, 500)
+
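+        # Attach only the devices that the config spec explicitly adds.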
+ devices = []
+ for device_change in config_spec.deviceChange:
+ if device_change.operation == 'add':
+ devices.append(device_change.device)
+
host = _db_content["HostSystem"].keys()[0]
vm_dict = {"name": config_spec.name,
"ds": [ds],
@@ -1116,7 +1180,7 @@ def _create_vm(self, method, *args, **kwargs):
"numCpu": config_spec.numCPUs,
"mem": config_spec.memoryMB,
"extra_config": config_spec.extraConfig,
- "virtual_device": config_spec.deviceChange,
+ "virtual_device": devices,
"instanceUuid": config_spec.instanceUuid}
virtual_machine = VirtualMachine(**vm_dict)
_create_object("VirtualMachine", virtual_machine)
@@ -1197,7 +1261,8 @@ def _clone_vm(self, method, *args, **kwargs):
"numCpu": source_vm_mdo.get("summary.config.numCpu"),
"mem": source_vm_mdo.get("summary.config.memorySizeMB"),
"extra_config": source_vm_mdo.get("config.extraConfig").OptionValue,
- "virtual_device": source_vm_mdo.get("config.hardware.device"),
+ "virtual_device":
+ source_vm_mdo.get("config.hardware.device").VirtualDevice,
"instanceUuid": source_vm_mdo.get("summary.config.instanceUuid")}
if clone_spec.config is not None:
@@ -1445,8 +1510,6 @@ def __getattr__(self, attr_name):
elif attr_name == "FindAllByUuid":
return lambda *args, **kwargs: self._find_all_by_uuid(attr_name,
*args, **kwargs)
- elif attr_name == "Rename_Task":
- return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "SearchDatastore_Task":
return lambda *args, **kwargs: self._search_ds(attr_name,
*args, **kwargs)
@@ -1465,8 +1528,6 @@ def __getattr__(self, attr_name):
elif attr_name == "CancelRetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties_cancel(
attr_name, *args, **kwargs)
- elif attr_name == "AcquireCloneTicket":
- return lambda *args, **kwargs: self._just_return()
elif attr_name == "AddPortGroup":
return lambda *args, **kwargs: self._add_port_group(attr_name,
*args, **kwargs)
@@ -1474,8 +1535,6 @@ def __getattr__(self, attr_name):
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "ShutdownHost_Task":
return lambda *args, **kwargs: self._just_return_task(attr_name)
- elif attr_name == "PowerDownHostToStandBy_Task":
- return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "PowerUpHostFromStandBy_Task":
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "EnterMaintenanceMode_Task":
diff --git a/nova/tests/virt/vmwareapi/stubs.py b/nova/tests/virt/vmwareapi/stubs.py
index 440fff3dff..44bbe26363 100644
--- a/nova/tests/virt/vmwareapi/stubs.py
+++ b/nova/tests/virt/vmwareapi/stubs.py
@@ -25,7 +25,6 @@
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import network_util
-from nova.virt.vmwareapi import vmware_images
def fake_get_vim_object(arg):
@@ -65,8 +64,6 @@ def set_stubs(stubs):
"""Set the stubs."""
stubs.Set(network_util, 'get_network_with_the_name',
fake.fake_get_network)
- stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
- fake.fake_get_vmdk_size_and_properties)
stubs.Set(driver.VMwareAPISession, "_get_vim_object",
fake_get_vim_object)
stubs.Set(driver.VMwareAPISession, "_is_vim_object",
diff --git a/nova/tests/virt/vmwareapi/test_configdrive.py b/nova/tests/virt/vmwareapi/test_configdrive.py
index e957dbdb02..f9a1e7949d 100644
--- a/nova/tests/virt/vmwareapi/test_configdrive.py
+++ b/nova/tests/virt/vmwareapi/test_configdrive.py
@@ -14,7 +14,6 @@
# under the License.
import contextlib
-import copy
import fixtures
import mock
@@ -23,6 +22,7 @@
from nova import context
from nova.image import glance
from nova import test
+from nova.tests import fake_instance
import nova.tests.image.fake
from nova.tests import utils
from nova.tests.virt.vmwareapi import fake as vmwareapi_fake
@@ -47,7 +47,7 @@ def setUp(self):
host_password='test_pass',
use_linked_clone=False, group='vmware')
self.flags(vnc_enabled=False)
- vmwareapi_fake.reset(vc=True)
+ vmwareapi_fake.reset()
stubs.set_stubs(self.stubs)
nova.tests.image.fake.stub_out_image_service(self.stubs)
self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
@@ -56,28 +56,29 @@ def setUp(self):
self.node_name = '%s(%s)' % (self.conn.dict_mors.keys()[0],
cluster_name)
image_ref = nova.tests.image.fake.get_valid_image_id()
- self.test_instance = {'vm_state': 'building',
- 'project_id': 'fake',
- 'user_id': 'fake',
- 'name': '1',
- 'kernel_id': '1',
- 'ramdisk_id': '1',
- 'mac_addresses': [
- {'address': 'de:ad:be:ef:be:ef'}
- ],
- 'memory_mb': 8192,
- 'flavor': 'm1.large',
- 'vcpus': 4,
- 'root_gb': 80,
- 'image_ref': image_ref,
- 'host': 'fake_host',
- 'task_state':
- 'scheduling',
- 'reservation_id': 'r-3t8muvr0',
- 'id': 1,
- 'uuid': 'fake-uuid',
- 'node': self.node_name,
- 'metadata': []}
+ instance_values = {
+ 'vm_state': 'building',
+ 'project_id': 'fake',
+ 'user_id': 'fake',
+ 'name': '1',
+ 'kernel_id': '1',
+ 'ramdisk_id': '1',
+ 'mac_addresses': [{'address': 'de:ad:be:ef:be:ef'}],
+ 'memory_mb': 8192,
+ 'flavor': 'm1.large',
+ 'vcpus': 4,
+ 'root_gb': 80,
+ 'image_ref': image_ref,
+ 'host': 'fake_host',
+ 'task_state': 'scheduling',
+ 'reservation_id': 'r-3t8muvr0',
+ 'id': 1,
+ 'uuid': 'fake-uuid',
+ 'node': self.node_name,
+ 'metadata': []
+ }
+ self.test_instance = fake_instance.fake_instance_obj(self.context,
+ **instance_values)
(image_service, image_id) = glance.get_remote_image_service(context,
image_ref)
@@ -123,7 +124,7 @@ def _spawn_vm(self, injected_files=None, admin_password=None,
injected_files = injected_files or []
read_file_handle = mock.MagicMock()
write_file_handle = mock.MagicMock()
- self.image_ref = self.instance['image_ref']
+ self.image_ref = self.test_instance.image_ref
def fake_read_handle(read_iter):
return read_file_handle
@@ -149,7 +150,7 @@ def fake_write_handle(host, dc_name, ds_name, cookies,
side_effect=fake_read_handle),
mock.patch.object(vmware_images, 'start_transfer')
) as (fake_http_write, fake_glance_read, fake_start_transfer):
- self.conn.spawn(self.context, self.instance, self.image,
+ self.conn.spawn(self.context, self.test_instance, self.image,
injected_files=injected_files,
admin_password=admin_password,
network_info=self.network_info,
@@ -159,11 +160,10 @@ def fake_write_handle(host, dc_name, ds_name, cookies,
write_file_handle=write_file_handle)
def test_create_vm_with_config_drive_verify_method_invocation(self):
- self.instance = copy.deepcopy(self.test_instance)
- self.instance['config_drive'] = True
+ self.test_instance.config_drive = 'True'
self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
- self.conn._vmops._create_config_drive(self.instance,
+ self.conn._vmops._create_config_drive(self.test_instance,
mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
@@ -183,8 +183,7 @@ def test_create_vm_with_config_drive_verify_method_invocation(self):
self._spawn_vm()
def test_create_vm_without_config_drive(self):
- self.instance = copy.deepcopy(self.test_instance)
- self.instance['config_drive'] = False
+ self.test_instance.config_drive = None
self.mox.StubOutWithMock(vmops.VMwareVMOps, '_create_config_drive')
self.mox.StubOutWithMock(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
self.mox.ReplayAll()
@@ -194,6 +193,5 @@ def test_create_vm_without_config_drive(self):
self._spawn_vm()
def test_create_vm_with_config_drive(self):
- self.instance = copy.deepcopy(self.test_instance)
- self.instance['config_drive'] = True
+ self.test_instance.config_drive = 'True'
self._spawn_vm()
diff --git a/nova/tests/virt/vmwareapi/test_driver_api.py b/nova/tests/virt/vmwareapi/test_driver_api.py
index a8b06d285c..ddb1d7966a 100644
--- a/nova/tests/virt/vmwareapi/test_driver_api.py
+++ b/nova/tests/virt/vmwareapi/test_driver_api.py
@@ -38,6 +38,7 @@
from nova import context
from nova import exception
from nova.image import glance
+from nova.network import model as network_model
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova.openstack.common import units
@@ -53,12 +54,13 @@
from nova.tests.virt.vmwareapi import stubs
from nova import utils as nova_utils
from nova.virt import driver as v_driver
-from nova.virt import fake
+from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import read_write_util
+from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -119,6 +121,43 @@ def _fake_create_session(inst):
inst._session = session
+class VMwareDriverStartupTestCase(test.NoDBTestCase):
+ def _start_driver_with_flags(self, expected_exception_type, startup_flags):
+ self.flags(**startup_flags)
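+        # Patch out session creation so that only the driver's own
+        # validation of the startup flags is exercised.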
+ with mock.patch(
+ 'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
+ e = self.assertRaises(
+ Exception, driver.VMwareVCDriver, None) # noqa
+ self.assertIs(type(e), expected_exception_type)
+
+ def test_start_driver_no_user(self):
+ self._start_driver_with_flags(
+ Exception,
+ dict(host_ip='ip', host_password='password',
+ group='vmware'))
+
+ def test_start_driver_no_host(self):
+ self._start_driver_with_flags(
+ Exception,
+ dict(host_username='username', host_password='password',
+ group='vmware'))
+
+ def test_start_driver_no_password(self):
+ self._start_driver_with_flags(
+ Exception,
+ dict(host_ip='ip', host_username='username',
+ group='vmware'))
+
+ def test_start_driver_with_user_host_password(self):
+        # Hitting the InvalidInput exception (raised for the malformed
+        # datastore_regex) shows that no exception was raised about a
+        # missing user/password/host.
+ self._start_driver_with_flags(
+ nova.exception.InvalidInput,
+ dict(host_ip='ip', host_password='password',
+ host_username="user", datastore_regex="bad(regex",
+ group='vmware'))
+
+
class VMwareSessionTestCase(test.NoDBTestCase):
def _fake_is_vim_object(self, module):
@@ -305,10 +344,12 @@ def setUp(self):
super(VMwareAPIVMTestCase, self).setUp()
vm_util.vm_refs_cache_reset()
self.context = context.RequestContext('fake', 'fake', is_admin=False)
- self.flags(host_ip='test_url',
+ cluster_name = 'test_cluster'
+ cluster_name2 = 'test_cluster2'
+ self.flags(cluster_name=[cluster_name, cluster_name2],
+ host_ip='test_url',
host_username='test_username',
host_password='test_pass',
- datastore_regex='.*',
api_retry_count=1,
use_linked_clone=False, group='vmware')
self.flags(vnc_enabled=False,
@@ -316,13 +357,17 @@ def setUp(self):
my_ip='')
self.user_id = 'fake'
self.project_id = 'fake'
- self.node_name = 'test_url'
- self.ds = 'ds1'
self.context = context.RequestContext(self.user_id, self.project_id)
stubs.set_stubs(self.stubs)
vmwareapi_fake.reset()
nova.tests.image.fake.stub_out_image_service(self.stubs)
- self.conn = driver.VMwareESXDriver(fake.FakeVirtAPI)
+ self.conn = driver.VMwareVCDriver(None, False)
+ self.node_name = self.conn._resources.keys()[0]
+ self.node_name2 = self.conn._resources.keys()[1]
+ if cluster_name2 in self.node_name2:
+ self.ds = 'ds1'
+ else:
+ self.ds = 'ds2'
self.vim = vmwareapi_fake.FakeVim()
# NOTE(vish): none of the network plugging code is actually
@@ -339,7 +384,7 @@ def setUp(self):
}
self.fake_image_uuid = self.image['id']
nova.tests.image.fake.stub_out_image_service(self.stubs)
- self.vnc_host = 'test_url'
+ self.vnc_host = 'ha-host'
self._set_exception_vars()
self.instance_without_compute = {'node': None,
'vm_state': 'building',
@@ -370,12 +415,54 @@ def tearDown(self):
vmwareapi_fake.cleanup()
nova.tests.image.fake.FakeImageService_reset()
+ def test_get_host_ip_addr(self):
+ self.assertEqual('test_url', self.conn.get_host_ip_addr())
+
+ def test_init_host_with_no_session(self):
+ self.conn._session = mock.Mock()
+ self.conn._session.vim = None
+ self.conn.init_host('fake_host')
+ self.conn._session._create_session.assert_called_once_with()
+
+ def test_init_host(self):
+ try:
+ self.conn.init_host("fake_host")
+ except Exception as ex:
+ self.fail("init_host raised: %s" % ex)
+
def _set_exception_vars(self):
self.wait_task = self.conn._session._wait_for_task
self.call_method = self.conn._session._call_method
self.task_ref = None
self.exception = False
+ def test_cleanup_host(self):
+ self.conn.init_host("fake_host")
+ try:
+ self.conn.cleanup_host("fake_host")
+ except Exception as ex:
+ self.fail("cleanup_host raised: %s" % ex)
+
+ @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
+ def test_cleanup_host_direct(self, mock_init):
+ mock_init.return_value = None
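+        # __init__ is stubbed out, so cleanup_host runs against a fully
+        # mocked session without a live connection.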
+ vcdriver = driver.VMwareVCDriver(None, False)
+ vcdriver._session = mock.Mock()
+ vcdriver.cleanup_host("foo")
+ vcdriver._session.vim.get_service_content.assert_called_once_with()
+ vcdriver._session.vim.client.service.Logout.assert_called_once_with(
+ vcdriver._session.vim.get_service_content().sessionManager
+ )
+
+ @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
+ def test_cleanup_host_direct_with_bad_logout(self, mock_init):
+ mock_init.return_value = None
+ vcdriver = driver.VMwareVCDriver(None, False)
+ vcdriver._session = mock.Mock()
+ fault = suds.WebFault(mock.Mock(), mock.Mock())
+ vcdriver._session.vim.client.service.Logout.side_effect = fault
+ vcdriver.cleanup_host("foo")
+
def test_driver_capabilities(self):
self.assertTrue(self.conn.capabilities['has_imagecache'])
self.assertFalse(self.conn.capabilities['supports_recreate'])
@@ -548,7 +635,7 @@ def _check_vm_record(self, num_instances=1, powered_on=True):
self.type_data['memory_mb'])
self.assertEqual(
- vm.get("config.hardware.device")[2].device.obj_name,
+ vm.get("config.hardware.device").VirtualDevice[2].obj_name,
"ns0:VirtualE1000")
if powered_on:
# Check that the VM is running according to Nova
@@ -565,7 +652,8 @@ def _check_vm_record(self, num_instances=1, powered_on=True):
found_vm_uuid = False
found_iface_id = False
- for c in vm.get("config.extraConfig").OptionValue:
+ extras = vm.get("config.extraConfig")
+ for c in extras.OptionValue:
if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
found_vm_uuid = True
if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
@@ -610,73 +698,103 @@ def test_list_instance_uuids_invalid_uuid(self):
self.assertEqual(len(uuids), 0)
def _cached_files_exist(self, exists=True):
- cache = ('[%s] vmware_base/%s/%s.vmdk' %
- (self.ds, self.fake_image_uuid, self.fake_image_uuid))
+ cache = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.vmdk' % self.fake_image_uuid)
if exists:
- self.assertTrue(vmwareapi_fake.get_file(cache))
+ self.assertTrue(vmwareapi_fake.get_file(str(cache)))
else:
- self.assertFalse(vmwareapi_fake.get_file(cache))
+ self.assertFalse(vmwareapi_fake.get_file(str(cache)))
- def test_instance_dir_disk_created(self):
+ @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ 'from_image')
+ def test_instance_dir_disk_created(self, mock_from_image):
"""Test image file is cached when even when use_linked_clone
is False
"""
+ img_props = vmware_images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ linked_clone=False)
+ mock_from_image.return_value = img_props
self._create_vm()
- inst_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
- self.assertTrue(vmwareapi_fake.get_file(inst_file_path))
+ path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
self._cached_files_exist()
- def test_cache_dir_disk_created(self):
+ @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ 'from_image')
+ def test_cache_dir_disk_created(self, mock_from_image):
"""Test image disk is cached when use_linked_clone is True."""
self.flags(use_linked_clone=True, group='vmware')
+
+ img_props = vmware_images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=1 * units.Ki,
+ disk_type=constants.DISK_TYPE_SPARSE)
+
+ mock_from_image.return_value = img_props
+
self._create_vm()
- file = '[%s] vmware_base/%s/%s.vmdk' % (self.ds, self.fake_image_uuid,
- self.fake_image_uuid)
- root = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
- self.fake_image_uuid,
- self.fake_image_uuid)
- self.assertTrue(vmwareapi_fake.get_file(file))
- self.assertTrue(vmwareapi_fake.get_file(root))
+ path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.vmdk' % self.fake_image_uuid)
+ root = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80.vmdk' % self.fake_image_uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
+ self.assertTrue(vmwareapi_fake.get_file(str(root)))
def _iso_disk_type_created(self, instance_type='m1.large'):
self.image['disk_format'] = 'iso'
self._create_vm(instance_type=instance_type)
- file = '[%s] vmware_base/%s/%s.iso' % (self.ds, self.fake_image_uuid,
- self.fake_image_uuid)
- self.assertTrue(vmwareapi_fake.get_file(file))
+ path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.iso' % self.fake_image_uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
def test_iso_disk_type_created(self):
self._iso_disk_type_created()
- vmdk_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
- self.assertTrue(vmwareapi_fake.get_file(vmdk_file_path))
+ path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(path)))
def test_iso_disk_type_created_with_root_gb_0(self):
self._iso_disk_type_created(instance_type='m1.micro')
- vmdk_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
- self.assertFalse(vmwareapi_fake.get_file(vmdk_file_path))
+ path = ds_util.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(str(path)))
def test_iso_disk_cdrom_attach(self):
- self.iso_path = (
- '[%s] vmware_base/%s/%s.iso' % (self.ds, self.fake_image_uuid,
- self.fake_image_uuid))
+ iso_path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.iso' % self.fake_image_uuid)
def fake_attach_cdrom(vm_ref, instance, data_store_ref,
iso_uploaded_path):
- self.assertEqual(iso_uploaded_path, self.iso_path)
+ self.assertEqual(iso_uploaded_path, str(iso_path))
self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
fake_attach_cdrom)
self.image['disk_format'] = 'iso'
self._create_vm()
- def test_iso_disk_cdrom_attach_with_config_drive(self):
+ @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ 'from_image')
+ def test_iso_disk_cdrom_attach_with_config_drive(self,
+ mock_from_image):
+ img_props = vmware_images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=80 * units.Gi,
+ file_type='iso',
+ linked_clone=False)
+
+ mock_from_image.return_value = img_props
+
self.flags(force_config_drive=True)
- self.iso_path = [
- '[%s] vmware_base/%s/%s.iso' %
- (self.ds, self.fake_image_uuid, self.fake_image_uuid),
- '[%s] fake-config-drive' % self.ds]
- self.iso_unit_nos = [0, 1]
+ iso_path = [
+ ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.iso' % self.fake_image_uuid),
+ ds_util.DatastorePath(self.ds, 'fake-config-drive')]
self.iso_index = 0
def fake_create_config_drive(instance, injected_files, password,
@@ -685,7 +803,7 @@ def fake_create_config_drive(instance, injected_files, password,
def fake_attach_cdrom(vm_ref, instance, data_store_ref,
iso_uploaded_path):
- self.assertEqual(iso_uploaded_path, self.iso_path[self.iso_index])
+ self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index]))
self.iso_index += 1
self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
@@ -699,7 +817,8 @@ def fake_attach_cdrom(vm_ref, instance, data_store_ref,
def test_cdrom_attach_with_config_drive(self):
self.flags(force_config_drive=True)
- self.iso_path = '[%s] fake-config-drive' % self.ds
+
+ iso_path = ds_util.DatastorePath(self.ds, 'fake-config-drive')
self.cd_attach_called = False
def fake_create_config_drive(instance, injected_files, password,
@@ -708,7 +827,7 @@ def fake_create_config_drive(instance, injected_files, password,
def fake_attach_cdrom(vm_ref, instance, data_store_ref,
iso_uploaded_path):
- self.assertEqual(iso_uploaded_path, self.iso_path)
+ self.assertEqual(iso_uploaded_path, str(iso_path))
self.cd_attach_called = True
self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
@@ -761,6 +880,18 @@ def test_spawn_no_power_on(self):
def test_spawn_power_on(self):
self._spawn_power_state(True)
+ def test_spawn_root_size_0(self):
+ self._create_vm(instance_type='m1.micro')
+ info = self.conn.get_info({'uuid': self.uuid,
+ 'node': self.instance_node})
+ self._check_vm_info(info, power_state.RUNNING)
+ cache = ('[%s] vmware_base/%s/%s.vmdk' %
+ (self.ds, self.fake_image_uuid, self.fake_image_uuid))
+ gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' %
+ (self.ds, self.fake_image_uuid, self.fake_image_uuid))
+ self.assertTrue(vmwareapi_fake.get_file(cache))
+ self.assertFalse(vmwareapi_fake.get_file(gb_cache))
+
def _spawn_with_delete_exception(self, fault=None):
def fake_call_method(module, method, *args, **kwargs):
@@ -813,12 +944,12 @@ def test_spawn_disk_extend(self):
self._check_vm_info(info, power_state.RUNNING)
def test_spawn_disk_extend_exists(self):
- root = ('[%s] vmware_base/%s/%s.80.vmdk' %
- (self.ds, self.fake_image_uuid, self.fake_image_uuid))
- self.root = root
+ root = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80.vmdk' % self.fake_image_uuid)
def _fake_extend(instance, requested_size, name, dc_ref):
- vmwareapi_fake._add_file(self.root)
+ vmwareapi_fake._add_file(str(root))
self.stubs.Set(self.conn._vmops, '_extend_virtual_disk',
_fake_extend)
@@ -827,59 +958,34 @@ def _fake_extend(instance, requested_size, name, dc_ref):
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
self._check_vm_info(info, power_state.RUNNING)
- self.assertTrue(vmwareapi_fake.get_file(root))
-
- def test_spawn_disk_extend_sparse(self):
- self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties')
- result = [1024, {"vmware_ostype": "otherGuest",
- "vmware_adaptertype": "lsiLogic",
- "vmware_disktype": "sparse"}]
- vmware_images.get_vmdk_size_and_properties(
- mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(result)
- self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
- requested_size = 80 * units.Mi
- self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
- requested_size, mox.IgnoreArg(), mox.IgnoreArg())
- self.mox.ReplayAll()
- self._create_vm()
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.RUNNING)
+ self.assertTrue(vmwareapi_fake.get_file(str(root)))
- def test_spawn_disk_extend_insufficient_disk_space(self):
- self.flags(use_linked_clone=True, group='vmware')
- self.wait_task = self.conn._session._wait_for_task
- self.call_method = self.conn._session._call_method
- self.task_ref = None
- id = self.fake_image_uuid
- cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
- id, id)
- tmp_file = '[%s] vmware_base/%s/%s.80-flat.vmdk' % (self.ds,
- id, id)
-
- def fake_wait_for_task(task_ref):
- if task_ref == self.task_ref:
- self.task_ref = None
- self.assertTrue(vmwareapi_fake.get_file(cached_image))
- self.assertTrue(vmwareapi_fake.get_file(tmp_file))
- raise exception.NovaException('No space!')
- return self.wait_task(task_ref)
+ @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ 'from_image')
+ def test_spawn_disk_extend_sparse(self, mock_from_image):
+ img_props = vmware_images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=units.Ki,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=True)
- def fake_call_method(module, method, *args, **kwargs):
- task_ref = self.call_method(module, method, *args, **kwargs)
- if method == "ExtendVirtualDisk_Task":
- self.task_ref = task_ref
- return task_ref
+ mock_from_image.return_value = img_props
- self.stubs.Set(self.conn._session, "_call_method", fake_call_method)
- self.stubs.Set(self.conn._session, "_wait_for_task",
- fake_wait_for_task)
-
- self.assertRaises(exception.NovaException,
- self._create_vm)
- self.assertFalse(vmwareapi_fake.get_file(cached_image))
- self.assertFalse(vmwareapi_fake.get_file(tmp_file))
+ with contextlib.nested(
+ mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
+ mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'),
+ ) as (mock_extend, mock_get_dc):
+ dc_val = mock.Mock()
+ dc_val.ref = "fake_dc_ref"
+ dc_val.name = "dc1"
+ mock_get_dc.return_value = dc_val
+ self._create_vm()
+ iid = img_props.image_id
+ cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
+ iid, '%s.80.vmdk' % iid)
+ mock_extend.assert_called_once_with(
+ self.instance, self.instance.root_gb * units.Mi,
+ str(cached_image), "fake_dc_ref")
def test_spawn_disk_extend_failed_copy(self):
# Spawn instance
@@ -995,28 +1101,63 @@ def fake_call_method(module, method, *args, **kwargs):
self.assertRaises(DeleteError, self._create_vm)
self.assertTrue(vmwareapi_fake.get_file(cached_image))
- def test_spawn_disk_invalid_disk_size(self):
- self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties')
- result = [82 * units.Gi,
- {"vmware_ostype": "otherGuest",
- "vmware_adaptertype": "lsiLogic",
- "vmware_disktype": "sparse"}]
- vmware_images.get_vmdk_size_and_properties(
- mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(result)
- self.mox.ReplayAll()
+ @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ 'from_image')
+ def test_spawn_disk_invalid_disk_size(self, mock_from_image):
+ img_props = vmware_images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=82 * units.Gi,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=True)
+
+ mock_from_image.return_value = img_props
+
self.assertRaises(exception.InstanceUnacceptable,
self._create_vm)
- def test_spawn_invalid_disk_format(self):
- self._create_instance()
- self.image['disk_format'] = 'invalid'
- self.assertRaises(exception.InvalidDiskFormat,
- self.conn.spawn, self.context,
- self.instance, self.image,
- injected_files=[], admin_password=None,
- network_info=self.network_info,
- block_device_info=None)
+ @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ 'from_image')
+ def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image):
+ img_props = vmware_images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=1024,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=True)
+
+ mock_from_image.return_value = img_props
+
+ cached_image = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80.vmdk' %
+ self.fake_image_uuid)
+ tmp_file = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ '%s.80-flat.vmdk' %
+ self.fake_image_uuid)
+
+ NoDiskSpace = error_util.get_fault_class('NoDiskSpace')
+
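+        # Fail the ExtendVirtualDisk task with NoDiskSpace and verify that
+        # both the cached image and the temporary flat file are cleaned up.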
+ def fake_wait_for_task(task_ref):
+ if task_ref == self.task_ref:
+ self.task_ref = None
+ raise NoDiskSpace()
+ return self.wait_task(task_ref)
+
+ def fake_call_method(module, method, *args, **kwargs):
+ task_ref = self.call_method(module, method, *args, **kwargs)
+ if method == 'ExtendVirtualDisk_Task':
+ self.task_ref = task_ref
+ return task_ref
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ fake_wait_for_task),
+ mock.patch.object(self.conn._session, '_call_method',
+ fake_call_method)
+ ) as (mock_wait_for_task, mock_call_method):
+ self.assertRaises(NoDiskSpace, self._create_vm)
+ self.assertFalse(vmwareapi_fake.get_file(str(cached_image)))
+ self.assertFalse(vmwareapi_fake.get_file(str(tmp_file)))
def test_spawn_with_move_file_exists_exception(self):
# The test will validate that the spawn completes
@@ -1151,12 +1292,6 @@ def _spawn_attach_volume_vmdk(self, set_image_ref=True, vc_support=False):
network_info=self.network_info,
block_device_info=block_device_info)
- def test_spawn_attach_volume_vmdk(self):
- self._spawn_attach_volume_vmdk()
-
- def test_spawn_attach_volume_vmdk_no_image_ref(self):
- self._spawn_attach_volume_vmdk(set_image_ref=False)
-
def test_spawn_attach_volume_iscsi(self):
self._create_instance()
self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
@@ -1280,6 +1415,37 @@ def test_snapshot_delete_vm_snapshot(self):
self._test_snapshot()
+ def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
+ self._create_vm()
+ fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
+ snapshot_ref = vmwareapi_fake.ManagedObjectReference(
+ value="Snapshot-123",
+ name="VirtualMachineSnapshot")
+
+ with contextlib.nested(
+ mock.patch.object(self.conn._session, '_wait_for_task',
+ side_effect=exception),
+ mock.patch.object(time, 'sleep')
+ ) as (_fake_wait, _fake_sleep):
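+            # A TaskInProgress failure is retried, sleeping between
+            # attempts; any other exception must propagate immediately.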
+ if exception != error_util.TaskInProgress:
+ self.assertRaises(exception,
+ self.conn._vmops._delete_vm_snapshot,
+ self.instance, fake_vm, snapshot_ref)
+ self.assertEqual(0, _fake_sleep.call_count)
+ else:
+ self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm,
+ snapshot_ref)
+ self.assertEqual(call_count - 1, _fake_sleep.call_count)
+ self.assertEqual(call_count, _fake_wait.call_count)
+
+ def test_snapshot_delete_vm_snapshot_exception(self):
+ self._snapshot_delete_vm_snapshot_exception(exception.NovaException)
+
+ def test_snapshot_delete_vm_snapshot_exception_retry(self):
+ self.flags(api_retry_count=5, group='vmware')
+ self._snapshot_delete_vm_snapshot_exception(error_util.TaskInProgress,
+ 5)
+
def test_reboot(self):
self._create_vm()
info = self.conn.get_info({'name': 1, 'uuid': self.uuid,
@@ -1409,15 +1575,6 @@ def test_power_off_non_existent(self):
self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
self.instance)
- def test_power_off_suspended(self):
- self._create_vm()
- self.conn.suspend(self.instance)
- info = self.conn.get_info({'uuid': self.uuid,
- 'node': self.instance_node})
- self._check_vm_info(info, power_state.SUSPENDED)
- self.assertRaises(exception.InstancePowerOffFailure,
- self.conn.power_off, self.instance)
-
def test_resume_state_on_host_boot(self):
self._create_vm()
self.mox.StubOutWithMock(vm_util, 'get_vm_state_from_name')
@@ -1461,12 +1618,12 @@ def destroy_rescued(self, fake_method):
) as (fake_detach, fake_power_on):
self.instance['vm_state'] = vm_states.RESCUED
self.conn.destroy(self.context, self.instance, self.network_info)
- inst_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
- self.assertFalse(vmwareapi_fake.get_file(inst_path))
- rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds,
- self.uuid,
- self.uuid)
- self.assertFalse(vmwareapi_fake.get_file(rescue_file_path))
+ inst_path = ds_util.DatastorePath(self.ds, self.uuid,
+ '%s.vmdk' % self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(str(inst_path)))
+ rescue_file_path = ds_util.DatastorePath(
+ self.ds, '%s-rescue' % self.uuid, '%s-rescue.vmdk' % self.uuid)
+ self.assertFalse(vmwareapi_fake.get_file(str(rescue_file_path)))
# Unrescue does not power on with destroy
self.assertFalse(fake_power_on.called)
@@ -1515,7 +1672,6 @@ def test_destroy_non_existent(self):
self.network_info,
None, self.destroy_disks)
mock_destroy.assert_called_once_with(self.instance,
- self.network_info,
self.destroy_disks)
def test_destroy_instance_without_compute(self):
@@ -1560,7 +1716,8 @@ def fake_create_config_drive(instance, injected_files, password,
data_store_name, folder,
instance_uuid, cookies):
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
- return "[%s] %s/fake.iso" % (data_store_name, instance_uuid)
+ return str(ds_util.DatastorePath(data_store_name,
+ instance_uuid, 'fake.iso'))
self.stubs.Set(self.conn._vmops, '_create_config_drive',
fake_create_config_drive)
@@ -1605,12 +1762,13 @@ def _fake_http_write(host, data_center_name, datastore_name,
def test_rescue(self):
self._rescue()
- inst_file_path = '[%s] %s/%s.vmdk' % (self.ds, self.uuid, self.uuid)
- self.assertTrue(vmwareapi_fake.get_file(inst_file_path))
- rescue_file_path = '[%s] %s-rescue/%s-rescue.vmdk' % (self.ds,
- self.uuid,
- self.uuid)
- self.assertTrue(vmwareapi_fake.get_file(rescue_file_path))
+ inst_file_path = ds_util.DatastorePath(self.ds, self.uuid,
+ '%s.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(inst_file_path)))
+ rescue_file_path = ds_util.DatastorePath(self.ds,
+ '%s-rescue' % self.uuid,
+ '%s-rescue.vmdk' % self.uuid)
+ self.assertTrue(vmwareapi_fake.get_file(str(rescue_file_path)))
def test_rescue_with_config_drive(self):
self.flags(force_config_drive=True)
@@ -1621,14 +1779,18 @@ def test_unrescue(self):
# with power_on=True, the test_destroy_rescued tests the
# vmops.unrescue with power_on=False
self._rescue()
- self.test_vm_ref = None
- self.test_device_name = None
vm_ref = vm_util.get_vm_ref(self.conn._session,
self.instance)
+ vm_rescue_ref = vm_util.get_vm_ref_from_name(self.conn._session,
+ '%s-rescue' % self.uuid)
- def fake_power_off_vm_ref(vm_ref):
- self.test_vm_ref = vm_ref
- self.assertIsNotNone(vm_ref)
+ self.poweroff_instance = vm_util.power_off_instance
+
+ def fake_power_off_instance(session, instance, vm_ref):
+            # Actually power off the simulated VM, since destroy validates
+            # that the instance is not powered on.
+ self.poweroff_instance(session, instance, vm_ref)
def fake_detach_disk_from_vm(vm_ref, instance,
device_name, destroy_disk=False):
@@ -1637,15 +1799,16 @@ def fake_detach_disk_from_vm(vm_ref, instance,
self._check_vm_info(info, power_state.SHUTDOWN)
with contextlib.nested(
- mock.patch.object(self.conn._vmops, "_power_off_vm_ref",
- side_effect=fake_power_off_vm_ref),
+ mock.patch.object(vm_util, "power_off_instance",
+ side_effect=fake_power_off_instance),
mock.patch.object(self.conn._volumeops, "detach_disk_from_vm",
side_effect=fake_detach_disk_from_vm),
mock.patch.object(vm_util, "power_on_instance"),
) as (poweroff, detach, fake_power_on):
self.conn.unrescue(self.instance, None)
- poweroff.assert_called_once_with(self.test_vm_ref)
- detach.assert_called_once_with(self.test_vm_ref, mock.ANY,
+ poweroff.assert_called_once_with(self.conn._session, mock.ANY,
+ vm_rescue_ref)
+ detach.assert_called_once_with(vm_rescue_ref, mock.ANY,
self.test_device_name)
fake_power_on.assert_called_once_with(self.conn._session,
self.instance,
@@ -1653,15 +1816,6 @@ def fake_detach_disk_from_vm(vm_ref, instance,
self.test_vm_ref = None
self.test_device_name = None
- def test_pause(self):
- # Tests that the VMwareESXDriver does not implement the pause method.
- self.assertRaises(NotImplementedError, self.conn.pause, instance=None)
-
- def test_unpause(self):
- # Tests that the VMwareESXDriver does not implement the unpause method.
- self.assertRaises(NotImplementedError, self.conn.unpause,
- instance=None)
-
def test_get_diagnostics(self):
self._create_vm()
expected = {'memoryReservation': 0, 'suspendInterval': 0,
@@ -1681,6 +1835,22 @@ def test_get_diagnostics(self):
'node': self.instance_node}),
matchers.DictMatches(expected))
+ def test_get_instance_diagnostics(self):
+ self._create_vm()
+ expected = {'uptime': 0,
+ 'memory_details': {'used': 0, 'maximum': 512},
+ 'nic_details': [],
+ 'driver': 'vmwareapi',
+ 'state': 'running',
+ 'version': '1.0',
+ 'cpu_details': [],
+ 'disk_details': [],
+ 'hypervisor_os': 'esxi',
+ 'config_drive': False}
+ actual = self.conn.get_instance_diagnostics(
+ {'name': 1, 'uuid': self.uuid, 'node': self.instance_node})
+ self.assertThat(actual.serialize(), matchers.DictMatches(expected))
+
def test_get_console_output(self):
self.assertRaises(NotImplementedError, self.conn.get_console_output,
None, None)
@@ -1697,12 +1867,6 @@ def _test_finish_migration(self, power_on, resize_instance=False):
image_meta=None,
power_on=power_on)
- def test_confirm_migration(self):
- self._create_vm()
- self.assertRaises(NotImplementedError,
- self.conn.confirm_migration, self.context,
- self.instance, None)
-
def _test_finish_revert_migration(self, power_on):
self._create_vm()
# Ensure ESX driver throws an error
@@ -1712,18 +1876,6 @@ def _test_finish_revert_migration(self, power_on):
instance=self.instance,
network_info=None)
- def test_finish_revert_migration_power_on(self):
- self._test_finish_revert_migration(power_on=True)
-
- def test_finish_revert_migration_power_off(self):
- self._test_finish_revert_migration(power_on=False)
-
- def test_get_console_pool_info(self):
- info = self.conn.get_console_pool_info("console_type")
- self.assertEqual(info['address'], 'test_url')
- self.assertEqual(info['username'], 'test_username')
- self.assertEqual(info['password'], 'test_pass')
-
def test_get_vnc_console_non_existent(self):
self._create_instance()
self.assertRaises(exception.InstanceNotFound,
@@ -1751,9 +1903,6 @@ def test_get_vnc_console_noport(self):
self.context,
self.instance)
- def test_host_ip_addr(self):
- self.assertEqual(self.conn.get_host_ip_addr(), "test_url")
-
def test_get_volume_connector(self):
self._create_vm()
connector_dict = self.conn.get_volume_connector(self.instance)
@@ -1989,13 +2138,13 @@ def _fake_get_timestamp_filename(fake):
_fake_get_timestamp_filename)
def _timestamp_file_exists(self, exists=True):
- timestamp = ('[%s] vmware_base/%s/%s/' %
- (self.ds, self.fake_image_uuid,
- self._get_timestamp_filename()))
+ timestamp = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid,
+ self._get_timestamp_filename() + '/')
if exists:
- self.assertTrue(vmwareapi_fake.get_file(timestamp))
+ self.assertTrue(vmwareapi_fake.get_file(str(timestamp)))
else:
- self.assertFalse(vmwareapi_fake.get_file(timestamp))
+ self.assertFalse(vmwareapi_fake.get_file(str(timestamp)))
def _image_aging_image_marked_for_deletion(self):
self._create_vm(uuid=uuidutils.generate_uuid())
@@ -2022,9 +2171,9 @@ def test_timestamp_file_removed_spawn(self):
def test_timestamp_file_removed_aging(self):
self._timestamp_file_removed()
ts = self._get_timestamp_filename()
- ts_path = ('[%s] vmware_base/%s/%s/' %
- (self.ds, self.fake_image_uuid, ts))
- vmwareapi_fake._add_file(ts_path)
+ ts_path = ds_util.DatastorePath(self.ds, 'vmware_base',
+ self.fake_image_uuid, ts + '/')
+ vmwareapi_fake._add_file(str(ts_path))
self._timestamp_file_exists()
all_instances = [self.instance]
self.conn.manage_image_cache(self.context, all_instances)
@@ -2058,65 +2207,6 @@ def test_image_aging_not_aged(self):
self._cached_files_exist()
-class VMwareAPIHostTestCase(test.NoDBTestCase,
- test_driver.DriverAPITestHelper):
- """Unit tests for Vmware API host calls."""
-
- def setUp(self):
- super(VMwareAPIHostTestCase, self).setUp()
- self.flags(image_cache_subdirectory_name='vmware_base')
- vm_util.vm_refs_cache_reset()
- self.flags(host_ip='test_url',
- host_username='test_username',
- host_password='test_pass', group='vmware')
- vmwareapi_fake.reset()
- stubs.set_stubs(self.stubs)
- self.conn = driver.VMwareESXDriver(False)
-
- def tearDown(self):
- super(VMwareAPIHostTestCase, self).tearDown()
- vmwareapi_fake.cleanup()
-
- def test_public_api_signatures(self):
- self.assertPublicAPISignatures(self.conn)
-
- def test_host_state(self):
- stats = self.conn.get_host_stats()
- self.assertEqual(stats['vcpus'], 16)
- self.assertEqual(stats['disk_total'], 1024)
- self.assertEqual(stats['disk_available'], 500)
- self.assertEqual(stats['disk_used'], 1024 - 500)
- self.assertEqual(stats['host_memory_total'], 1024)
- self.assertEqual(stats['host_memory_free'], 1024 - 500)
- self.assertEqual(stats['hypervisor_version'], 5000000)
- supported_instances = [('i686', 'vmware', 'hvm'),
- ('x86_64', 'vmware', 'hvm')]
- self.assertEqual(stats['supported_instances'], supported_instances)
-
- def _test_host_action(self, method, action, expected=None):
- result = method('host', action)
- self.assertEqual(result, expected)
-
- def test_host_reboot(self):
- self._test_host_action(self.conn.host_power_action, 'reboot')
-
- def test_host_shutdown(self):
- self._test_host_action(self.conn.host_power_action, 'shutdown')
-
- def test_host_startup(self):
- self._test_host_action(self.conn.host_power_action, 'startup')
-
- def test_host_maintenance_on(self):
- self._test_host_action(self.conn.host_maintenance_mode, True)
-
- def test_host_maintenance_off(self):
- self._test_host_action(self.conn.host_maintenance_mode, False)
-
- def test_get_host_uptime(self):
- result = self.conn.get_host_uptime('host')
- self.assertEqual('Please refer to test_url for the uptime', result)
-
-
class VMwareAPIVCDriverTestCase(VMwareAPIVMTestCase,
test_driver.DriverAPITestHelper):
@@ -2130,7 +2220,7 @@ def setUp(self):
task_poll_interval=10, datastore_regex='.*', group='vmware')
self.flags(vnc_enabled=False,
image_cache_subdirectory_name='vmware_base')
- vmwareapi_fake.reset(vc=True)
+ vmwareapi_fake.reset()
self.conn = driver.VMwareVCDriver(None, False)
self.node_name = self.conn._resources.keys()[0]
self.node_name2 = self.conn._resources.keys()[1]
@@ -2175,36 +2265,6 @@ def side_effect():
vcdriver._session._create_session.side_effect = side_effect
return vcdriver
- @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
- def test_init_host_and_cleanup_host(self, mock_init):
- vcdriver = self._setup_mocks_for_session(mock_init)
- vcdriver.init_host("foo")
- vcdriver._session._create_session.assert_called_once_with()
-
- vcdriver.cleanup_host("foo")
- vcdriver._session.vim.client.service.Logout.assert_called_once_with(
- mock.ANY)
-
- @mock.patch('nova.virt.vmwareapi.driver.LOG')
- @mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.__init__')
- def test_cleanup_host_with_no_login(self, mock_init, mock_logger):
- vcdriver = self._setup_mocks_for_session(mock_init)
- vcdriver.init_host("foo")
- vcdriver._session._create_session.assert_called_once_with()
-
- # Not logged in...
- # observe that no exceptions were thrown
- mock_sc = mock.Mock()
- vcdriver._session.vim.retrieve_service_content.return_value = mock_sc
- web_fault = suds.WebFault(mock.Mock(), mock.Mock())
- vcdriver._session.vim.client.service.Logout.side_effect = web_fault
- vcdriver.cleanup_host("foo")
-
- # assert that the mock Logout method was never called
- vcdriver._session.vim.client.service.Logout.assert_called_once_with(
- mock.ANY)
- mock_logger.debug.assert_called_once_with(mock.ANY)
-
def test_host_power_action(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'host', 'action')
@@ -2299,13 +2359,16 @@ def test_snapshot_using_file_manager(self):
uuidutils.generate_uuid().AndReturn(uuid_str)
self.mox.StubOutWithMock(ds_util, 'file_delete')
+ disk_ds_path = ds_util.DatastorePath(
+ self.ds, "vmware_temp", "%s.vmdk" % uuid_str)
+ disk_ds_flat_path = ds_util.DatastorePath(
+ self.ds, "vmware_temp", "%s-flat.vmdk" % uuid_str)
# Check calls for delete vmdk and -flat.vmdk pair
- ds_util.file_delete(mox.IgnoreArg(),
- "[%s] vmware_temp/%s-flat.vmdk" % (self.ds, uuid_str),
- mox.IgnoreArg()).AndReturn(None)
- ds_util.file_delete(mox.IgnoreArg(),
- "[%s] vmware_temp/%s.vmdk" % (self.ds, uuid_str),
+ ds_util.file_delete(
+ mox.IgnoreArg(), disk_ds_flat_path,
mox.IgnoreArg()).AndReturn(None)
+ ds_util.file_delete(
+ mox.IgnoreArg(), disk_ds_path, mox.IgnoreArg()).AndReturn(None)
self.mox.ReplayAll()
self._test_snapshot()
@@ -2318,28 +2381,20 @@ def test_spawn_invalid_node(self):
network_info=self.network_info,
block_device_info=None)
- def test_spawn_with_sparse_image(self):
- # Only a sparse disk image triggers the copy
- self.mox.StubOutWithMock(vmware_images, 'get_vmdk_size_and_properties')
- result = [1024, {"vmware_ostype": "otherGuest",
- "vmware_adaptertype": "lsiLogic",
- "vmware_disktype": "sparse"}]
- vmware_images.get_vmdk_size_and_properties(
- mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(result)
+ @mock.patch.object(nova.virt.vmwareapi.vmware_images.VMwareImage,
+ 'from_image')
+ @mock.patch.object(vmops.VMwareVCVMOps, 'get_copy_virtual_disk_spec')
+ def test_spawn_with_sparse_image(self, mock_get_copy_virtual_disk_spec,
+ mock_from_image):
+ img_info = vmware_images.VMwareImage(
+ image_id=self.fake_image_uuid,
+ file_size=1024,
+ disk_type=constants.DISK_TYPE_SPARSE,
+ linked_clone=False)
- # Ensure VMwareVCVMOps's get_copy_virtual_disk_spec is getting called
- # two times
- self.mox.StubOutWithMock(vmops.VMwareVCVMOps,
- 'get_copy_virtual_disk_spec')
- self.conn._vmops.get_copy_virtual_disk_spec(
- mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(None)
- self.conn._vmops.get_copy_virtual_disk_spec(
- mox.IgnoreArg(), mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(None)
+ mock_from_image.return_value = img_info
+ mock_get_copy_virtual_disk_spec.return_value = None
- self.mox.ReplayAll()
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
'node': self.instance_node})
@@ -2359,6 +2414,117 @@ def test_unplug_vifs(self):
self.conn.unplug_vifs,
instance=self.instance, network_info=None)
+ def _create_vif(self):
+ gw_4 = network_model.IP(address='101.168.1.1', type='gateway')
+ dns_4 = network_model.IP(address='8.8.8.8', type=None)
+ subnet_4 = network_model.Subnet(cidr='101.168.1.0/24',
+ dns=[dns_4],
+ gateway=gw_4,
+ routes=None,
+ dhcp_server='191.168.1.1')
+
+ gw_6 = network_model.IP(address='101:1db9::1', type='gateway')
+ subnet_6 = network_model.Subnet(cidr='101:1db9::/64',
+ dns=None,
+ gateway=gw_6,
+ ips=None,
+ routes=None)
+
+ network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
+ bridge=None,
+ label=None,
+ subnets=[subnet_4,
+ subnet_6],
+ bridge_interface='eth0',
+ vlan=99)
+
+ vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_neutron,
+ type=None,
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc')
+ return vif_bridge_neutron
+
+ def _validate_interfaces(self, id, index, num_iface_ids):
+ vm = self._get_vm_record()
+ found_iface_id = False
+ extras = vm.get("config.extraConfig")
+ key = "nvp.iface-id.%s" % index
+ num_found = 0
+ for c in extras.OptionValue:
+ if c.key.startswith("nvp.iface-id."):
+ num_found += 1
+ if c.key == key and c.value == id:
+ found_iface_id = True
+ self.assertTrue(found_iface_id)
+ self.assertEqual(num_found, num_iface_ids)
+
+ def _attach_interface(self, vif):
+ self.conn.attach_interface(self.instance, self.image, vif)
+ self._validate_interfaces(vif['id'], 1, 2)
+
+ def test_attach_interface(self):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+
+ def test_attach_interface_with_exception(self):
+ self._create_vm()
+ vif = self._create_vif()
+
+ with mock.patch.object(self.conn._session, '_wait_for_task',
+ side_effect=Exception):
+ self.assertRaises(exception.InterfaceAttachFailed,
+ self.conn.attach_interface,
+ self.instance, self.image, vif)
+
+ @mock.patch.object(vif, 'get_network_device',
+ return_value='fake_device')
+ def _detach_interface(self, vif, mock_get_device):
+ self._create_vm()
+ self._attach_interface(vif)
+ self.conn.detach_interface(self.instance, vif)
+ self._validate_interfaces('free', 1, 2)
+
+ def test_detach_interface(self):
+ vif = self._create_vif()
+ self._detach_interface(vif)
+
+ def test_detach_interface_and_attach(self):
+ vif = self._create_vif()
+ self._detach_interface(vif)
+ self.conn.attach_interface(self.instance, self.image, vif)
+ self._validate_interfaces(vif['id'], 1, 2)
+
+ def test_detach_interface_no_device(self):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+ self.assertRaises(exception.NotFound, self.conn.detach_interface,
+ self.instance, vif)
+
+ def test_detach_interface_no_vif_match(self):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+ vif['id'] = 'bad-id'
+ self.assertRaises(exception.NotFound, self.conn.detach_interface,
+ self.instance, vif)
+
+ @mock.patch.object(vif, 'get_network_device',
+ return_value='fake_device')
+ def test_detach_interface_with_exception(self, mock_get_device):
+ self._create_vm()
+ vif = self._create_vif()
+ self._attach_interface(vif)
+
+ with mock.patch.object(self.conn._session, '_wait_for_task',
+ side_effect=Exception):
+ self.assertRaises(exception.InterfaceDetachFailed,
+ self.conn.detach_interface,
+ self.instance, vif)
+
def test_migrate_disk_and_power_off(self):
def fake_update_instance_progress(context, instance, step,
total_steps):
@@ -2367,9 +2533,9 @@ def fake_update_instance_progress(context, instance, step,
def fake_get_host_ref_from_name(dest):
return None
- self._create_vm()
+ self._create_vm(instance_type='m1.large')
vm_ref_orig = vm_util.get_vm_ref(self.conn._session, self.instance)
- flavor = {'name': 'fake', 'flavorid': 'fake_id'}
+ flavor = self._get_instance_type_by_name('m1.large')
self.stubs.Set(self.conn._vmops, "_update_instance_progress",
fake_update_instance_progress)
self.stubs.Set(self.conn._vmops, "_get_host_ref_from_name",
@@ -2440,6 +2606,13 @@ def test_confirm_migration(self):
self._create_vm()
self.conn.confirm_migration(self.context, self.instance, None)
+ def test_resize_to_smaller_disk(self):
+ self._create_vm(instance_type='m1.large')
+ flavor = self._get_instance_type_by_name('m1.small')
+ self.assertRaises(exception.InstanceFaultRollback,
+ self.conn.migrate_disk_and_power_off, self.context,
+ self.instance, 'fake_dest', flavor, None)
+
def test_spawn_attach_volume_vmdk(self):
self._spawn_attach_volume_vmdk(vc_support=True)
@@ -2471,6 +2644,15 @@ def test_rollback_live_migration_at_destination(self):
mock_destroy.assert_called_once_with(self.context,
"instance", [], None)
+ def test_get_instance_disk_info_is_implemented(self):
+ # Ensure that the method has been implemented in the driver
+ try:
+ disk_info = self.conn.get_instance_disk_info('fake_instance_name')
+ self.assertIsNone(disk_info)
+ except NotImplementedError:
+ self.fail("test_get_instance_disk_info() should not raise "
+ "NotImplementedError")
+
def test_destroy(self):
self._create_vm()
info = self.conn.get_info({'uuid': self.uuid,
@@ -2506,7 +2688,6 @@ def test_destroy_non_existent(self):
self.network_info,
None, self.destroy_disks)
mock_destroy.assert_called_once_with(self.instance,
- self.network_info,
self.destroy_disks)
def test_destroy_instance_without_compute(self):
@@ -2526,8 +2707,12 @@ def _test_finish_migration(self, power_on, resize_instance=False):
"""Tests the finish_migration method on VC Driver."""
# setup the test instance in the database
self._create_vm()
- vm_ref = vm_util.get_vm_ref(self.conn._session,
- self.instance)
+ if resize_instance:
+ self.instance.system_metadata = {'old_instance_type_root_gb': '0'}
+ vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
+ datastore = ds_util.Datastore(ref='fake-ref', name='fake')
+ dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
+ vmFolder='fake_folder')
with contextlib.nested(
mock.patch.object(self.conn._session, "_call_method",
return_value='fake-task'),
@@ -2536,9 +2721,17 @@ def _test_finish_migration(self, power_on, resize_instance=False):
mock.patch.object(self.conn._session, "_wait_for_task"),
mock.patch.object(vm_util, "get_vm_resize_spec",
return_value='fake-spec'),
+ mock.patch.object(ds_util, "get_datastore",
+ return_value=datastore),
+ mock.patch.object(self.conn._vmops,
+ 'get_datacenter_ref_and_name',
+ return_value=dc_info),
+ mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
mock.patch.object(vm_util, "power_on_instance")
) as (fake_call_method, fake_update_instance_progress,
- fake_wait_for_task, fake_vm_resize_spec, fake_power_on):
+ fake_wait_for_task, fake_vm_resize_spec,
+ fake_get_datastore, fake_get_datacenter_ref_and_name,
+ fake_extend_virtual_disk, fake_power_on):
self.conn.finish_migration(context=self.context,
migration=None,
instance=self.instance,
@@ -2552,16 +2745,20 @@ def _test_finish_migration(self, power_on, resize_instance=False):
fake_vm_resize_spec.assert_called_once_with(
self.conn._session._get_vim().client.factory,
self.instance)
- fake_call_method.assert_called_once_with(
+ fake_call_method.assert_any_call(
self.conn._session._get_vim(),
"ReconfigVM_Task",
vm_ref,
spec='fake-spec')
fake_wait_for_task.assert_called_once_with('fake-task')
+ fake_extend_virtual_disk.assert_called_once_with(
+ self.instance, self.instance['root_gb'] * units.Mi,
+ None, dc_info.ref)
else:
self.assertFalse(fake_vm_resize_spec.called)
self.assertFalse(fake_call_method.called)
self.assertFalse(fake_wait_for_task.called)
+ self.assertFalse(fake_extend_virtual_disk.called)
if power_on:
fake_power_on.assert_called_once_with(self.conn._session,
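The hunks above repeatedly replace several collaborators at once with `contextlib.nested` plus `mock.patch.object`, then assert against the yielded mocks. Below is a minimal, self-contained sketch of that pattern (a Python 2 idiom; `contextlib.nested` was removed in Python 3, where a multi-context `with` statement does the same job). The `Engine` class is purely illustrative and not part of nova:

```python
import contextlib

import mock


class Engine(object):
    # Illustrative collaborator; stands in for e.g. vm_util in the tests.
    def power_off(self):
        return 'real power_off'

    def power_on(self):
        return 'real power_on'


engine = Engine()
with contextlib.nested(
    mock.patch.object(Engine, 'power_off', return_value='fake-off'),
    mock.patch.object(Engine, 'power_on', return_value='fake-on'),
) as (fake_off, fake_on):
    engine.power_off()
    # Each patch yields a MagicMock that records its calls; this is what
    # the assert_called_once_with() checks in the hunks above rely on.
    fake_off.assert_called_once_with()
    assert not fake_on.called
```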
diff --git a/nova/tests/virt/vmwareapi/test_ds_util.py b/nova/tests/virt/vmwareapi/test_ds_util.py
index 06fd242022..1c0aef6470 100644
--- a/nova/tests/virt/vmwareapi/test_ds_util.py
+++ b/nova/tests/virt/vmwareapi/test_ds_util.py
@@ -13,9 +13,12 @@
# under the License.
import contextlib
+import re
import mock
+from nova import exception
+from nova.i18n import _
from nova.openstack.common import units
from nova import test
from nova.tests.virt.vmwareapi import fake
@@ -34,17 +37,11 @@ def tearDown(self):
super(DsUtilTestCase, self).tearDown()
fake.reset()
- def test_build_datastore_path(self):
- path = ds_util.build_datastore_path('ds', 'folder')
- self.assertEqual('[ds] folder', path)
- path = ds_util.build_datastore_path('ds', 'folder/file')
- self.assertEqual('[ds] folder/file', path)
-
def test_file_delete(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('DeleteDatastoreFile_Task', method)
name = kwargs.get('name')
- self.assertEqual('fake-datastore-path', name)
+ self.assertEqual('[ds] fake/path', name)
datacenter = kwargs.get('datacenter')
self.assertEqual('fake-dc-ref', datacenter)
return 'fake_delete_task'
@@ -54,8 +51,9 @@ def fake_call_method(module, method, *args, **kwargs):
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
ds_util.file_delete(self.session,
- 'fake-datastore-path', 'fake-dc-ref')
+ ds_path, 'fake-dc-ref')
_wait_for_task.assert_has_calls([
mock.call('fake_delete_task')])
@@ -77,8 +75,10 @@ def fake_call_method(module, method, *args, **kwargs):
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
+ src_ds_path = ds_util.DatastorePath('ds', 'tmp/src')
+ dst_ds_path = ds_util.DatastorePath('ds', 'base/dst')
ds_util.file_move(self.session,
- 'fake-dc-ref', '[ds] tmp/src', '[ds] base/dst')
+ 'fake-dc-ref', src_ds_path, dst_ds_path)
_wait_for_task.assert_has_calls([
mock.call('fake_move_task')])
@@ -86,7 +86,7 @@ def test_mkdir(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('MakeDirectory', method)
name = kwargs.get('name')
- self.assertEqual('fake-path', name)
+ self.assertEqual('[ds] fake/path', name)
datacenter = kwargs.get('datacenter')
self.assertEqual('fake-dc-ref', datacenter)
createParentDirectories = kwargs.get('createParentDirectories')
@@ -94,7 +94,8 @@ def fake_call_method(module, method, *args, **kwargs):
with mock.patch.object(self.session, '_call_method',
fake_call_method):
- ds_util.mkdir(self.session, 'fake-path', 'fake-dc-ref')
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
+ ds_util.mkdir(self.session, ds_path, 'fake-dc-ref')
def test_file_exists(self):
def fake_call_method(module, method, *args, **kwargs):
@@ -102,7 +103,7 @@ def fake_call_method(module, method, *args, **kwargs):
ds_browser = args[0]
self.assertEqual('fake-browser', ds_browser)
datastorePath = kwargs.get('datastorePath')
- self.assertEqual('fake-path', datastorePath)
+ self.assertEqual('[ds] fake/path', datastorePath)
return 'fake_exists_task'
# Should never get here
@@ -115,6 +116,7 @@ def fake_wait_for_task(task_ref):
result = fake.DataObject()
result.file = [result_file]
+ result.path = '[ds] fake/path'
task_info = fake.DataObject()
task_info.result = result
@@ -129,8 +131,9 @@ def fake_wait_for_task(task_ref):
fake_call_method),
mock.patch.object(self.session, '_wait_for_task',
fake_wait_for_task)):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
file_exists = ds_util.file_exists(self.session,
- 'fake-browser', 'fake-path', 'fake-file')
+ 'fake-browser', ds_path, 'fake-file')
self.assertTrue(file_exists)
def test_file_exists_fails(self):
@@ -153,10 +156,167 @@ def fake_wait_for_task(task_ref):
fake_call_method),
mock.patch.object(self.session, '_wait_for_task',
fake_wait_for_task)):
+ ds_path = ds_util.DatastorePath('ds', 'fake/path')
file_exists = ds_util.file_exists(self.session,
- 'fake-browser', 'fake-path', 'fake-file')
+ 'fake-browser', ds_path, 'fake-file')
self.assertFalse(file_exists)
+ def test_get_datastore(self):
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore())
+ fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000,
+ False, "normal"))
+ fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
+ True, "inMaintenance"))
+ result = ds_util.get_datastore(
+ fake.FakeObjectRetrievalSession(fake_objects))
+
+ self.assertEqual("fake-ds", result.name)
+ self.assertEqual(units.Ti, result.capacity)
+ self.assertEqual(500 * units.Gi, result.freespace)
+
+ def test_get_datastore_with_regex(self):
+ # Test with a regex that matches with a datastore
+ datastore_valid_regex = re.compile("^openstack.*\d$")
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore("openstack-ds0"))
+ fake_objects.add_object(fake.Datastore("fake-ds0"))
+ fake_objects.add_object(fake.Datastore("fake-ds1"))
+ result = ds_util.get_datastore(
+ fake.FakeObjectRetrievalSession(fake_objects), None, None,
+ datastore_valid_regex)
+ self.assertEqual("openstack-ds0", result.name)
+
+ def test_get_datastore_with_token(self):
+ regex = re.compile("^ds.*\d$")
+ fake0 = fake.FakeRetrieveResult()
+ fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
+ fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
+ setattr(fake0, 'token', 'token-0')
+ fake1 = fake.FakeRetrieveResult()
+ fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
+ fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
+ result = ds_util.get_datastore(
+ fake.FakeObjectRetrievalSession(fake0, fake1), None, None, regex)
+ self.assertEqual("ds2", result.name)
+
+ def test_get_datastore_with_list(self):
+ # Test with a regex containing whitelist of datastores
+ datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore("openstack-ds0"))
+ fake_objects.add_object(fake.Datastore("openstack-ds1"))
+ fake_objects.add_object(fake.Datastore("openstack-ds2"))
+ result = ds_util.get_datastore(
+ fake.FakeObjectRetrievalSession(fake_objects), None, None,
+ datastore_valid_regex)
+ self.assertNotEqual("openstack-ds1", result.name)
+
+ def test_get_datastore_with_regex_error(self):
+ # Test with a regex that has no match
+ # Checks if code raises DatastoreNotFound with a specific message
+ datastore_invalid_regex = re.compile("unknown-ds")
+ exp_message = (_("Datastore regex %s did not match any datastores")
+ % datastore_invalid_regex.pattern)
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(fake.Datastore("fake-ds0"))
+ fake_objects.add_object(fake.Datastore("fake-ds1"))
+ # assertRaisesRegexp would have been a good choice instead of this
+ # try/except block, but it is only available from Python 2.7.
+ try:
+ ds_util.get_datastore(
+ fake.FakeObjectRetrievalSession(fake_objects), None, None,
+ datastore_invalid_regex)
+ except exception.DatastoreNotFound as e:
+ self.assertEqual(exp_message, e.args[0])
+ else:
+ self.fail("DatastoreNotFound Exception was not raised with "
+ "message: %s" % exp_message)
+
+ def test_get_datastore_without_datastore(self):
+
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ fake.FakeObjectRetrievalSession(None), host="fake-host")
+
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ fake.FakeObjectRetrievalSession(None), cluster="fake-cluster")
+
+ def test_get_datastore_no_host_in_cluster(self):
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ fake.FakeObjectRetrievalSession(""), 'fake_cluster')
+
+ def test_get_datastore_inaccessible_ds(self):
+ data_store = fake.Datastore()
+ data_store.set("summary.accessible", False)
+
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(data_store)
+
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ fake.FakeObjectRetrievalSession(fake_objects))
+
+ def test_get_datastore_ds_in_maintenance(self):
+ data_store = fake.Datastore()
+ data_store.set("summary.maintenanceMode", "inMaintenance")
+
+ fake_objects = fake.FakeRetrieveResult()
+ fake_objects.add_object(data_store)
+
+ self.assertRaises(exception.DatastoreNotFound,
+ ds_util.get_datastore,
+ fake.FakeObjectRetrievalSession(fake_objects))
+
+ def _test_is_datastore_valid(self, accessible=True,
+ maintenance_mode="normal",
+ type="VMFS",
+ datastore_regex=None):
+ propdict = {}
+ propdict["summary.accessible"] = accessible
+ propdict["summary.maintenanceMode"] = maintenance_mode
+ propdict["summary.type"] = type
+ propdict["summary.name"] = "ds-1"
+
+ return ds_util._is_datastore_valid(propdict, datastore_regex)
+
+ def test_is_datastore_valid(self):
+ for ds_type in ds_util.ALLOWED_DATASTORE_TYPES:
+ self.assertTrue(self._test_is_datastore_valid(True,
+ "normal",
+ ds_type))
+
+ def test_is_datastore_valid_inaccessible_ds(self):
+ self.assertFalse(self._test_is_datastore_valid(False,
+ "normal",
+ "VMFS"))
+
+ def test_is_datastore_valid_ds_in_maintenance(self):
+ self.assertFalse(self._test_is_datastore_valid(True,
+ "inMaintenance",
+ "VMFS"))
+
+ def test_is_datastore_valid_ds_type_invalid(self):
+ self.assertFalse(self._test_is_datastore_valid(True,
+ "normal",
+ "vfat"))
+
+ def test_is_datastore_valid_not_matching_regex(self):
+ datastore_regex = re.compile("ds-2")
+ self.assertFalse(self._test_is_datastore_valid(True,
+ "normal",
+ "VMFS",
+ datastore_regex))
+
+ def test_is_datastore_valid_matching_regex(self):
+ datastore_regex = re.compile("ds-1")
+ self.assertTrue(self._test_is_datastore_valid(True,
+ "normal",
+ "VMFS",
+ datastore_regex))
+
class DatastoreTestCase(test.NoDBTestCase):
def test_ds(self):
@@ -287,6 +447,28 @@ def test_ds_path_non_equivalence(self):
p = ds_util.DatastorePath(t[0], *t[1])
self.assertNotEqual(str(canonical_p), str(p))
+ def test_equal(self):
+ a = ds_util.DatastorePath('ds_name', 'a')
+ b = ds_util.DatastorePath('ds_name', 'a')
+ self.assertEqual(a, b)
+
+ def test_join(self):
+ p = ds_util.DatastorePath('ds_name', 'a')
+ ds_path = p.join('b')
+ self.assertEqual('[ds_name] a/b', str(ds_path))
+
+ p = ds_util.DatastorePath('ds_name', 'a')
+ ds_path = p.join()
+ self.assertEqual('[ds_name] a', str(ds_path))
+
+ bad_args = [
+ [None],
+ ['', None],
+ ['a', None],
+ ['a', None, 'b']]
+ for arg in bad_args:
+ self.assertRaises(ValueError, p.join, *arg)
+
def test_ds_path_parse(self):
p = ds_util.DatastorePath.parse('[dsname]')
self.assertEqual('dsname', p.datastore)
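The DatastorePath assertions above pin down the `[datastore] relative/path` string form that replaced the removed `build_datastore_path()` helper. The following is a toy model, not nova's implementation, of just the behaviour `test_equal` and `test_join` exercise; every name here is illustrative:

```python
class DatastorePathSketch(object):
    """Toy model of a datastore path: '[<datastore>] <relative path>'."""

    def __init__(self, datastore, *paths):
        if None in paths:
            raise ValueError("path component cannot be None")
        self.datastore = datastore
        self.rel_path = '/'.join(paths)

    def join(self, *paths):
        # join() with no arguments yields an equivalent path; any None
        # component is rejected, mirroring the bad_args cases above.
        if not paths:
            return DatastorePathSketch(self.datastore, self.rel_path)
        if None in paths:
            raise ValueError("path component cannot be None")
        return DatastorePathSketch(self.datastore, self.rel_path, *paths)

    def __eq__(self, other):
        return str(self) == str(other)

    def __str__(self):
        return '[%s] %s' % (self.datastore, self.rel_path)


# Mirrors test_join and test_equal above:
p = DatastorePathSketch('ds_name', 'a')
assert str(p.join('b')) == '[ds_name] a/b'
assert p == DatastorePathSketch('ds_name', 'a')
```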
diff --git a/nova/tests/virt/vmwareapi/test_vm_util_datastore_selection.py b/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py
similarity index 79%
rename from nova/tests/virt/vmwareapi/test_vm_util_datastore_selection.py
rename to nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py
index b988b57486..ca211902cf 100644
--- a/nova/tests/virt/vmwareapi/test_vm_util_datastore_selection.py
+++ b/nova/tests/virt/vmwareapi/test_ds_util_datastore_selection.py
@@ -17,7 +17,6 @@
from nova.openstack.common import units
from nova import test
from nova.virt.vmwareapi import ds_util
-from nova.virt.vmwareapi import vm_util
ResultSet = collections.namedtuple('ResultSet', ['objects'])
ResultSetToken = collections.namedtuple('ResultSet', ['objects', 'token'])
@@ -26,15 +25,16 @@
MoRef = collections.namedtuple('ManagedObjectReference', ['value'])
-class VMwareVMUtilDatastoreSelectionTestCase(test.NoDBTestCase):
+class VMwareDSUtilDatastoreSelectionTestCase(test.NoDBTestCase):
def setUp(self):
- super(VMwareVMUtilDatastoreSelectionTestCase, self).setUp()
+ super(VMwareDSUtilDatastoreSelectionTestCase, self).setUp()
self.data = [
- ['VMFS', 'os-some-name', True, 987654321, 12346789],
- ['NFS', 'another-name', True, 9876543210, 123467890],
- ['BAD', 'some-name-bad', True, 98765432100, 1234678900],
- ['VMFS', 'some-name-good', False, 987654321, 12346789],
+ ['VMFS', 'os-some-name', True, 'normal', 987654321, 12346789],
+ ['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
+ ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
+ ['VMFS', 'some-name-good', False, 'normal', 987654321, 12346789],
+ ['VMFS', 'new-name', True, 'inMaintenance', 987654321, 12346789]
]
def build_result_set(self, mock_data, name_list=None):
@@ -57,13 +57,14 @@ def build_result_set(self, mock_data, name_list=None):
@property
def propset_name_list(self):
return ['summary.type', 'summary.name', 'summary.accessible',
- 'summary.capacity', 'summary.freeSpace']
+ 'summary.maintenanceMode', 'summary.capacity',
+ 'summary.freeSpace']
def test_filter_datastores_simple(self):
datastores = self.build_result_set(self.data)
best_match = ds_util.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
- rec = vm_util._select_datastore(datastores, best_match)
+ rec = ds_util._select_datastore(datastores, best_match)
self.assertIsNotNone(rec.ref, "could not find datastore!")
self.assertEqual('ds-001', rec.ref.value,
@@ -77,7 +78,7 @@ def test_filter_datastores_empty(self):
best_match = ds_util.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
- rec = vm_util._select_datastore(datastores, best_match)
+ rec = ds_util._select_datastore(datastores, best_match)
self.assertEqual(rec, best_match)
@@ -87,7 +88,7 @@ def test_filter_datastores_no_match(self):
best_match = ds_util.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
- rec = vm_util._select_datastore(datastores,
+ rec = ds_util._select_datastore(datastores,
best_match,
datastore_regex)
@@ -96,11 +97,14 @@ def test_filter_datastores_no_match(self):
def test_filter_datastores_specific_match(self):
data = [
- ['VMFS', 'os-some-name', True, 987654321, 1234678],
- ['NFS', 'another-name', True, 9876543210, 123467890],
- ['BAD', 'some-name-bad', True, 98765432100, 1234678900],
- ['VMFS', 'some-name-good', True, 987654321, 12346789],
- ['VMFS', 'some-other-good', False, 987654321000, 12346789000],
+ ['VMFS', 'os-some-name', True, 'normal', 987654321, 1234678],
+ ['NFS', 'another-name', True, 'normal', 9876543210, 123467890],
+ ['BAD', 'some-name-bad', True, 'normal', 98765432100, 1234678900],
+ ['VMFS', 'some-name-good', True, 'normal', 987654321, 12346789],
+ ['VMFS', 'some-other-good', False, 'normal', 987654321000,
+ 12346789000],
+ ['VMFS', 'new-name', True, 'inMaintenance', 987654321000,
+ 12346789000]
]
# only the DS some-name-good is accessible and matches the regex
datastores = self.build_result_set(data)
@@ -108,7 +112,7 @@ def test_filter_datastores_specific_match(self):
best_match = ds_util.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
- rec = vm_util._select_datastore(datastores,
+ rec = ds_util._select_datastore(datastores,
best_match,
datastore_regex)
@@ -135,7 +139,7 @@ def test_filter_datastores_missing_props(self):
best_match = ds_util.Datastore(ref='fake_ref', name='ds',
capacity=0, freespace=0)
- rec = vm_util._select_datastore(datastores, best_match)
+ rec = ds_util._select_datastore(datastores, best_match)
self.assertEqual(rec, best_match, "no matches were expected")
def test_filter_datastores_best_match(self):
@@ -153,7 +157,7 @@ def test_filter_datastores_best_match(self):
# the current best match is better than all candidates
best_match = ds_util.Datastore(ref='ds-100', name='best-ds-good',
capacity=20 * units.Gi, freespace=19 * units.Gi)
- rec = vm_util._select_datastore(datastores,
+ rec = ds_util._select_datastore(datastores,
best_match,
datastore_regex)
self.assertEqual(rec, best_match, "did not match datastore properly")
diff --git a/nova/tests/virt/vmwareapi/test_imagecache.py b/nova/tests/virt/vmwareapi/test_imagecache.py
index 4917296d7f..fa3ceeb583 100644
--- a/nova/tests/virt/vmwareapi/test_imagecache.py
+++ b/nova/tests/virt/vmwareapi/test_imagecache.py
@@ -20,6 +20,7 @@
from nova.openstack.common import timeutils
from nova import test
+from nova.tests import fake_instance
from nova.tests.virt.vmwareapi import fake
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import imagecache
@@ -46,7 +47,7 @@ def tearDown(self):
def test_timestamp_cleanup(self):
def fake_get_timestamp(ds_browser, ds_path):
self.assertEqual('fake-ds-browser', ds_browser)
- self.assertEqual('fake-ds-path', ds_path)
+ self.assertEqual('[fake-ds] fake-path', str(ds_path))
if not self.exists:
return
ts = '%s%s' % (imagecache.TIMESTAMP_PREFIX,
@@ -61,19 +62,22 @@ def fake_get_timestamp(ds_browser, ds_path):
) as (_get_timestamp, _file_delete):
self.exists = False
self._imagecache.timestamp_cleanup(
- 'fake-dc-ref', 'fake-ds-browser', 'fake-ds-path')
+ 'fake-dc-ref', 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
self.assertEqual(0, _file_delete.call_count)
self.exists = True
self._imagecache.timestamp_cleanup(
- 'fake-dc-ref', 'fake-ds-browser', 'fake-ds-path')
+ 'fake-dc-ref', 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
+ expected_ds_path = ds_util.DatastorePath(
+ 'fake-ds', 'fake-path', self._file_name)
_file_delete.assert_called_once_with(self._session,
- 'fake-ds-path/ts-2012-11-22-12-00-00',
- 'fake-dc-ref')
+ expected_ds_path, 'fake-dc-ref')
def test_get_timestamp(self):
def fake_get_sub_folders(session, ds_browser, ds_path):
self.assertEqual('fake-ds-browser', ds_browser)
- self.assertEqual('fake-ds-path', ds_path)
+ self.assertEqual('[fake-ds] fake-path', str(ds_path))
if self.exists:
files = set()
files.add(self._file_name)
@@ -84,12 +88,14 @@ def fake_get_sub_folders(session, ds_browser, ds_path):
fake_get_sub_folders)
):
self.exists = True
- ts = self._imagecache._get_timestamp('fake-ds-browser',
- 'fake-ds-path')
+ ts = self._imagecache._get_timestamp(
+ 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
self.assertEqual(self._file_name, ts)
self.exists = False
- ts = self._imagecache._get_timestamp('fake-ds-browser',
- 'fake-ds-path')
+ ts = self._imagecache._get_timestamp(
+ 'fake-ds-browser',
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
self.assertIsNone(ts)
def test_get_timestamp_filename(self):
@@ -131,9 +137,8 @@ def fake_get_sub_folders(session, ds_browser, ds_path):
fake_get_sub_folders)
) as (_get_dynamic, _get_sub_folders):
fake_ds_ref = fake.ManagedObjectReference('fake-ds-ref')
- datastore = {'name': 'ds', 'ref': fake_ds_ref}
- ds_path = ds_util.build_datastore_path(datastore['name'],
- 'base_folder')
+ datastore = ds_util.Datastore(name='ds', ref=fake_ds_ref)
+ ds_path = datastore.build_path('base_folder')
images = self._imagecache._list_datastore_images(
ds_path, datastore)
originals = set()
@@ -146,29 +151,30 @@ def test_age_cached_images(self):
def fake_get_ds_browser(ds_ref):
return 'fake-ds-browser'
- def fake_get_timestamp(ds_browser, path):
+ def fake_get_timestamp(ds_browser, ds_path):
self._get_timestamp_called += 1
- if path == 'fake-ds-path/fake-image-1':
+ path = str(ds_path)
+ if path == '[fake-ds] fake-path/fake-image-1':
# No time stamp exists
return
- if path == 'fake-ds-path/fake-image-2':
+ if path == '[fake-ds] fake-path/fake-image-2':
# Timestamp that will be valid => no deletion
return 'ts-2012-11-22-10-00-00'
- if path == 'fake-ds-path/fake-image-3':
+ if path == '[fake-ds] fake-path/fake-image-3':
# Timestamp that will be invalid => deletion
return 'ts-2012-11-20-12-00-00'
self.fail()
def fake_mkdir(session, ts_path, dc_ref):
self.assertEqual(
- 'fake-ds-path/fake-image-1/ts-2012-11-22-12-00-00',
- ts_path)
+ '[fake-ds] fake-path/fake-image-1/ts-2012-11-22-12-00-00',
+ str(ts_path))
- def fake_file_delete(session, path, dc_ref):
- self.assertEqual('fake-ds-path/fake-image-3', path)
+ def fake_file_delete(session, ds_path, dc_ref):
+ self.assertEqual('[fake-ds] fake-path/fake-image-3', str(ds_path))
- def fake_timestamp_cleanup(dc_ref, ds_browser, path):
- self.assertEqual('fake-ds-path/fake-image-4', path)
+ def fake_timestamp_cleanup(dc_ref, ds_browser, ds_path):
+ self.assertEqual('[fake-ds] fake-path/fake-image-4', str(ds_path))
with contextlib.nested(
mock.patch.object(self._imagecache, '_get_ds_browser',
@@ -184,15 +190,16 @@ def fake_timestamp_cleanup(dc_ref, ds_browser, path):
) as (_get_ds_browser, _get_timestamp, _mkdir, _file_delete,
_timestamp_cleanup):
timeutils.set_time_override(override_time=self._time)
- datastore = {'name': 'ds', 'ref': 'fake-ds-ref'}
+ datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
dc_info = vmops.DcInfo(ref='dc_ref', name='name',
vmFolder='vmFolder')
self._get_timestamp_called = 0
self._imagecache.originals = set(['fake-image-1', 'fake-image-2',
'fake-image-3', 'fake-image-4'])
self._imagecache.used_images = set(['fake-image-4'])
- self._imagecache._age_cached_images('fake-context',
- datastore, dc_info, 'fake-ds-path')
+ self._imagecache._age_cached_images(
+ 'fake-context', datastore, dc_info,
+ ds_util.DatastorePath('fake-ds', 'fake-path'))
self.assertEqual(3, self._get_timestamp_called)
def test_update(self):
@@ -202,7 +209,7 @@ def fake_list_datastore_images(ds_path, datastore):
def fake_age_cached_images(context, datastore,
dc_info, ds_path):
- self.assertEqual('[ds] fake-base-folder', ds_path)
+ self.assertEqual('[ds] fake-base-folder', str(ds_path))
self.assertEqual(self.images,
self._imagecache.used_images)
self.assertEqual(self.images,
@@ -227,9 +234,11 @@ def fake_age_cached_images(context, datastore,
'uuid': '456',
'vm_state': '',
'task_state': ''}]
+ all_instances = [fake_instance.fake_instance_obj(None, **instance)
+ for instance in instances]
self.images = set(['1', '2'])
- datastore = {'name': 'ds', 'ref': 'fake-ds-ref'}
+ datastore = ds_util.Datastore(name='ds', ref='fake-ds-ref')
dc_info = vmops.DcInfo(ref='dc_ref', name='name',
vmFolder='vmFolder')
datastores_info = [(datastore, dc_info)]
- self._imagecache.update('context', instances, datastores_info)
+ self._imagecache.update('context', all_instances, datastores_info)
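The aging tests above drive the cache with marker folders named `ts-YYYY-MM-DD-HH-MM-SS` under each cached image: an unused image with no marker gets one stamped "now", and an image whose marker predates the cutoff is deleted. A hypothetical sketch of that age check, reusing the exact fixture timestamps; the helper name and cutoff handling are assumptions, not the nova code:

```python
import datetime

TIMESTAMP_PREFIX = 'ts-'
TIMESTAMP_FORMAT = '%Y-%m-%d-%H-%M-%S'


def is_expired(ts_name, now, max_age_seconds):
    """Return True when a 'ts-...' marker is older than the cutoff."""
    stamp = datetime.datetime.strptime(ts_name[len(TIMESTAMP_PREFIX):],
                                       TIMESTAMP_FORMAT)
    return (now - stamp).total_seconds() > max_age_seconds


now = datetime.datetime(2012, 11, 22, 12, 0, 0)
one_day = 24 * 3600
# Matches the fixtures above: the 2012-11-22-10-00-00 marker survives a
# one-day cutoff, while the 2012-11-20-12-00-00 marker is aged out.
assert not is_expired('ts-2012-11-22-10-00-00', now, one_day)
assert is_expired('ts-2012-11-20-12-00-00', now, one_day)
```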
diff --git a/nova/tests/virt/vmwareapi/test_vif.py b/nova/tests/virt/vmwareapi/test_vif.py
index 337d6da723..fd280aff9e 100644
--- a/nova/tests/virt/vmwareapi/test_vif.py
+++ b/nova/tests/virt/vmwareapi/test_vif.py
@@ -328,6 +328,11 @@ def test_get_vif_info_none(self):
'is_neutron', 'fake_model', None)
self.assertEqual([], vif_info)
+ def test_get_vif_info_empty_list(self):
+ vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
+ 'is_neutron', 'fake_model', [])
+ self.assertEqual([], vif_info)
+
@mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
def test_get_vif_info(self, mock_get_network_ref):
network_info = utils.get_test_network_info()
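`test_get_vif_info_empty_list` above extends the existing None case to an empty list. The guard it implies is a single truthiness check, since `None` and `[]` are both falsy in Python; the sketch below is illustrative, with a stand-in builder instead of the real vif-info construction:

```python
def get_vif_info(session, cluster, is_neutron, vif_model, network_info):
    # None and [] are both falsy, so one check covers both tested cases.
    if not network_info:
        return []
    return [{'mac_address': vif['address']} for vif in network_info]


assert get_vif_info(None, None, True, 'fake_model', None) == []
assert get_vif_info(None, None, True, 'fake_model', []) == []
```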
diff --git a/nova/tests/virt/vmwareapi/test_vim_util.py b/nova/tests/virt/vmwareapi/test_vim_util.py
index a8aef84eb3..fe5ef155f9 100644
--- a/nova/tests/virt/vmwareapi/test_vim_util.py
+++ b/nova/tests/virt/vmwareapi/test_vim_util.py
@@ -43,7 +43,7 @@ class VMwareVIMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVIMUtilTestCase, self).setUp()
- fake.reset(vc=True)
+ fake.reset()
self.vim = fake.FakeVim()
self.vim._login()
diff --git a/nova/tests/virt/vmwareapi/test_vm_util.py b/nova/tests/virt/vmwareapi/test_vm_util.py
index d6d7285fae..5bff7e896d 100644
--- a/nova/tests/virt/vmwareapi/test_vm_util.py
+++ b/nova/tests/virt/vmwareapi/test_vm_util.py
@@ -22,8 +22,6 @@
from nova import exception
from nova.network import model as network_model
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.virt.vmwareapi import fake
@@ -47,28 +45,6 @@ def tearDown(self):
super(VMwareVMUtilTestCase, self).tearDown()
fake.reset()
- def test_get_datastore(self):
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.Datastore())
- result = vm_util.get_datastore(
- fake.FakeObjectRetrievalSession(fake_objects))
-
- self.assertEqual("fake-ds", result.name)
- self.assertEqual(units.Ti, result.capacity)
- self.assertEqual(500 * units.Gi, result.freespace)
-
- def test_get_datastore_with_regex(self):
- # Test with a regex that matches with a datastore
- datastore_valid_regex = re.compile("^openstack.*\d$")
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.Datastore("openstack-ds0"))
- fake_objects.add_object(fake.Datastore("fake-ds0"))
- fake_objects.add_object(fake.Datastore("fake-ds1"))
- result = vm_util.get_datastore(
- fake.FakeObjectRetrievalSession(fake_objects),
- None, None, datastore_valid_regex)
- self.assertEqual("openstack-ds0", result.name)
-
def _test_get_stats_from_cluster(self, connection_state="connected",
maintenance_mode=False):
ManagedObjectRefs = [fake.ManagedObjectReference("host1",
@@ -148,62 +124,6 @@ def test_get_stats_from_cluster_hosts_disconnected_and_active(self):
def test_get_stats_from_cluster_hosts_connected_and_maintenance(self):
self._test_get_stats_from_cluster(maintenance_mode=True)
- def test_get_datastore_with_token(self):
- regex = re.compile("^ds.*\d$")
- fake0 = fake.FakeRetrieveResult()
- fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
- fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
- setattr(fake0, 'token', 'token-0')
- fake1 = fake.FakeRetrieveResult()
- fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
- fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
- result = vm_util.get_datastore(
- fake.FakeObjectRetrievalSession(fake0, fake1), None, None, regex)
- self.assertEqual("ds2", result.name)
-
- def test_get_datastore_with_list(self):
- # Test with a regex containing whitelist of datastores
- datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.Datastore("openstack-ds0"))
- fake_objects.add_object(fake.Datastore("openstack-ds1"))
- fake_objects.add_object(fake.Datastore("openstack-ds2"))
- result = vm_util.get_datastore(
- fake.FakeObjectRetrievalSession(fake_objects),
- None, None, datastore_valid_regex)
- self.assertNotEqual("openstack-ds1", result.name)
-
- def test_get_datastore_with_regex_error(self):
- # Test with a regex that has no match
- # Checks if code raises DatastoreNotFound with a specific message
- datastore_invalid_regex = re.compile("unknown-ds")
- exp_message = (_("Datastore regex %s did not match any datastores")
- % datastore_invalid_regex.pattern)
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(fake.Datastore("fake-ds0"))
- fake_objects.add_object(fake.Datastore("fake-ds1"))
- # assertRaisesRegExp would have been a good choice instead of
- # try/catch block, but it's available only from Py 2.7.
- try:
- vm_util.get_datastore(
- fake.FakeObjectRetrievalSession(fake_objects), None, None,
- datastore_invalid_regex)
- except exception.DatastoreNotFound as e:
- self.assertEqual(exp_message, e.args[0])
- else:
- self.fail("DatastoreNotFound Exception was not raised with "
- "message: %s" % exp_message)
-
- def test_get_datastore_without_datastore(self):
-
- self.assertRaises(exception.DatastoreNotFound,
- vm_util.get_datastore,
- fake.FakeObjectRetrievalSession(None), host="fake-host")
-
- self.assertRaises(exception.DatastoreNotFound,
- vm_util.get_datastore,
- fake.FakeObjectRetrievalSession(None), cluster="fake-cluster")
-
def test_get_host_ref_from_id(self):
fake_host_name = "ha-host"
fake_host_sys = fake.HostSystem(fake_host_name)
@@ -226,11 +146,6 @@ def test_get_host_ref_no_hosts_in_cluster(self):
vm_util.get_host_ref,
fake.FakeObjectRetrievalSession(""), 'fake_cluster')
- def test_get_datastore_no_host_in_cluster(self):
- self.assertRaises(exception.DatastoreNotFound,
- vm_util.get_datastore,
- fake.FakeObjectRetrievalSession(""), 'fake_cluster')
-
@mock.patch.object(vm_util, '_get_vm_ref_from_vm_uuid',
return_value=None)
def test_get_host_name_for_vm(self, _get_ref_from_uuid):
@@ -305,17 +220,6 @@ def test_property_from_property_set(self):
self.assertIsNotNone(prop4)
self.assertEqual('bar1', prop4.val)
- def test_get_datastore_inaccessible_ds(self):
- data_store = fake.Datastore()
- data_store.set("summary.accessible", False)
-
- fake_objects = fake.FakeRetrieveResult()
- fake_objects.add_object(data_store)
-
- self.assertRaises(exception.DatastoreNotFound,
- vm_util.get_datastore,
- fake.FakeObjectRetrievalSession(fake_objects))
-
def test_get_resize_spec(self):
fake_instance = {'id': 7, 'name': 'fake!',
'uuid': 'bda5fb9e-b347-40e8-8256-42397848cb00',
@@ -347,6 +251,7 @@ def test_get_cdrom_attach_config_spec(self):
},
'backing': {
'datastore': {
+ "summary.maintenanceMode": "normal",
"summary.type": "VMFS",
"summary.accessible":true,
"summary.name": "fake-ds",
@@ -396,6 +301,18 @@ def _vmdk_path_and_adapter_type_devices(self, filename, parent=None):
devices = [disk, controller]
return devices
+ def test_get_vmdk_path(self):
+ uuid = '00000000-0000-0000-0000-000000000000'
+ filename = '[test_datastore] %s/%s.vmdk' % (uuid, uuid)
+ devices = self._vmdk_path_and_adapter_type_devices(filename)
+ session = fake.FakeSession()
+
+ with mock.patch.object(session, '_call_method',
+ return_value=devices):
+ instance = {'uuid': uuid}
+ vmdk_path = vm_util.get_vmdk_path(session, None, instance)
+ self.assertEqual(filename, vmdk_path)
+
def test_get_vmdk_path_and_adapter_type(self):
filename = '[test_datastore] test_file.vmdk'
devices = self._vmdk_path_and_adapter_type_devices(filename)
@@ -685,18 +602,18 @@ def fake_wait_for_task(self, *args):
def test_convert_vif_model(self):
expected = "VirtualE1000"
- result = vm_util._convert_vif_model(network_model.VIF_MODEL_E1000)
+ result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000)
self.assertEqual(expected, result)
expected = "VirtualE1000e"
- result = vm_util._convert_vif_model(network_model.VIF_MODEL_E1000E)
+ result = vm_util.convert_vif_model(network_model.VIF_MODEL_E1000E)
self.assertEqual(expected, result)
types = ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32",
"VirtualVmxnet"]
for type in types:
self.assertEqual(type,
- vm_util._convert_vif_model(type))
+ vm_util.convert_vif_model(type))
self.assertRaises(exception.Invalid,
- vm_util._convert_vif_model,
+ vm_util.convert_vif_model,
"InvalidVifModel")
def test_power_on_instance_with_vm_ref(self):
@@ -819,12 +736,8 @@ def _create_fake_vm_objects(self):
def test_get_values(self):
objects = self._create_fake_vm_objects()
- lst_properties = ['runtime.powerState',
- 'summary.guest.toolsStatus',
- 'summary.guest.toolsRunningStatus']
query = vm_util.get_values_from_object_properties(
- fake.FakeObjectRetrievalSession(objects),
- objects, lst_properties)
+ fake.FakeObjectRetrievalSession(objects), objects)
self.assertEqual('poweredOn', query['runtime.powerState'])
self.assertEqual('guestToolsRunning',
query['summary.guest.toolsRunningStatus'])
@@ -842,3 +755,167 @@ def test_reconfigure_vm(self):
'ReconfigVM_Task', 'fake-ref', spec='fake-spec')
_wait_for_task.assert_called_once_with(
'fake_reconfigure_task')
+
+ def test_get_network_attach_config_spec_opaque(self):
+ vif_info = {'network_name': 'br-int',
+ 'mac_address': '00:00:00:ca:fe:01',
+ 'network_ref': {'type': 'OpaqueNetwork',
+ 'network-id': 'fake-network-id',
+ 'network-type': 'opaque'},
+ 'iface_id': 7,
+ 'vif_model': 'VirtualE1000'}
+ result = vm_util.get_network_attach_config_spec(
+ fake.FakeFactory(), vif_info, 1)
+ card = 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo'
+ expected = """{
+ 'extraConfig': [{'value': 7,
+ 'key': 'nvp.iface-id.1',
+ 'obj_name':'ns0:OptionValue'}],
+ 'deviceChange': [
+ {'device': {
+ 'macAddress':'00:00:00:ca:fe:01',
+ 'addressType': 'manual',
+ 'connectable': {
+ 'allowGuestControl':True,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name':'ns0:VirtualDeviceConnectInfo'},
+ 'backing': {
+ 'opaqueNetworkType': 'opaque',
+ 'opaqueNetworkId': 'fake-network-id',
+ 'obj_name': '%(card)s'},
+ 'key': -47,
+ 'obj_name': 'ns0:VirtualE1000',
+ 'wakeOnLanEnabled': True},
+ 'operation': 'add',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {'card': card}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_network_attach_config_spec_dvs(self):
+ vif_info = {'network_name': 'br100',
+ 'mac_address': '00:00:00:ca:fe:01',
+ 'network_ref': {'type': 'DistributedVirtualPortgroup',
+ 'dvsw': 'fake-network-id',
+ 'dvpg': 'fake-group'},
+ 'iface_id': 7,
+ 'vif_model': 'VirtualE1000'}
+ result = vm_util.get_network_attach_config_spec(
+ fake.FakeFactory(), vif_info, 1)
+ port = 'ns0:DistributedVirtualSwitchPortConnection'
+ backing = 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo'
+ expected = """{
+ 'extraConfig': [{'value': 7,
+ 'key': 'nvp.iface-id.1',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'deviceChange': [
+ {'device': {'macAddress': '00:00:00:ca:fe:01',
+ 'addressType': 'manual',
+ 'connectable': {
+ 'allowGuestControl': True,
+ 'startConnected': True,
+ 'connected': True,
+ 'obj_name': 'ns0:VirtualDeviceConnectInfo'},
+ 'backing': {
+ 'port': {
+ 'portgroupKey': 'fake-group',
+ 'switchUuid': 'fake-network-id',
+ 'obj_name': '%(obj_name_port)s'},
+ 'obj_name': '%(obj_name_backing)s'},
+ 'key': -47,
+ 'obj_name': 'ns0:VirtualE1000',
+ 'wakeOnLanEnabled': True},
+ 'operation': 'add',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}""" % {
+ 'obj_name_backing': backing,
+ 'obj_name_port': port}
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ def test_get_network_detach_config_spec(self):
+ result = vm_util.get_network_detach_config_spec(
+ fake.FakeFactory(), 'fake-device', 2)
+ expected = """{
+ 'extraConfig': [{'value': 'free',
+ 'key': 'nvp.iface-id.2',
+ 'obj_name': 'ns0:OptionValue'}],
+ 'deviceChange': [{'device': 'fake-device',
+ 'operation': 'remove',
+ 'obj_name': 'ns0:VirtualDeviceConfigSpec'}],
+ 'obj_name':'ns0:VirtualMachineConfigSpec'}"""
+ expected = re.sub(r'\s+', '', expected)
+ result = re.sub(r'\s+', '', repr(result))
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session._get_vim(),
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+ @mock.patch.object(vm_util, "get_vm_ref", return_value="fake-vm-ref")
+ def test_power_off_instance_no_vm_ref(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance)
+ fake_get_ref.assert_called_once_with(session, fake_instance)
+ fake_call_method.assert_called_once_with(session._get_vim(),
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance_with_exception(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(session, '_wait_for_task',
+ side_effect=exception.NovaException('fake'))
+ ) as (fake_call_method, fake_wait_for_task):
+ self.assertRaises(exception.NovaException,
+ vm_util.power_off_instance,
+ session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session._get_vim(),
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
+
+ @mock.patch.object(vm_util, "get_vm_ref")
+ def test_power_off_instance_power_state_exception(self, fake_get_ref):
+ session = fake.FakeSession()
+ fake_instance = mock.MagicMock()
+ with contextlib.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake-task'),
+ mock.patch.object(
+ session, '_wait_for_task',
+ side_effect=error_util.InvalidPowerStateException)
+ ) as (fake_call_method, fake_wait_for_task):
+ vm_util.power_off_instance(session, fake_instance, 'fake-vm-ref')
+ fake_call_method.assert_called_once_with(session._get_vim(),
+ "PowerOffVM_Task",
+ 'fake-vm-ref')
+ fake_wait_for_task.assert_called_once_with('fake-task')
+ self.assertFalse(fake_get_ref.called)
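The four power-off tests above fix the contract of the new `vm_util.power_off_instance()`: resolve the VM reference only when one is not supplied, invoke `PowerOffVM_Task`, wait on the task, swallow `InvalidPowerStateException` (the VM was already off), and let any other failure propagate. A behavioural sketch under those assumptions, with stand-in session and exception types rather than the real nova ones:

```python
class InvalidPowerStateException(Exception):
    """Stand-in for error_util.InvalidPowerStateException."""


def power_off_instance(session, instance, vm_ref=None):
    if vm_ref is None:
        # Resolved on demand only; test_power_off_instance asserts the
        # lookup is skipped when a reference is passed in.
        vm_ref = session.get_vm_ref(instance)
    task = session.call_method('PowerOffVM_Task', vm_ref)
    try:
        session.wait_for_task(task)
    except InvalidPowerStateException:
        # Already powered off: treated as success rather than an error.
        pass


class FakeSession(object):
    def get_vm_ref(self, instance):
        return 'fake-vm-ref'

    def call_method(self, method, vm_ref):
        return 'fake-task'

    def wait_for_task(self, task):
        raise InvalidPowerStateException()


power_off_instance(FakeSession(), object())  # completes without raising
```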
diff --git a/nova/tests/virt/vmwareapi/test_vmops.py b/nova/tests/virt/vmwareapi/test_vmops.py
index 6d1b221f20..e95bbcb26f 100644
--- a/nova/tests/virt/vmwareapi/test_vmops.py
+++ b/nova/tests/virt/vmwareapi/test_vmops.py
@@ -29,6 +29,7 @@
import nova.tests.image.fake
from nova.tests.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.virt.vmwareapi import stubs
+from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
@@ -38,6 +39,39 @@
from nova.virt.vmwareapi import vmware_images
+class VMwareVMOpsSimpleTestCase(test.NoDBTestCase):
+ @mock.patch.object(vm_util, 'get_res_pool_ref')
+ @mock.patch.object(ds_util, 'get_datastore')
+ @mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name')
+ def test_spawn_disk_invalid_disk_size(self,
+ mock_get_datacenter_ref_and_name,
+ mock_get_datastore,
+ mock_get_res_pool_ref):
+ image = {
+ 'id': 'c1c8ce3d-c2e0-4247-890c-ccf5cc1c004c',
+ 'disk_format': 'vmdk',
+ 'size': 999999999 * units.Gi,
+ }
+ self._context = context.RequestContext('fake_user', 'fake_project')
+ instance = fake_instance.fake_instance_obj(self._context,
+ image_ref=nova.tests.image.fake.get_valid_image_id(),
+ uuid='fake_uuid',
+ root_gb=1,
+ node='respool-1001(MyResPoolName)'
+ )
+
+ ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
+ self.assertRaises(exception.InstanceUnacceptable,
+ ops.spawn,
+ mock.Mock(),
+ instance,
+ image,
+ injected_files=[],
+ admin_password=None,
+ network_info=None,
+ block_device_info=None)
+
+
class VMwareVMOpsTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMOpsTestCase, self).setUp()
@@ -54,17 +88,17 @@ def setUp(self):
self._vmops = vmops.VMwareVCVMOps(self._session, self._virtapi, None)
self._image_id = nova.tests.image.fake.get_valid_image_id()
- values = {
+ self._instance_values = {
'name': 'fake_name',
'uuid': 'fake_uuid',
'vcpus': 1,
'memory_mb': 512,
'image_ref': self._image_id,
- 'root_gb': 1,
+ 'root_gb': 10,
'node': 'respool-1001(MyResPoolName)'
}
self._instance = fake_instance.fake_instance_obj(
- self._context, **values)
+ self._context, **self._instance_values)
fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds')
self._ds = ds_util.Datastore(
@@ -96,15 +130,18 @@ def setUp(self):
vlan=None,
bridge_interface=None,
injected=True)
+ self._network_values = {
+ 'id': None,
+ 'address': 'DE:AD:BE:EF:00:00',
+ 'network': network,
+ 'type': None,
+ 'devname': None,
+ 'ovs_interfaceid': None,
+ 'rxtx_cap': 3
+ }
self.network_info = network_model.NetworkInfo([
- network_model.VIF(id=None,
- address='DE:AD:BE:EF:00:00',
- network=network,
- type=None,
- devname=None,
- ovs_interfaceid=None,
- rxtx_cap=3)
- ])
+ network_model.VIF(**self._network_values)
+ ])
pure_IPv6_network = network_model.Network(id=0,
bridge='fa0',
label='fake',
@@ -122,21 +159,6 @@ def setUp(self):
rxtx_cap=3)
])
- def test_get_disk_format_none(self):
- format, is_iso = self._vmops._get_disk_format({'disk_format': None})
- self.assertIsNone(format)
- self.assertFalse(is_iso)
-
- def test_get_disk_format_iso(self):
- format, is_iso = self._vmops._get_disk_format({'disk_format': 'iso'})
- self.assertEqual('iso', format)
- self.assertTrue(is_iso)
-
- def test_get_disk_format_bad(self):
- self.assertRaises(exception.InvalidDiskFormat,
- self._vmops._get_disk_format,
- {'disk_format': 'foo'})
-
def test_get_machine_id_str(self):
result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
self.assertEqual(result,
@@ -146,33 +168,6 @@ def test_get_machine_id_str(self):
self.pure_IPv6_network_info)
self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result)
- def test_use_linked_clone_override_nf(self):
- value = vmops.VMwareVMOps.decide_linked_clone(None, False)
- self.assertFalse(value, "No overrides present but still overridden!")
-
- def test_use_linked_clone_override_none_true(self):
- value = vmops.VMwareVMOps.decide_linked_clone(None, True)
- self.assertTrue(value, "No overrides present but still overridden!")
-
- def test_use_linked_clone_override_ny(self):
- value = vmops.VMwareVMOps.decide_linked_clone(None, "yes")
- self.assertTrue(value, "No overrides present but still overridden!")
-
- def test_use_linked_clone_override_ft(self):
- value = vmops.VMwareVMOps.decide_linked_clone(False, True)
- self.assertFalse(value,
- "image level metadata failed to override global")
-
- def test_use_linked_clone_override_no_true(self):
- value = vmops.VMwareVMOps.decide_linked_clone("no", True)
- self.assertFalse(value,
- "image level metadata failed to override global")
-
- def test_use_linked_clone_override_yf(self):
- value = vmops.VMwareVMOps.decide_linked_clone("yes", False)
- self.assertTrue(value,
- "image level metadata failed to override global")
-
def _setup_create_folder_mocks(self):
ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
base_name = 'folder'
@@ -184,7 +179,7 @@ def _setup_create_folder_mocks(self):
ref=dc_ref,
name='fake-name',
vmFolder='fake-folder')
- path = ds_util.build_datastore_path(ds_name, base_name)
+ path = ds_util.DatastorePath(ds_name, base_name)
ds_util.mkdir = mock.Mock()
return ds_name, ds_ref, ops, path, dc_ref
@@ -406,7 +401,7 @@ def fake_call_method(module, method, *args, **kwargs):
return_value=vm_rescue_ref),
mock.patch.object(self._session, '_call_method',
fake_call_method),
- mock.patch.object(self._vmops, '_power_off_vm_ref'),
+ mock.patch.object(vm_util, 'power_off_instance'),
mock.patch.object(self._vmops, '_destroy_instance'),
mock.patch.object(copy, 'deepcopy', return_value=r_instance)
) as (_get_vmdk_path_and_adapter_type, _get_vmdk_volume_disk,
@@ -427,12 +422,18 @@ def fake_call_method(module, method, *args, **kwargs):
self._instance)
_get_vm_ref_from_name.assert_called_once_with(self._session,
'fake_uuid-rescue')
- _power_off.assert_called_once_with(vm_rescue_ref)
- _destroy_instance.assert_called_once_with(r_instance, None,
+ _power_off.assert_called_once_with(self._session, r_instance,
+ vm_rescue_ref)
+ _destroy_instance.assert_called_once_with(r_instance,
instance_name='fake_uuid-rescue')
def _test_finish_migration(self, power_on=True, resize_instance=False):
"""Tests the finish_migration method on vmops."""
+ if resize_instance:
+ self._instance.system_metadata = {'old_instance_type_root_gb': '0'}
+ datastore = ds_util.Datastore(ref='fake-ref', name='fake')
+ dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
+ vmFolder='fake_folder')
with contextlib.nested(
mock.patch.object(self._session, "_call_method",
return_value='fake-task'),
@@ -440,9 +441,16 @@ def _test_finish_migration(self, power_on=True, resize_instance=False):
mock.patch.object(self._session, "_wait_for_task"),
mock.patch.object(vm_util, "get_vm_resize_spec",
return_value='fake-spec'),
+ mock.patch.object(ds_util, "get_datastore",
+ return_value=datastore),
+ mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
+ return_value=dc_info),
+ mock.patch.object(self._vmops, '_extend_virtual_disk'),
mock.patch.object(vm_util, "power_on_instance")
) as (fake_call_method, fake_update_instance_progress,
- fake_wait_for_task, fake_vm_resize_spec, fake_power_on):
+ fake_wait_for_task, fake_vm_resize_spec,
+ fake_get_datastore, fake_get_datacenter_ref_and_name,
+ fake_extend_virtual_disk, fake_power_on):
self._vmops.finish_migration(context=self._context,
migration=None,
instance=self._instance,
@@ -462,9 +470,13 @@ def _test_finish_migration(self, power_on=True, resize_instance=False):
'f',
spec='fake-spec'))
fake_wait_for_task.assert_called_once_with('fake-task')
+ fake_extend_virtual_disk.assert_called_once_with(
+ self._instance, self._instance['root_gb'] * units.Mi,
+ None, dc_info.ref)
else:
self.assertFalse(fake_vm_resize_spec.called)
self.assertFalse(fake_wait_for_task.called)
+ self.assertFalse(fake_extend_virtual_disk.called)
if power_on:
fake_power_on.assert_called_once_with(self._session,
@@ -511,30 +523,65 @@ def test_finish_revert_migration_power_on(self):
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(power_on=False)
- def test_spawn_mask_block_device_info_password(self):
+ @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
+ @mock.patch.object(vmops.VMwareVMOps, '_create_config_drive')
+ def test_configure_config_drive(self,
+ mock_create_config_drive,
+ mock_attach_cdrom_to_vm):
+ injected_files = mock.Mock()
+ admin_password = mock.Mock()
+ vm_ref = mock.Mock()
+ mock_create_config_drive.return_value = "fake_iso_path"
+ self._vmops._configure_config_drive(
+ self._instance, vm_ref, self._dc_info, self._ds,
+ injected_files, admin_password)
+
+ upload_iso_path = self._ds.build_path("fake_iso_path")
+ mock_create_config_drive.assert_called_once_with(self._instance,
+ injected_files, admin_password, self._ds.name,
+ self._dc_info.name, self._instance.uuid, "Fake-CookieJar")
+ mock_attach_cdrom_to_vm.assert_called_once_with(
+ vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
+
+ @mock.patch.object(vmops.LOG, 'debug')
+ @mock.patch('nova.virt.vmwareapi.volumeops.VMwareVolumeOps'
+ '.attach_root_volume')
+ def test_spawn_mask_block_device_info_password(self,
+ mock_attach_root_volume,
+ mock_debug):
# Very simple test that just ensures block_device_info auth_password
# is masked when logged; the rest of the test just fails out early.
data = {'auth_password': 'scrubme'}
bdm = [{'connection_info': {'data': data}}]
bdi = {'block_device_mapping': bdm}
+ self.password_logged = False
+
# Tests that the parameters to the to_xml method are sanitized for
# passwords when logged.
def fake_debug(*args, **kwargs):
if 'auth_password' in args[0]:
+ self.password_logged = True
self.assertNotIn('scrubme', args[0])
- with mock.patch.object(vmops.LOG, 'debug',
- side_effect=fake_debug) as debug_mock:
- # the invalid disk format will cause an exception
- image_meta = {'disk_format': 'fake'}
- self.assertRaises(exception.InvalidDiskFormat, self._vmops.spawn,
- self._context, self._instance, image_meta,
- injected_files=None, admin_password=None,
- network_info=[], block_device_info=bdi)
- # we don't care what the log message is, we just want to make sure
- # our stub method is called which asserts the password is scrubbed
- self.assertTrue(debug_mock.called)
+ mock_debug.side_effect = fake_debug
+ self.flags(flat_injected=False, vnc_enabled=False)
+ mock_attach_root_volume.side_effect = Exception
+
+ # Call spawn(). We don't care what it does as long as it generates
+ # the log message, which we check below.
+ try:
+ self._vmops.spawn(
+ self._context, self._instance, {},
+ injected_files=None, admin_password=None,
+ network_info=[], block_device_info=bdi
+ )
+ except Exception:
+ pass
+
+        # Check that the relevant log message was generated, which means
+        # fake_debug ran and verified that the password was scrubbed
+ self.assertTrue(self.password_logged)
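The rewritten test no longer relies on a specific early failure; it only requires that the spawn path scrubs auth_password before logging block_device_info. A minimal sketch of that kind of scrubbing, assuming oslo's strutils.mask_password helper:

```python
from nova.openstack.common import log as logging
from nova.openstack.common import strutils

LOG = logging.getLogger(__name__)


def log_block_device_info(block_device_info):
    # mask_password stringifies its argument and replaces password-like
    # values, so 'scrubme' never reaches the logs.
    LOG.debug("Block device information present: %s",
              strutils.mask_password(block_device_info))
```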
def test_get_ds_browser(self):
cache = self._vmops._datastore_browser_mapping
@@ -569,7 +616,7 @@ def _verify_spawn_method_calls(self, mock_call_method):
recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
self.assertEqual(expected_methods, recorded_methods)
- @mock.patch('nova.virt.vmwareapi.vm_util.get_datastore')
+ @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
@mock.patch(
'nova.virt.vmwareapi.vmops.VMwareVCVMOps.get_datacenter_ref_and_name')
@mock.patch('nova.virt.vmwareapi.vm_util.get_mo_id_from_instance',
@@ -653,8 +700,8 @@ def _test_spawn(self,
mock_get_res_pool_ref.assert_called_once_with(
self._session, None, 'fake_node_mo_id')
mock_get_vif_info.assert_called_once_with(
- self._session, None, False, network_model.VIF_MODEL_E1000,
- network_info)
+ self._session, None, False,
+ constants.DEFAULT_VIF_MODEL, network_info)
mock_get_create_spec.assert_called_once_with(
self._session._get_vim().client.factory,
self._instance,
@@ -730,3 +777,57 @@ def test_spawn_with_block_device_info(self):
'block_device_mapping': [{'connection_info': 'fake'}]
}
self._test_spawn(block_device_info=block_device_info)
+
+ @mock.patch('nova.virt.vmwareapi.driver.VMwareAPISession._get_vim_object')
+ def test_build_virtual_machine(self, mock_get_vim_object):
+ mock_get_vim_object.return_value = vmwareapi_fake.FakeVim()
+
+ fake_session = driver.VMwareAPISession()
+ fake_vmops = vmops.VMwareVCVMOps(fake_session, None, None)
+
+ image_id = nova.tests.image.fake.get_valid_image_id()
+ image = vmware_images.VMwareImage(image_id=image_id)
+
+ vm_ref = fake_vmops.build_virtual_machine(self._instance,
+ 'fake-instance-name',
+ image, self._dc_info,
+ self._ds, self.network_info)
+
+ vm = vmwareapi_fake._get_object(vm_ref)
+
+ # Test basic VM parameters
+ self.assertEqual('fake-instance-name', vm.name)
+ # NOTE(mdbooth): The instanceUuid behaviour below is apparently
+ # deliberate.
+ self.assertEqual('fake-instance-name',
+ vm.get('summary.config.instanceUuid'))
+ self.assertEqual(self._instance_values['vcpus'],
+ vm.get('summary.config.numCpu'))
+ self.assertEqual(self._instance_values['memory_mb'],
+ vm.get('summary.config.memorySizeMB'))
+
+ # Test NSX config
+ for optval in vm.get('config.extraConfig').OptionValue:
+ if optval.key == 'nvp.vm-uuid':
+ self.assertEqual(self._instance_values['uuid'], optval.value)
+ break
+ else:
+ self.fail('nvp.vm-uuid not found in extraConfig')
+
+ # Test that the VM is associated with the specified datastore
+ datastores = vm.datastore.ManagedObjectReference
+ self.assertEqual(1, len(datastores))
+
+ datastore = vmwareapi_fake._get_object(datastores[0])
+ self.assertEqual(self._ds.name, datastore.get('summary.name'))
+
+ # Test that the VM's network is configured as specified
+ devices = vm.get('config.hardware.device').VirtualDevice
+ for device in devices:
+ if device.obj_name != 'ns0:VirtualE1000':
+ continue
+ self.assertEqual(self._network_values['address'],
+ device.macAddress)
+ break
+ else:
+ self.fail('NIC not configured')
diff --git a/nova/tests/virt/vmwareapi/test_vmware_images.py b/nova/tests/virt/vmwareapi/test_vmware_images.py
index 5e451646fe..8b55c39987 100644
--- a/nova/tests/virt/vmwareapi/test_vmware_images.py
+++ b/nova/tests/virt/vmwareapi/test_vmware_images.py
@@ -19,8 +19,11 @@
import mock
+from nova import exception
+from nova.openstack.common import units
from nova import test
import nova.tests.image.fake
+from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import read_write_util
from nova.virt.vmwareapi import vmware_images
@@ -81,3 +84,135 @@ def fake_write_handle(host, dc_name, ds_name, cookies,
write_file_handle=write_file_handle)
image_download.assert_called_once_with(context, instance['image_ref'])
image_show.assert_called_once_with(context, instance['image_ref'])
+
+ def _setup_mock_get_remote_image_service(self,
+ mock_get_remote_image_service,
+ metadata):
+ mock_image_service = mock.MagicMock()
+ mock_image_service.show.return_value = metadata
+ mock_get_remote_image_service.return_value = [mock_image_service, 'i']
+
+ def test_from_image_with_image_ref(self):
+ raw_disk_size_in_gb = 83
+ raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi
+ image_id = nova.tests.image.fake.get_valid_image_id()
+ mdata = {'size': raw_disk_size_in_bytes,
+ 'disk_format': 'vmdk',
+ 'properties': {
+ "vmware_ostype": constants.DEFAULT_OS_TYPE,
+ "vmware_adaptertype": constants.DEFAULT_ADAPTER_TYPE,
+ "vmware_disktype": constants.DEFAULT_DISK_TYPE,
+ "hw_vif_model": constants.DEFAULT_VIF_MODEL,
+ vmware_images.LINKED_CLONE_PROPERTY: True}}
+
+ img_props = vmware_images.VMwareImage.from_image(image_id, mdata)
+
+ image_size_in_kb = raw_disk_size_in_bytes / units.Ki
+ image_size_in_gb = raw_disk_size_in_bytes / units.Gi
+
+ # assert that defaults are set and no value returned is left empty
+ self.assertEqual(constants.DEFAULT_OS_TYPE, img_props.os_type)
+ self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
+ img_props.adapter_type)
+ self.assertEqual(constants.DEFAULT_DISK_TYPE, img_props.disk_type)
+ self.assertEqual(constants.DEFAULT_VIF_MODEL, img_props.vif_model)
+ self.assertTrue(img_props.linked_clone)
+ self.assertEqual(image_size_in_kb, img_props.file_size_in_kb)
+ self.assertEqual(image_size_in_gb, img_props.file_size_in_gb)
+
+ def _image_build(self, image_lc_setting, global_lc_setting,
+ disk_format=constants.DEFAULT_DISK_FORMAT,
+ os_type=constants.DEFAULT_OS_TYPE,
+ adapter_type=constants.DEFAULT_ADAPTER_TYPE,
+ disk_type=constants.DEFAULT_DISK_TYPE,
+ vif_model=constants.DEFAULT_VIF_MODEL):
+ self.flags(use_linked_clone=global_lc_setting, group='vmware')
+ raw_disk_size_in_gb = 93
+        raw_disk_size_in_bytes = raw_disk_size_in_gb * units.Gi
+
+        image_id = nova.tests.image.fake.get_valid_image_id()
+        mdata = {'size': raw_disk_size_in_bytes,
+ 'disk_format': disk_format,
+ 'properties': {
+ "vmware_ostype": os_type,
+ "vmware_adaptertype": adapter_type,
+ "vmware_disktype": disk_type,
+ "hw_vif_model": vif_model}}
+
+ if image_lc_setting is not None:
+ mdata['properties'][
+ vmware_images.LINKED_CLONE_PROPERTY] = image_lc_setting
+
+ return vmware_images.VMwareImage.from_image(image_id, mdata)
+
+ def test_use_linked_clone_override_nf(self):
+ image_props = self._image_build(None, False)
+ self.assertFalse(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_nt(self):
+ image_props = self._image_build(None, True)
+ self.assertTrue(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_ny(self):
+ image_props = self._image_build(None, "yes")
+ self.assertTrue(image_props.linked_clone,
+ "No overrides present but still overridden!")
+
+ def test_use_linked_clone_override_ft(self):
+ image_props = self._image_build(False, True)
+ self.assertFalse(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_linked_clone_override_string_nt(self):
+ image_props = self._image_build("no", True)
+ self.assertFalse(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_linked_clone_override_string_yf(self):
+ image_props = self._image_build("yes", False)
+ self.assertTrue(image_props.linked_clone,
+ "image level metadata failed to override global")
+
+ def test_use_disk_format_none(self):
+ image = self._image_build(None, True, disk_format=None)
+ self.assertIsNone(image.file_type)
+ self.assertFalse(image.is_iso)
+
+ def test_use_disk_format_iso(self):
+ image = self._image_build(None, True, disk_format='iso')
+ self.assertEqual('iso', image.file_type)
+ self.assertTrue(image.is_iso)
+
+ def test_use_bad_disk_format(self):
+ self.assertRaises(exception.InvalidDiskFormat,
+ self._image_build,
+ None,
+ True,
+ disk_format='bad_disk_format')
+
+ def test_image_no_defaults(self):
+ image = self._image_build(False, False,
+ disk_format='iso',
+ os_type='fake-os-type',
+ adapter_type='fake-adapter-type',
+ disk_type='fake-disk-type',
+ vif_model='fake-vif-model')
+ self.assertEqual('iso', image.file_type)
+ self.assertEqual('fake-os-type', image.os_type)
+ self.assertEqual('fake-adapter-type', image.adapter_type)
+ self.assertEqual('fake-disk-type', image.disk_type)
+ self.assertEqual('fake-vif-model', image.vif_model)
+ self.assertFalse(image.linked_clone)
+
+ def test_image_defaults(self):
+ image = vmware_images.VMwareImage(image_id='fake-image-id')
+
+        # N.B. We intentionally don't use the defined constants here. Amongst
+ # other potential failures, we're interested in changes to their
+ # values, which would not otherwise be picked up.
+ self.assertEqual('otherGuest', image.os_type)
+ self.assertEqual('lsiLogic', image.adapter_type)
+ self.assertEqual('preallocated', image.disk_type)
+ self.assertEqual('e1000', image.vif_model)
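Taken together, the linked-clone cases above encode a single precedence rule: an image-level linked_clone property, when present, overrides the global use_linked_clone flag, with "yes"/"no" strings coerced to booleans. A sketch of that decision under those assumptions (the real logic lives in VMwareImage.from_image):

```python
from nova.openstack.common import strutils


def decide_linked_clone(image_linked_clone, global_linked_clone):
    # Image-level metadata wins whenever it is set; otherwise fall back
    # to the CONF.vmware.use_linked_clone global.
    if image_linked_clone is not None:
        return strutils.bool_from_string(image_linked_clone)
    return global_linked_clone
```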
diff --git a/nova/tests/virt/xenapi/image/test_bittorrent.py b/nova/tests/virt/xenapi/image/test_bittorrent.py
index e3a70c5a24..2ebb52f79f 100644
--- a/nova/tests/virt/xenapi/image/test_bittorrent.py
+++ b/nova/tests/virt/xenapi/image/test_bittorrent.py
@@ -17,7 +17,7 @@
import pkg_resources
from nova import context
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import test
from nova.tests.virt.xenapi import stubs
from nova.virt.xenapi import driver as xenapi_conn
diff --git a/nova/tests/virt/xenapi/stubs.py b/nova/tests/virt/xenapi/stubs.py
index 3ad289d1c6..a2225af388 100644
--- a/nova/tests/virt/xenapi/stubs.py
+++ b/nova/tests/virt/xenapi/stubs.py
@@ -44,7 +44,7 @@ def fake_fetch_image(context, session, instance, name_label, image, type):
stubs.Set(vm_utils, '_fetch_image', fake_fetch_image)
def fake_wait_for_vhd_coalesce(*args):
- #TODO(sirp): Should we actually fake out the data here
+        # TODO(sirp): Should we actually fake out the data here?
return "fakeparent", "fakebase"
stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
diff --git a/nova/tests/virt/xenapi/test_driver.py b/nova/tests/virt/xenapi/test_driver.py
index 8e5c538563..7276cc4b5a 100644
--- a/nova/tests/virt/xenapi/test_driver.py
+++ b/nova/tests/virt/xenapi/test_driver.py
@@ -15,6 +15,8 @@
import math
+import mock
+
from nova.openstack.common import units
from nova.tests.virt import test_driver
from nova.tests.virt.xenapi import stubs
@@ -27,6 +29,12 @@ class XenAPIDriverTestCase(stubs.XenAPITestBaseNoDB,
test_driver.DriverAPITestHelper):
"""Unit tests for Driver operations."""
+ def _get_driver(self):
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
+ self.flags(connection_url='test_url',
+ connection_password='test_pass', group='xenserver')
+ return xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
+
def host_stats(self, refresh=True):
return {'host_memory_total': 3 * units.Mi,
'host_memory_free_computed': 2 * units.Mi,
@@ -40,11 +48,7 @@ def host_stats(self, refresh=True):
'pci_passthrough_devices': ''}
def test_available_resource(self):
- self.flags(connection_url='test_url',
- connection_password='test_pass', group='xenserver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
-
- driver = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
+ driver = self._get_driver()
driver._session.product_version = (6, 8, 2)
self.stubs.Set(driver, 'get_host_stats', self.host_stats)
@@ -62,10 +66,7 @@ def test_available_resource(self):
self.assertEqual(1, resources['disk_available_least'])
def test_overhead(self):
- self.flags(connection_url='test_url',
- connection_password='test_pass', group='xenserver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- driver = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
+ driver = self._get_driver()
instance = {'memory_mb': 30720, 'vcpus': 4}
# expected memory overhead per:
@@ -78,10 +79,7 @@ def test_overhead(self):
self.assertEqual(expected, overhead['memory_mb'])
def test_set_bootable(self):
- self.flags(connection_url='test_url', connection_password='test_pass',
- group='xenserver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- driver = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
+ driver = self._get_driver()
self.mox.StubOutWithMock(driver._vmops, 'set_bootable')
driver._vmops.set_bootable('inst', True)
@@ -89,9 +87,15 @@ def test_set_bootable(self):
driver.set_bootable('inst', True)
+ def test_post_interrupted_snapshot_cleanup(self):
+ driver = self._get_driver()
+ fake_vmops_cleanup = mock.Mock()
+ driver._vmops.post_interrupted_snapshot_cleanup = fake_vmops_cleanup
+
+ driver.post_interrupted_snapshot_cleanup("context", "instance")
+
+ fake_vmops_cleanup.assert_called_once_with("context", "instance")
+
def test_public_api_signatures(self):
- self.flags(connection_url='test_url', connection_password='test_pass',
- group='xenserver')
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- inst = xenapi.XenAPIDriver(fake.FakeVirtAPI(), False)
+ inst = self._get_driver()
self.assertPublicAPISignatures(inst)
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py
index b302ebb82c..c3ba4e5150 100644
--- a/nova/tests/virt/xenapi/test_vm_utils.py
+++ b/nova/tests/virt/xenapi/test_vm_utils.py
@@ -27,7 +27,7 @@
from nova.compute import vm_mode
from nova import context
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import processutils
from nova.openstack.common import timeutils
from nova.openstack.common import units
@@ -802,8 +802,7 @@ def test_exception_msg_contains_vm_name(self):
try:
vm_utils.vm_ref_or_raise('session', 'somename')
except exception.InstanceNotFound as e:
- self.assertTrue(
- 'somename' in str(e))
+ self.assertIn('somename', str(e))
mock.VerifyAll()
@@ -2311,6 +2310,102 @@ def test_list_vms(self):
self.assertIn(vm_ref, result_keys)
+class ChildVHDsTestCase(test.NoDBTestCase):
+ all_vdis = [
+ ("my-vdi-ref",
+ {"uuid": "my-uuid", "sm_config": {},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("non-parent",
+ {"uuid": "uuid-1", "sm_config": {},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("diff-parent",
+ {"uuid": "uuid-1", "sm_config": {"vhd-parent": "other-uuid"},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("child",
+ {"uuid": "uuid-child", "sm_config": {"vhd-parent": "my-uuid"},
+ "is_a_snapshot": False, "other_config": {}}),
+ ("child-snap",
+ {"uuid": "uuid-child-snap", "sm_config": {"vhd-parent": "my-uuid"},
+ "is_a_snapshot": True, "other_config": {}}),
+ ]
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_child_vhds_defaults(self, mock_get_all):
+ mock_get_all.return_value = self.all_vdis
+
+ result = vm_utils._child_vhds("session", "sr_ref", "my-uuid")
+
+ self.assertEqual(['uuid-child', 'uuid-child-snap'], result)
+
+ @mock.patch.object(vm_utils, '_get_all_vdis_in_sr')
+ def test_child_vhds_only_snapshots(self, mock_get_all):
+ mock_get_all.return_value = self.all_vdis
+
+ result = vm_utils._child_vhds("session", "sr_ref", "my-uuid",
+ old_snapshots_only=True)
+
+ self.assertEqual(['uuid-child-snap'], result)
+
+ def test_is_vdi_a_snapshot_works(self):
+ vdi_rec = {"is_a_snapshot": True,
+ "other_config": {}}
+
+ self.assertTrue(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+ def test_is_vdi_a_snapshot_base_images_false(self):
+ vdi_rec = {"is_a_snapshot": True,
+ "other_config": {"image-id": "fake"}}
+
+ self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+ def test_is_vdi_a_snapshot_false_for_non_snapshot(self):
+ vdi_rec = {"is_a_snapshot": False,
+ "other_config": {}}
+
+ self.assertFalse(vm_utils._is_vdi_a_snapshot(vdi_rec))
+
+
+class RemoveOldSnapshotsTestCase(test.NoDBTestCase):
+
+ @mock.patch.object(vm_utils, '_child_vhds')
+ @mock.patch.object(vm_utils, '_get_vhd_parent_uuid')
+ @mock.patch.object(vm_utils, 'get_vdi_for_vm_safely')
+ @mock.patch.object(vm_utils, 'safe_find_sr')
+ def test_get_snapshots_for_vm(self, mock_find, mock_get_vdi,
+ mock_parent, mock_child_vhds):
+ session = mock.Mock()
+ instance = {"uuid": "uuid"}
+ mock_find.return_value = "sr_ref"
+ mock_get_vdi.return_value = ("vm_vdi_ref", "vm_vdi_rec")
+ mock_parent.return_value = "parent_uuid"
+ mock_child_vhds.return_value = []
+
+ result = vm_utils._get_snapshots_for_vm(session, instance, "vm_ref")
+
+ self.assertEqual([], result)
+ mock_find.assert_called_once_with(session)
+ mock_get_vdi.assert_called_once_with(session, "vm_ref")
+ mock_parent.assert_called_once_with(session, "vm_vdi_ref")
+ mock_child_vhds.assert_called_once_with(session, "sr_ref",
+ "parent_uuid", old_snapshots_only=True)
+
+ @mock.patch.object(vm_utils, 'scan_default_sr')
+ @mock.patch.object(vm_utils, 'safe_destroy_vdis')
+ @mock.patch.object(vm_utils, '_get_snapshots_for_vm')
+ def test_remove_old_snapshots(self, mock_get, mock_destroy, mock_scan):
+ session = mock.Mock()
+ instance = {"uuid": "uuid"}
+ mock_get.return_value = ["vdi_uuid1", "vdi_uuid2"]
+ session.VDI.get_by_uuid.return_value = "vdi_ref"
+
+ vm_utils.remove_old_snapshots(session, instance, "vm_ref")
+
+ self.assertTrue(mock_scan.called)
+ session.VDI.get_by_uuid.assert_called_once_with("vdi_uuid1")
+ mock_destroy.assert_called_once_with(session, ["vdi_ref"])
+ mock_scan.assert_called_once_with(session)
+
+
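The ChildVHDsTestCase fixtures above describe the filter under test: every VDI in the SR whose sm_config names the given uuid as vhd-parent counts as a child, optionally restricted to snapshots. A self-contained sketch of that filtering, with field names borrowed from the fixtures:

```python
def child_vhds(all_vdis, parent_uuid, old_snapshots_only=False):
    # all_vdis is a list of (vdi_ref, vdi_record) pairs, as yielded by
    # the mocked vm_utils._get_all_vdis_in_sr in the tests above.
    children = []
    for _ref, rec in all_vdis:
        if rec['sm_config'].get('vhd-parent') != parent_uuid:
            continue
        if old_snapshots_only:
            # Mirrors _is_vdi_a_snapshot: cached base images carry an
            # 'image-id' key in other_config and do not count.
            is_snapshot = (rec['is_a_snapshot'] and
                           'image-id' not in rec['other_config'])
            if not is_snapshot:
                continue
        children.append(rec['uuid'])
    return children
```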
class ResizeFunctionTestCase(test.NoDBTestCase):
def _call_get_resize_func_name(self, brand, version):
session = mock.Mock()
diff --git a/nova/tests/virt/xenapi/test_vmops.py b/nova/tests/virt/xenapi/test_vmops.py
index 47b0db457c..e0e7cbc33d 100644
--- a/nova/tests/virt/xenapi/test_vmops.py
+++ b/nova/tests/virt/xenapi/test_vmops.py
@@ -213,6 +213,8 @@ def _stub_out_common(self):
self.mox.StubOutWithMock(vm_utils, 'determine_disk_image_type')
self.mox.StubOutWithMock(vm_utils, 'get_vdis_for_instance')
self.mox.StubOutWithMock(vm_utils, 'safe_destroy_vdis')
+ self.mox.StubOutWithMock(self.vmops._volumeops,
+ 'safe_cleanup_from_vdis')
self.mox.StubOutWithMock(self.vmops, '_resize_up_vdis')
self.mox.StubOutWithMock(vm_utils,
'create_kernel_and_ramdisk')
@@ -370,6 +372,7 @@ def _test_spawn(self, name_label_param=None, block_device_info_param=None,
vm_utils.destroy_kernel_ramdisk(self.vmops._session, instance,
kernel_file, ramdisk_file)
vm_utils.safe_destroy_vdis(self.vmops._session, ["fake_ref"])
+ self.vmops._volumeops.safe_cleanup_from_vdis(["fake_ref_2"])
self.mox.ReplayAll()
self.vmops.spawn(context, instance, image_meta, injected_files,
@@ -930,6 +933,18 @@ def test_resize_up_vdis_ephemeral_with_generate(self, mock_sizes,
None, 5, 1000)
+@mock.patch.object(vm_utils, 'remove_old_snapshots')
+class CleanupFailedSnapshotTestCase(VMOpsTestBase):
+ def test_post_interrupted_snapshot_cleanup(self, mock_remove):
+ self.vmops._get_vm_opaque_ref = mock.Mock()
+ self.vmops._get_vm_opaque_ref.return_value = "vm_ref"
+
+ self.vmops.post_interrupted_snapshot_cleanup("context", "instance")
+
+ mock_remove.assert_called_once_with(self.vmops._session,
+ "instance", "vm_ref")
+
+
class LiveMigrateHelperTestCase(VMOpsTestBase):
def test_connect_block_device_volumes_none(self):
self.assertEqual({}, self.vmops.connect_block_device_volumes(None))
diff --git a/nova/tests/virt/xenapi/test_volume_utils.py b/nova/tests/virt/xenapi/test_volume_utils.py
index 1779bf2e50..f0049a6b33 100644
--- a/nova/tests/virt/xenapi/test_volume_utils.py
+++ b/nova/tests/virt/xenapi/test_volume_utils.py
@@ -41,6 +41,36 @@ class UUIDException(Exception):
'sr_uuid'),
None)
+ def test_find_sr_from_vdi(self):
+ vdi_ref = 'fake-ref'
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.call_xenapi.side_effect = fake_call_xenapi
+ self.assertEqual(volume_utils.find_sr_from_vdi(session, vdi_ref),
+ vdi_ref)
+
+ def test_find_sr_from_vdi_exception(self):
+ vdi_ref = 'fake-ref'
+
+ class FakeException(Exception):
+ pass
+
+ def fake_call_xenapi(method, *args):
+ self.assertEqual(method, 'VDI.get_SR')
+ self.assertEqual(args[0], vdi_ref)
+ return args[0]
+
+ session = mock.Mock()
+ session.XenAPI.Failure = FakeException
+ session.call_xenapi.side_effect = FakeException
+ self.assertRaises(exception.StorageError,
+ volume_utils.find_sr_from_vdi, session, vdi_ref)
+
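The two tests above fix the contract for find_sr_from_vdi: return whatever VDI.get_SR yields, and translate a XenAPI failure into StorageError. A minimal sketch; the error text is an assumption:

```python
from nova import exception
from nova.i18n import _


def find_sr_from_vdi(session, vdi_ref):
    """Return the SR reference that the given VDI belongs to."""
    try:
        return session.call_xenapi("VDI.get_SR", vdi_ref)
    except session.XenAPI.Failure:
        raise exception.StorageError(
            reason=_('Unable to find SR from VDI %s') % vdi_ref)
```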
class ISCSIParametersTestCase(stubs.XenAPITestBaseNoDB):
def test_target_host(self):
diff --git a/nova/tests/virt/xenapi/test_volumeops.py b/nova/tests/virt/xenapi/test_volumeops.py
index 5fca3878d4..fbb4ad09af 100644
--- a/nova/tests/virt/xenapi/test_volumeops.py
+++ b/nova/tests/virt/xenapi/test_volumeops.py
@@ -498,3 +498,52 @@ class FakeException(Exception):
self.assertRaises(FakeException,
self.ops.find_bad_volumes, "vm_ref")
mock_scan.assert_called_once_with("sr_ref")
+
+
+class CleanupFromVDIsTestCase(VolumeOpsTestBase):
+ def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs):
+ find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
+ in vdi_refs]
+ find_sr_from_vdi.assert_has_calls(find_sr_calls)
+ purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
+ in sr_refs]
+ purge_sr.assert_has_calls(purge_sr_calls)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi',
+ side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref2']
+ find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
+ sr_refs[0]]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
+
+ @mock.patch.object(volume_utils, 'find_sr_from_vdi')
+ @mock.patch.object(volume_utils, 'purge_sr')
+ def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
+ find_sr_from_vdi):
+ vdi_refs = ['vdi_ref1', 'vdi_ref2']
+ sr_refs = ['sr_ref1', 'sr_ref2']
+ find_sr_from_vdi.side_effect = sr_refs
+        purge_sr.side_effect = [test.TestingException, None]
+ self.ops.safe_cleanup_from_vdis(vdi_refs)
+
+ self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
+ sr_refs)
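These three cases describe a best-effort loop: each VDI's SR is looked up and purged, and a failure at either step is skipped so the remaining VDIs still get cleaned up. A hedged sketch of that behaviour:

```python
from nova import exception
from nova.openstack.common import log as logging
from nova.virt.xenapi import volume_utils

LOG = logging.getLogger(__name__)


def safe_cleanup_from_vdis(session, vdi_refs):
    # Best-effort cleanup: a failure on one VDI must not prevent the
    # cleanup of the others (sketch of the behaviour asserted above).
    for vdi_ref in vdi_refs:
        try:
            sr_ref = volume_utils.find_sr_from_vdi(session, vdi_ref)
        except exception.StorageError as exc:
            LOG.debug("Ignoring error while looking up SR: %s", exc)
            continue
        try:
            volume_utils.purge_sr(session, sr_ref)
        except Exception as exc:
            LOG.debug("Ignoring error while purging SR: %s", exc)
```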
diff --git a/nova/tests/virt/xenapi/test_xenapi.py b/nova/tests/virt/xenapi/test_xenapi.py
index d801018716..6d98783ca0 100644
--- a/nova/tests/virt/xenapi/test_xenapi.py
+++ b/nova/tests/virt/xenapi/test_xenapi.py
@@ -368,6 +368,22 @@ def test_get_rrd_server(self):
self.assertEqual(server_info[0], 'myscheme')
self.assertEqual(server_info[1], 'myaddress')
+ expected_raw_diagnostics = {
+ 'vbd_xvdb_write': '0.0',
+ 'memory_target': '4294967296.0000',
+ 'memory_internal_free': '1415564.0000',
+ 'memory': '4294967296.0000',
+ 'vbd_xvda_write': '0.0',
+ 'cpu0': '0.0042',
+ 'vif_0_tx': '287.4134',
+ 'vbd_xvda_read': '0.0',
+ 'vif_0_rx': '1816.0144',
+ 'vif_2_rx': '0.0',
+ 'vif_2_tx': '0.0',
+ 'vbd_xvdb_read': '0.0',
+ 'last_update': '1328795567',
+ }
+
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
@@ -375,24 +391,47 @@ def fake_get_rrd(host, vm_uuid):
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
- fake_diagnostics = {
- 'vbd_xvdb_write': '0.0',
- 'memory_target': '4294967296.0000',
- 'memory_internal_free': '1415564.0000',
- 'memory': '4294967296.0000',
- 'vbd_xvda_write': '0.0',
- 'cpu0': '0.0042',
- 'vif_0_tx': '287.4134',
- 'vbd_xvda_read': '0.0',
- 'vif_0_rx': '1816.0144',
- 'vif_2_rx': '0.0',
- 'vif_2_tx': '0.0',
- 'vbd_xvdb_read': '0.0',
- 'last_update': '1328795567',
- }
+ expected = self.expected_raw_diagnostics
+ instance = self._create_instance()
+ actual = self.conn.get_diagnostics(instance)
+ self.assertThat(actual, matchers.DictMatches(expected))
+
+ def test_get_instance_diagnostics(self):
+ def fake_get_rrd(host, vm_uuid):
+ path = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(path, 'vm_rrd.xml')) as f:
+ return re.sub(r'\s', '', f.read())
+ self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
+
+ expected = {
+ 'config_drive': False,
+ 'state': 'running',
+ 'driver': 'xenapi',
+ 'version': '1.0',
+ 'uptime': 0,
+ 'hypervisor_os': None,
+ 'cpu_details': [{'time': 0}, {'time': 0},
+ {'time': 0}, {'time': 0}],
+ 'nic_details': [{'mac_address': '00:00:00:00:00:00',
+ 'rx_drop': 0,
+ 'rx_errors': 0,
+ 'rx_octets': 0,
+ 'rx_packets': 0,
+ 'tx_drop': 0,
+ 'tx_errors': 0,
+ 'tx_octets': 0,
+ 'tx_packets': 0}],
+ 'disk_details': [{'errors_count': 0,
+ 'id': '',
+ 'read_bytes': 0,
+ 'read_requests': 0,
+ 'write_bytes': 0,
+ 'write_requests': 0}],
+ 'memory_details': {'maximum': 8192, 'used': 0}}
+
instance = self._create_instance()
- expected = self.conn.get_diagnostics(instance)
- self.assertThat(fake_diagnostics, matchers.DictMatches(expected))
+ actual = self.conn.get_instance_diagnostics(instance)
+ self.assertEqual(expected, actual.serialize())
def test_get_vnc_console(self):
instance = self._create_instance(obj=True)
@@ -527,7 +566,7 @@ def fake_image_upload(_self, ctx, session, inst, vdi_uuids,
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
- self.assertTrue(not name_label.endswith('snapshot'))
+ self.assertFalse(name_label.endswith('snapshot'))
self.assertTrue(self.fake_upload_called)
@@ -559,7 +598,7 @@ def check_vm_record(self, conn, instance_type_id, check_injection):
self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
- if vcpu_weight == None:
+ if vcpu_weight is None:
self.assertEqual(self.vm['VCPUs_params'], {})
else:
self.assertEqual(self.vm['VCPUs_params'],
@@ -894,7 +933,7 @@ def _tee_handler(cmd, **kwargs):
dns-nameservers 192.168.1.3 192.168.1.4
iface eth0 inet6 static
address 2001:db8:0:1::1
- netmask ffff:ffff:ffff:ffff::
+ netmask 64
gateway 2001:db8:0:1::1
"""
self.assertEqual(expected, actual)
@@ -935,7 +974,7 @@ def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
- # Umount would normall make files in the m,ounted filesystem
+ # Umount would normally make files in the mounted filesystem
# disappear, so do that here
LOG.debug('Removing simulated guest agent files in %s',
self._tmpdir)
@@ -1126,7 +1165,7 @@ def test_spawn_with_resetnetwork_alternative_returncode(self):
def fake_resetnetwork(self, method, args):
fake_resetnetwork.called = True
- #NOTE(johngarbutt): as returned by FreeBSD and Gentoo
+ # NOTE(johngarbutt): as returned by FreeBSD and Gentoo
return jsonutils.dumps({'returncode': '500',
'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
@@ -1392,7 +1431,7 @@ def test_uuid_find(self):
fake_inst2 = fake_instance.fake_db_instance(id=456)
db.instance_get_all_by_host(self.context, fake_inst['host'],
columns_to_join=None,
- use_slave=False
+ use_subordinate=False
).AndReturn([fake_inst, fake_inst2])
self.mox.ReplayAll()
expected_name = CONF.instance_name_template % fake_inst['id']
@@ -1409,7 +1448,7 @@ def fake_aggregate_get_by_host(self, *args, **kwargs):
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
- self.stubs.Set(self.conn._session, "is_slave", True)
+ self.stubs.Set(self.conn._session, "is_subordinate", True)
self.assertRaises(test.TestingException,
self.conn._session._get_host_uuid)
@@ -2702,7 +2741,7 @@ def test_do_refresh_security_group_rules(self):
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
- #validate the extra rule
+ # validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
@@ -2828,9 +2867,9 @@ def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
'fake_host2'],
'avail_zone2': ['fake_host3'], }):
for avail_zone, hosts in values.iteritems():
- for host in hosts:
+ for service_host in hosts:
db.service_create(context,
- {'host': host,
+ {'host': service_host,
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0})
@@ -2864,7 +2903,7 @@ def setUp(self):
pool_states.POOL_FLAG: 'XenAPI'}}
self.aggr = db.aggregate_create(self.context, values)
self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
- 'master_compute': 'host',
+ 'main_compute': 'host',
'availability_zone': 'fake_zone',
pool_states.KEY: pool_states.ACTIVE,
'host': xenapi_fake.get_record('host',
@@ -2874,18 +2913,18 @@ def test_pool_add_to_aggregate_called_by_driver(self):
calls = []
- def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
+ def pool_add_to_aggregate(context, aggregate, host, subordinate_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
- self.assertEqual("SLAVEINFO", slave_info)
+ self.assertEqual("SLAVEINFO", subordinate_info)
calls.append(pool_add_to_aggregate)
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
pool_add_to_aggregate)
self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
- slave_info="SLAVEINFO")
+ subordinate_info="SLAVEINFO")
self.assertIn(pool_add_to_aggregate, calls)
@@ -2894,18 +2933,18 @@ def test_pool_remove_from_aggregate_called_by_driver(self):
calls = []
def pool_remove_from_aggregate(context, aggregate, host,
- slave_info=None):
+ subordinate_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
- self.assertEqual("SLAVEINFO", slave_info)
+ self.assertEqual("SLAVEINFO", subordinate_info)
calls.append(pool_remove_from_aggregate)
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
pool_remove_from_aggregate)
self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
- slave_info="SLAVEINFO")
+ subordinate_info="SLAVEINFO")
self.assertIn(pool_remove_from_aggregate, calls)
@@ -2921,11 +2960,11 @@ def fake_init_pool(id, name):
self.assertThat(self.fake_metadata,
matchers.DictMatches(result['metadetails']))
- def test_join_slave(self):
- # Ensure join_slave gets called when the request gets to master.
- def fake_join_slave(id, compute_uuid, host, url, user, password):
- fake_join_slave.called = True
- self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
+ def test_join_subordinate(self):
+ # Ensure join_subordinate gets called when the request gets to main.
+ def fake_join_subordinate(id, compute_uuid, host, url, user, password):
+ fake_join_subordinate.called = True
+ self.stubs.Set(self.conn._pool, "_join_subordinate", fake_join_subordinate)
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
@@ -2935,7 +2974,7 @@ def fake_join_slave(id, compute_uuid, host, url, user, password):
user='fake_user',
passwd='fake_pass',
xenhost_uuid='fake_uuid'))
- self.assertTrue(fake_join_slave.called)
+ self.assertTrue(fake_join_subordinate.called)
def test_add_to_aggregate_first_host(self):
def fake_pool_set_name_label(self, session, pool_ref, name):
@@ -2975,19 +3014,19 @@ def test_remove_from_empty_aggregate(self):
self.conn._pool.remove_from_aggregate,
self.context, result, "test_host")
- def test_remove_slave(self):
- # Ensure eject slave gets called.
- def fake_eject_slave(id, compute_uuid, host_uuid):
- fake_eject_slave.called = True
- self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
+ def test_remove_subordinate(self):
+ # Ensure eject subordinate gets called.
+ def fake_eject_subordinate(id, compute_uuid, host_uuid):
+ fake_eject_subordinate.called = True
+ self.stubs.Set(self.conn._pool, "_eject_subordinate", fake_eject_subordinate)
self.fake_metadata['host2'] = 'fake_host2_uuid'
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
- self.assertTrue(fake_eject_slave.called)
+ self.assertTrue(fake_eject_subordinate.called)
- def test_remove_master_solo(self):
+ def test_remove_main_solo(self):
# Ensure metadata are cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
@@ -3002,8 +3041,8 @@ def fake_clear_pool(id):
pool_states.KEY: pool_states.ACTIVE},
matchers.DictMatches(result['metadetails']))
- def test_remote_master_non_empty_pool(self):
- # Ensure AggregateError is raised if removing the master.
+ def test_remote_main_non_empty_pool(self):
+ # Ensure AggregateError is raised if removing the main.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
@@ -3024,8 +3063,8 @@ def _aggregate_setup(self, aggr_name='fake_aggregate',
if metadata:
aggregate.metadata.update(metadata)
aggregate.create(self.context)
- for host in hosts:
- aggregate.add_host(host)
+ for aggregate_host in hosts:
+ aggregate.add_host(aggregate_host)
return aggregate
def test_add_host_to_aggregate_invalid_changing_status(self):
@@ -3033,27 +3072,30 @@ def test_add_host_to_aggregate_invalid_changing_status(self):
aggregate is not ready.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
- self.assertRaises(exception.InvalidAggregateAction,
- self.conn.add_to_aggregate, self.context,
- aggregate, 'host')
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'host')
+ self.assertIn('setup in progress', str(ex))
def test_add_host_to_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
deleted.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
- self.assertRaises(exception.InvalidAggregateAction,
- self.conn.add_to_aggregate, self.context,
- aggregate, 'fake_host')
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'fake_host')
+ self.assertIn('aggregate deleted', str(ex))
def test_add_host_to_aggregate_invalid_error_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
in error.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
- self.assertRaises(exception.InvalidAggregateAction,
- self.conn.add_to_aggregate, self.context,
- aggregate, 'fake_host')
+ ex = self.assertRaises(exception.InvalidAggregateAction,
+ self.conn.add_to_aggregate, self.context,
+ aggregate, 'fake_host')
+ self.assertIn('aggregate in error', str(ex))
def test_remove_host_from_aggregate_error(self):
# Ensure we can remove a host from an aggregate even if in error.
@@ -3065,9 +3107,9 @@ def test_remove_host_from_aggregate_error(self):
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
db.aggregate_metadata_add(self.context, aggr['id'], metadata)
- for host in values[fake_zone]:
+ for aggregate_host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
- aggr['id'], host)
+ aggr['id'], aggregate_host)
# let's mock the fact that the aggregate is in error!
expected = self.api.remove_host_from_aggregate(self.context,
aggr['id'],
@@ -3110,7 +3152,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
self.compute.add_aggregate_host,
self.context, host="fake_host",
aggregate=jsonutils.to_primitive(self.aggr),
- slave_info=None)
+ subordinate_info=None)
expected = db.aggregate_get(self.context, self.aggr['id'])
self.assertEqual(expected['metadetails'][pool_states.KEY],
pool_states.ERROR)
@@ -3122,16 +3164,16 @@ def __init__(self):
self._mock_calls = []
def add_aggregate_host(self, ctxt, aggregate,
- host_param, host, slave_info):
+ host_param, host, subordinate_info):
self._mock_calls.append((
self.add_aggregate_host, ctxt, aggregate,
- host_param, host, slave_info))
+ host_param, host, subordinate_info))
def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
- host, slave_info):
+ host, subordinate_info):
self._mock_calls.append((
self.remove_aggregate_host, ctxt, aggregate_id,
- host_param, host, slave_info))
+ host_param, host, subordinate_info))
class StubDependencies(object):
@@ -3146,10 +3188,10 @@ def _is_hv_pool(self, *_ignore):
def _get_metadata(self, *_ignore):
return {
pool_states.KEY: {},
- 'master_compute': 'master'
+ 'main_compute': 'main'
}
- def _create_slave_info(self, *ignore):
+ def _create_subordinate_info(self, *ignore):
return "SLAVE_INFO"
@@ -3163,32 +3205,32 @@ class HypervisorPoolTestCase(test.NoDBTestCase):
'id': 98,
'hosts': [],
'metadata': {
- 'master_compute': 'master',
+ 'main_compute': 'main',
pool_states.POOL_FLAG: {},
pool_states.KEY: {}
}
}
- def test_slave_asks_master_to_add_slave_to_pool(self):
- slave = ResourcePoolWithStubs()
+ def test_subordinate_asks_main_to_add_subordinate_to_pool(self):
+ subordinate = ResourcePoolWithStubs()
- slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
+ subordinate.add_to_aggregate("CONTEXT", self.fake_aggregate, "subordinate")
self.assertIn(
- (slave.compute_rpcapi.add_aggregate_host,
+ (subordinate.compute_rpcapi.add_aggregate_host,
"CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
- "slave", "master", "SLAVE_INFO"),
- slave.compute_rpcapi._mock_calls)
+ "subordinate", "main", "SLAVE_INFO"),
+ subordinate.compute_rpcapi._mock_calls)
- def test_slave_asks_master_to_remove_slave_from_pool(self):
- slave = ResourcePoolWithStubs()
+ def test_subordinate_asks_main_to_remove_subordinate_from_pool(self):
+ subordinate = ResourcePoolWithStubs()
- slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
+ subordinate.remove_from_aggregate("CONTEXT", self.fake_aggregate, "subordinate")
self.assertIn(
- (slave.compute_rpcapi.remove_aggregate_host,
- "CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
- slave.compute_rpcapi._mock_calls)
+ (subordinate.compute_rpcapi.remove_aggregate_host,
+ "CONTEXT", 98, "subordinate", "main", "SLAVE_INFO"),
+ subordinate.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.NoDBTestCase):
diff --git a/nova/tests/volume/test_cinder.py b/nova/tests/volume/test_cinder.py
index affa85a7c9..3551bd281b 100644
--- a/nova/tests/volume/test_cinder.py
+++ b/nova/tests/volume/test_cinder.py
@@ -41,6 +41,22 @@ def __init__(self):
self.volume_snapshots = self.volumes
+class FakeVolume(object):
+    def __init__(self, values=None):
+        # Avoid a mutable default argument and shadowing the dict builtin.
+        values = values or {}
+        self.id = values.get('id') or '1234'
+        self.status = values.get('status') or 'available'
+        self.size = values.get('size') or 1
+        self.availability_zone = values.get('availability_zone') or 'cinder'
+        self.created_at = values.get('created_at')
+        self.attach_time = values.get('attach_time')
+        self.mountpoint = values.get('mountpoint')
+        self.display_name = values.get('display_name') or 'volume-' + self.id
+        self.display_description = (values.get('display_description') or
+                                    'fake')
+        self.volume_type_id = values.get('volume_type_id')
+        self.snapshot_id = values.get('snapshot_id')
+        self.metadata = values.get('volume_metadata') or {}
+
+
class CinderApiTestCase(test.NoDBTestCase):
def setUp(self):
super(CinderApiTestCase, self).setUp()
@@ -51,6 +67,7 @@ def setUp(self):
self.mox.StubOutWithMock(cinder, 'cinderclient')
self.mox.StubOutWithMock(cinder, '_untranslate_volume_summary_view')
self.mox.StubOutWithMock(cinder, '_untranslate_snapshot_summary_view')
+ self.mox.StubOutWithMock(cinder, 'get_cinder_client_version')
def test_get(self):
volume_id = 'volume_id1'
@@ -76,6 +93,7 @@ def test_get_failed(self):
self.api.get, self.ctx, volume_id)
def test_create(self):
+ cinder.get_cinder_client_version(self.ctx).AndReturn('2')
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
cinder._untranslate_volume_summary_view(self.ctx, {'id': 'created_id'})
self.mox.ReplayAll()
@@ -83,22 +101,26 @@ def test_create(self):
self.api.create(self.ctx, 1, '', '')
def test_create_failed(self):
+ cinder.get_cinder_client_version(self.ctx).AndReturn('2')
cinder.cinderclient(self.ctx).AndRaise(cinder_exception.BadRequest(''))
self.mox.ReplayAll()
self.assertRaises(exception.InvalidInput,
self.api.create, self.ctx, 1, '', '')
+ @mock.patch('nova.volume.cinder.get_cinder_client_version')
@mock.patch('nova.volume.cinder.cinderclient')
- def test_create_over_quota_failed(self, mock_cinderclient):
+ def test_create_over_quota_failed(self, mock_cinderclient,
+ mock_get_version):
+ mock_get_version.return_value = '2'
mock_cinderclient.return_value.volumes.create.side_effect = (
cinder_exception.OverLimit(413))
self.assertRaises(exception.OverQuota, self.api.create, self.ctx,
1, '', '')
mock_cinderclient.return_value.volumes.create.assert_called_once_with(
1, user_id=None, imageRef=None, availability_zone=None,
- volume_type=None, display_description='', snapshot_id=None,
- display_name='', project_id=None, metadata=None)
+ volume_type=None, description='', snapshot_id=None, name='',
+ project_id=None, metadata=None)
def test_get_all(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -145,11 +167,11 @@ def test_check_attach_availability_zone_differs(self):
volume['availability_zone'] = 'zone1'
self.assertIsNone(self.api.check_attach(
self.ctx, volume, instance))
- mock_get_instance_az.assert_not_called()
+ self.assertFalse(mock_get_instance_az.called)
volume['availability_zone'] = 'zone2'
self.assertRaises(exception.InvalidVolume,
self.api.check_attach, self.ctx, volume, instance)
- mock_get_instance_az.assert_not_called()
+ self.assertFalse(mock_get_instance_az.called)
cinder.CONF.reset()
def test_check_attach(self):
@@ -327,21 +349,85 @@ def test_delete_snapshot(self):
self.api.delete_snapshot(self.ctx, 'id1')
- def test_get_volume_metadata(self):
- self.assertRaises(NotImplementedError,
- self.api.get_volume_metadata, self.ctx, '')
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_get_volume_metadata(self, mock_cinderclient):
+ volume_id = 'id1'
+ metadata = {'key1': 'value1', 'key2': 'value2'}
+ volume = FakeVolume({'id': volume_id, 'volume_metadata': metadata})
- def test_get_volume_metadata_value(self):
- self.assertRaises(NotImplementedError,
- self.api.get_volume_metadata_value, '', '')
+ mock_volumes = mock.MagicMock()
+ mock_volumes.get.return_value = volume
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
- def test_delete_volume_metadata(self):
- self.assertRaises(NotImplementedError,
- self.api.delete_volume_metadata, self.ctx, '', '')
+ results = self.api.get_volume_metadata(self.ctx, volume_id)
- def test_update_volume_metadata(self):
- self.assertRaises(NotImplementedError,
- self.api.update_volume_metadata, self.ctx, '', '')
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.get.assert_called_once_with(volume_id)
+ self.assertEqual(results, metadata)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_get_volume_metadata_value(self, mock_cinderclient):
+ volume_id = 'id1'
+ metadata = {'key1': 'value1'}
+ volume = FakeVolume({'id': volume_id, 'volume_metadata': metadata})
+
+ mock_volumes = mock.MagicMock()
+ mock_volumes.get.return_value = volume
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ results = self.api.get_volume_metadata_value(self.ctx, volume_id,
+ 'key1')
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.get.assert_called_once_with(volume_id)
+ self.assertEqual(results, 'value1')
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_delete_volume_metadata(self, mock_cinderclient):
+ volume_id = 'id1'
+ keys = ['key1', 'key2', 'key3']
+
+ mock_volumes = mock.MagicMock()
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ self.api.delete_volume_metadata(self.ctx, volume_id, keys)
+
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.delete_metadata.assert_called_once_with(volume_id, keys)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_update_volume_metadata(self, mock_cinderclient):
+ volume_id = 'id1'
+ metadata = {'key1': 'value1'}
+
+ mock_volumes = mock.MagicMock()
+ mock_volumes.set_metadata.return_value = metadata
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ updated_meta = self.api.update_volume_metadata(self.ctx, volume_id,
+ metadata)
+
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ self.assertFalse(mock_volumes.update_all_metadata.called)
+ mock_volumes.set_metadata.assert_called_once_with(volume_id, metadata)
+ self.assertEqual(metadata, updated_meta)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_update_volume_metadata_delete(self, mock_cinderclient):
+ volume_id = 'id1'
+ metadata = {'key1': 'value1', 'key2': 'value2'}
+
+ mock_volumes = mock.MagicMock()
+ mock_volumes.update_all_metadata.return_value = metadata
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+
+ updated_meta = self.api.update_volume_metadata(self.ctx, volume_id,
+ metadata, delete=True)
+
+ mock_cinderclient.assert_called_once_with(self.ctx)
+ mock_volumes.update_all_metadata.assert_called_once_with(volume_id,
+ metadata)
+ self.assertFalse(mock_volumes.set_metadata.called)
+ self.assertEqual(metadata, updated_meta)
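The two update tests above pin down the dispatch: delete=True replaces the volume's entire metadata dict via update_all_metadata, while the default path merges keys via set_metadata. A sketch of that branch, assuming nova.volume.cinder's cinderclient factory as patched in the tests:

```python
from nova.volume import cinder


def update_volume_metadata(context, volume_id, metadata, delete=False):
    # delete=True replaces the whole metadata dict; otherwise the given
    # keys are merged in (sketch of the tested contract).
    client = cinder.cinderclient(context)
    if delete:
        return client.volumes.update_all_metadata(volume_id, metadata)
    return client.volumes.set_metadata(volume_id, metadata)
```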
def test_update_snapshot_status(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
diff --git a/nova/utils.py b/nova/utils.py
index 887617941d..6aea79149e 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -41,9 +41,8 @@
import six
from nova import exception
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common import gettextutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
@@ -90,7 +89,7 @@
'SECOND': 1,
'MINUTE': 60,
'HOUR': 3600,
- 'DAY': 84400
+ 'DAY': 86400
}
@@ -444,8 +443,6 @@ def utf8(value):
"""
if isinstance(value, unicode):
return value.encode('utf-8')
- elif isinstance(value, gettextutils.Message):
- return unicode(value).encode('utf-8')
assert isinstance(value, str)
return value
@@ -577,7 +574,7 @@ def monkey_patch():
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example:
- 'nova.api.ec2.cloud:nova.notifications.notify_decorator'
+ 'nova.api.ec2.cloud:nova.notifications.notify_decorator'
Parameters of the decorator is as follows.
(See nova.notifications.notify_decorator)
@@ -828,7 +825,7 @@ def mkfs(fs, path, label=None, run_as_root=False):
args = ['mkswap']
else:
args = ['mkfs', '-t', fs]
- #add -F to force no interactive execute on non-block device.
+ # add -F to force no interactive execute on non-block device.
if fs in ('ext3', 'ext4', 'ntfs'):
args.extend(['-F'])
if label:
@@ -955,7 +952,7 @@ def wrapper(*args, **kwargs):
return wrapper
-def check_string_length(value, name, min_length=0, max_length=None):
+def check_string_length(value, name=None, min_length=0, max_length=None):
"""Check the length of specified string
:param value: the value of the string
:param name: the name of the string
@@ -963,9 +960,15 @@ def check_string_length(value, name, min_length=0, max_length=None):
:param max_length: the max_length of the string
"""
if not isinstance(value, six.string_types):
- msg = _("%s is not a string or unicode") % name
+ if name is None:
+ msg = _("The input is not a string or unicode")
+ else:
+ msg = _("%s is not a string or unicode") % name
raise exception.InvalidInput(message=msg)
+ if name is None:
+ name = value
+
if len(value) < min_length:
msg = _("%(name)s has a minimum character requirement of "
"%(min_length)s.") % {'name': name, 'min_length': min_length}
diff --git a/nova/version.py b/nova/version.py
index 4d6faa1e08..7c2a71b39d 100644
--- a/nova/version.py
+++ b/nova/version.py
@@ -14,7 +14,7 @@
import pbr.version
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
NOVA_VENDOR = "OpenStack Foundation"
NOVA_PRODUCT = "OpenStack Nova"
diff --git a/nova/virt/baremetal/baremetal_states.py b/nova/virt/baremetal/baremetal_states.py
index e48382f5b6..82e41fb3ce 100644
--- a/nova/virt/baremetal/baremetal_states.py
+++ b/nova/virt/baremetal/baremetal_states.py
@@ -24,8 +24,6 @@
"""
-NULL = None
-INIT = 'initializing'
ACTIVE = 'active'
BUILDING = 'building'
DEPLOYING = 'deploying'
diff --git a/nova/virt/baremetal/common.py b/nova/virt/baremetal/common.py
index de0f6d8e0c..94165007e6 100644
--- a/nova/virt/baremetal/common.py
+++ b/nova/virt/baremetal/common.py
@@ -15,7 +15,7 @@
import paramiko
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/virt/baremetal/db/sqlalchemy/api.py b/nova/virt/baremetal/db/sqlalchemy/api.py
index 35e70a4be8..21813be6a6 100644
--- a/nova/virt/baremetal/db/sqlalchemy/api.py
+++ b/nova/virt/baremetal/db/sqlalchemy/api.py
@@ -22,12 +22,13 @@
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import literal_column
+from sqlalchemy.sql import null
import nova.context
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
+from nova.i18n import _
from nova.openstack.common.db import exception as db_exc
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova.virt.baremetal.db.sqlalchemy import models
@@ -91,7 +92,7 @@ def bm_node_get_all(context, service_host=None):
@sqlalchemy_api.require_admin_context
def bm_node_get_associated(context, service_host=None):
query = model_query(context, models.BareMetalNode, read_deleted="no").\
- filter(models.BareMetalNode.instance_uuid != None)
+ filter(models.BareMetalNode.instance_uuid != null())
if service_host:
query = query.filter_by(service_host=service_host)
return query.all()
@@ -100,7 +101,7 @@ def bm_node_get_associated(context, service_host=None):
@sqlalchemy_api.require_admin_context
def bm_node_get_unassociated(context, service_host=None):
query = model_query(context, models.BareMetalNode, read_deleted="no").\
- filter(models.BareMetalNode.instance_uuid == None)
+ filter(models.BareMetalNode.instance_uuid == null())
if service_host:
query = query.filter_by(service_host=service_host)
return query.all()
@@ -110,7 +111,7 @@ def bm_node_get_unassociated(context, service_host=None):
def bm_node_find_free(context, service_host=None,
cpus=None, memory_mb=None, local_gb=None):
query = model_query(context, models.BareMetalNode, read_deleted="no")
- query = query.filter(models.BareMetalNode.instance_uuid == None)
+ query = query.filter(models.BareMetalNode.instance_uuid == null())
if service_host:
query = query.filter_by(service_host=service_host)
if cpus is not None:
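
Switching the filters from "== None" / "!= None" to the sqlalchemy.sql.null()
construct keeps the generated SQL as IS NULL / IS NOT NULL while satisfying
style checks that reject identity comparisons with None. A small self-contained
sketch (the model and table names are illustrative)::

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.sql import null

    Base = declarative_base()

    class Node(Base):
        __tablename__ = 'nodes'
        id = Column(Integer, primary_key=True)
        instance_uuid = Column(String(36), nullable=True)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    # Renders "WHERE nodes.instance_uuid IS NOT NULL", exactly like the
    # old "!= None" spelling, without the equality-to-None idiom.
    associated = session.query(Node).filter(Node.instance_uuid != null())
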
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
index 0cf0b637ab..351ca20f64 100644
--- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py
@@ -41,7 +41,6 @@ def upgrade(migrate_engine):
Column('prov_vlan_id', Integer),
Column('terminal_port', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
)
bm_interfaces = Table('bm_interfaces', meta,
@@ -56,7 +55,6 @@ def upgrade(migrate_engine):
Column('port_no', Integer),
Column('vif_uuid', String(length=36), unique=True),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
)
bm_pxe_ips = Table('bm_pxe_ips', meta,
@@ -69,7 +67,6 @@ def upgrade(migrate_engine):
Column('bm_node_id', Integer),
Column('server_address', String(length=255), unique=True),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
)
bm_deployments = Table('bm_deployments', meta,
@@ -85,7 +82,6 @@ def upgrade(migrate_engine):
Column('root_mb', Integer),
Column('swap_mb', Integer),
mysql_engine='InnoDB',
- #mysql_charset='utf8'
)
bm_nodes.create()
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py
index 5a1ec451a0..8921f3eb58 100644
--- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py
@@ -13,8 +13,9 @@
# under the License.
from nova.openstack.common import log as logging
-from sqlalchemy import and_, MetaData, select, Table, exists
+from sqlalchemy import MetaData, Table, exists
from sqlalchemy import exc
+from sqlalchemy import sql
LOG = logging.getLogger(__name__)
@@ -26,7 +27,7 @@ def upgrade(migrate_engine):
nodes = Table('bm_nodes', meta, autoload=True)
ifs = Table('bm_interfaces', meta, autoload=True)
- q = select([nodes.c.id, nodes.c.prov_mac_address],
+ q = sql.select([nodes.c.id, nodes.c.prov_mac_address],
from_obj=nodes)
# Iterate all elements before starting insert since IntegrityError
@@ -52,8 +53,9 @@ def downgrade(migrate_engine):
nodes = Table('bm_nodes', meta, autoload=True)
ifs = Table('bm_interfaces', meta, autoload=True)
- subq = exists().where(and_(ifs.c.bm_node_id == nodes.c.id,
- ifs.c.address == nodes.c.prov_mac_address))
+ subq = exists().where(sql.and_(
+ ifs.c.bm_node_id == nodes.c.id,
+ ifs.c.address == nodes.c.prov_mac_address))
ifs.delete().where(subq).execute()
diff --git a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/010_add_preserve_ephemeral.py b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/010_add_preserve_ephemeral.py
index c95c66d168..2cd5745327 100644
--- a/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/010_add_preserve_ephemeral.py
+++ b/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/010_add_preserve_ephemeral.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-from sqlalchemy import Column, MetaData, Boolean, Table, text
+from sqlalchemy import Column, MetaData, Boolean, Table
+from sqlalchemy.sql import expression
COLUMN_NAME = 'preserve_ephemeral'
@@ -25,7 +26,8 @@ def upgrade(migrate_engine):
meta.bind = migrate_engine
t = Table(TABLE_NAME, meta, autoload=True)
- default = text('0') if migrate_engine.name == 'sqlite' else text('false')
+ default = (expression.text('0') if migrate_engine.name == 'sqlite'
+ else expression.text('false'))
preserve_ephemeral_col = Column(COLUMN_NAME, Boolean,
server_default=default)
t.create_column(preserve_ephemeral_col)
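
The migration spells the boolean server default differently per engine because
SQLite stores booleans as 0/1 and has no false literal. A sketch of the same
pattern outside the migration (table and column names are illustrative; the
API shown is the SQLAlchemy of this era)::

    from sqlalchemy import (Boolean, Column, Integer, MetaData, Table,
                            create_engine)
    from sqlalchemy.sql import expression

    engine = create_engine('sqlite://')
    meta = MetaData(bind=engine)

    # SQLite needs the 0/1 spelling; engines with a native boolean type
    # accept the standard 'false' literal.
    default = (expression.text('0') if engine.name == 'sqlite'
               else expression.text('false'))

    t = Table('example', meta,
              Column('id', Integer, primary_key=True),
              Column('preserve_ephemeral', Boolean, server_default=default))
    t.create()
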
diff --git a/nova/virt/baremetal/db/sqlalchemy/migration.py b/nova/virt/baremetal/db/sqlalchemy/migration.py
index 39212e668c..27beb89f16 100644
--- a/nova/virt/baremetal/db/sqlalchemy/migration.py
+++ b/nova/virt/baremetal/db/sqlalchemy/migration.py
@@ -22,7 +22,7 @@
import sqlalchemy
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.virt.baremetal.db.sqlalchemy import session
INIT_VERSION = 0
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 705494145a..7f94a98623 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -27,8 +27,8 @@
from nova.compute import task_states
from nova import context as nova_context
from nova import exception
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
@@ -218,6 +218,9 @@ def _stop_firewall(self, instance, network_info):
self.firewall_driver.unfilter_instance(
instance, network_info)
+ def deallocate_networks_on_reschedule(self, instance):
+ return True
+
def macs_for_instance(self, instance):
context = nova_context.get_admin_context()
node_uuid = self._require_node(instance)
@@ -400,12 +403,13 @@ def destroy(self, context, instance, network_info, block_device_info=None,
"baremetal database: %s") % e)
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed."""
pass
- def power_off(self, instance, node=None):
+ def power_off(self, instance, timeout=0, retry_interval=0, node=None):
"""Power off the specified instance."""
+ # TODO(PhilDay): Add support for timeout (clean shutdown)
if not node:
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
diff --git a/nova/virt/baremetal/iboot_pdu.py b/nova/virt/baremetal/iboot_pdu.py
index f32fa1d03e..16d037b38c 100644
--- a/nova/virt/baremetal/iboot_pdu.py
+++ b/nova/virt/baremetal/iboot_pdu.py
@@ -16,7 +16,7 @@
# iBoot Power Driver
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.virt.baremetal import baremetal_states
diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py
index f13b0de8a9..83dbe4d5bb 100644
--- a/nova/virt/baremetal/ipmi.py
+++ b/nova/virt/baremetal/ipmi.py
@@ -27,7 +27,7 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova import paths
@@ -107,16 +107,16 @@ def __init__(self, node, **kwargs):
self.password = node['pm_password']
self.port = node['terminal_port']
- if self.node_id == None:
+ if self.node_id is None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to IPMI"))
- if self.address == None:
+ if self.address is None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to IPMI"))
- if self.user == None:
+ if self.user is None:
raise exception.InvalidParameterValue(_("User not supplied "
"to IPMI"))
- if self.password == None:
+ if self.password is None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to IPMI"))
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index f7c8ff644d..0266d347ee 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -26,10 +26,10 @@
from nova.compute import flavors
from nova import exception
-from nova.objects import flavor as flavor_obj
+from nova.i18n import _
+from nova import objects
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import timeutils
@@ -195,7 +195,7 @@ def get_tftp_image_info(instance, flavor):
Raises NovaException if
- instance does not contain kernel_id or ramdisk_id
- deploy_kernel_id or deploy_ramdisk_id can not be read from
- flavor['extra_specs'] and defaults are not set
+ flavor['extra_specs'] and defaults are not set
"""
image_info = {
@@ -334,8 +334,8 @@ def _inject_into_image(self, context, node, instance, network_info,
def cache_images(self, context, node, instance,
admin_password, image_meta, injected_files, network_info):
"""Prepare all the images for this instance."""
- flavor = flavor_obj.Flavor.get_by_id(context,
- instance['instance_type_id'])
+ flavor = objects.Flavor.get_by_id(context,
+ instance['instance_type_id'])
tftp_image_info = get_tftp_image_info(instance, flavor)
self._cache_tftp_images(context, instance, tftp_image_info)
@@ -379,8 +379,8 @@ def activate_bootloader(self, context, node, instance, network_info):
./pxelinux.cfg/
{mac} -> ../{uuid}/config
"""
- flavor = flavor_obj.Flavor.get_by_id(context,
- instance['instance_type_id'])
+ flavor = objects.Flavor.get_by_id(context,
+ instance['instance_type_id'])
image_info = get_tftp_image_info(instance, flavor)
(root_mb, swap_mb, ephemeral_mb) = get_partition_sizes(instance)
pxe_config_file_path = get_pxe_config_file_path(instance)
@@ -468,7 +468,7 @@ def _wait_for_deploy():
status = row.get('task_state')
if (status == baremetal_states.DEPLOYING
- and locals['started'] == False):
+ and locals['started'] is False):
LOG.info(_("PXE deploy started for instance %s")
% instance['uuid'])
locals['started'] = True
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
index 67c562598b..fa324782bf 100644
--- a/nova/virt/baremetal/tilera.py
+++ b/nova/virt/baremetal/tilera.py
@@ -25,9 +25,9 @@
from nova.compute import flavors
from nova import exception
+from nova.i18n import _
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.baremetal import baremetal_states
@@ -233,12 +233,13 @@ def activate_bootloader(self, context, node, instance, network_info):
This method writes the instances config file, and then creates
symlinks for each MAC address in the instance.
- By default, the complete layout looks like this:
+ By default, the complete layout looks like this::
+
+ /tftpboot/
+ ./{uuid}/
+ kernel
+ ./fs_node_id/
- /tftpboot/
- ./{uuid}/
- kernel
- ./fs_node_id/
"""
get_tftp_image_info(instance)
(root_mb, swap_mb) = get_partition_sizes(instance)
@@ -319,7 +320,7 @@ def activate_node(self, context, node, instance):
status = row.get('task_state')
if (status == baremetal_states.DEPLOYING and
- locals['started'] == False):
+ locals['started'] is False):
LOG.info(_('Tilera deploy started for instance %s')
% instance['uuid'])
locals['started'] = True
diff --git a/nova/virt/baremetal/tilera_pdu.py b/nova/virt/baremetal/tilera_pdu.py
index 87d8435bf4..8bbb1a0615 100644
--- a/nova/virt/baremetal/tilera_pdu.py
+++ b/nova/virt/baremetal/tilera_pdu.py
@@ -24,7 +24,7 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
@@ -79,16 +79,16 @@ def __init__(self, node, **kwargs):
self.password = node['pm_password']
self.port = node['terminal_port']
- if self.node_id == None:
+ if self.node_id is None:
raise exception.InvalidParameterValue(_("Node id not supplied "
"to PDU"))
- if self.address == None:
+ if self.address is None:
raise exception.InvalidParameterValue(_("Address not supplied "
"to PDU"))
- if self.user == None:
+ if self.user is None:
raise exception.InvalidParameterValue(_("User not supplied "
"to PDU"))
- if self.password == None:
+ if self.password is None:
raise exception.InvalidParameterValue(_("Password not supplied "
"to PDU"))
diff --git a/nova/virt/baremetal/utils.py b/nova/virt/baremetal/utils.py
index c54d3c14dd..0de4e27ae0 100644
--- a/nova/virt/baremetal/utils.py
+++ b/nova/virt/baremetal/utils.py
@@ -18,7 +18,7 @@
import os
import shutil
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.disk import api as disk_api
from nova.virt.libvirt import utils as libvirt_utils
diff --git a/nova/virt/baremetal/vif_driver.py b/nova/virt/baremetal/vif_driver.py
index 04255d08a3..f71cd39835 100644
--- a/nova/virt/baremetal/vif_driver.py
+++ b/nova/virt/baremetal/vif_driver.py
@@ -17,7 +17,7 @@
from nova import context
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.baremetal import db as bmdb
diff --git a/nova/virt/baremetal/virtual_power_driver.py b/nova/virt/baremetal/virtual_power_driver.py
index 34e9e0e5bd..7ff6703409 100644
--- a/nova/virt/baremetal/virtual_power_driver.py
+++ b/nova/virt/baremetal/virtual_power_driver.py
@@ -19,7 +19,7 @@
from nova import context as nova_context
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index 3cbe878770..f07e988e70 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -21,8 +21,8 @@
from nova import context as nova_context
from nova import exception
+from nova.i18n import _
from nova import network
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index 67b3064021..e58d6be1ea 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -16,10 +16,11 @@
import operator
from nova import block_device
+from nova.i18n import _
+from nova.i18n import _LI
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.volume import encryptors
@@ -101,15 +102,13 @@ def __getattr__(self, name):
if name in self._proxy_as_attr:
return getattr(self._bdm_obj, name)
else:
- raise AttributeError("Cannot access %s on DriverBlockDevice "
- "class" % name)
+ super(DriverBlockDevice, self).__getattr__(name)
def __setattr__(self, name, value):
if name in self._proxy_as_attr:
return setattr(self._bdm_obj, name, value)
else:
- raise AttributeError("Cannot access %s on DriverBlockDevice "
- "class" % name)
+ super(DriverBlockDevice, self).__setattr__(name, value)
def _transform(self):
"""Transform bdm to the format that is passed to drivers."""
@@ -131,10 +130,14 @@ def attach(self, **kwargs):
"""
raise NotImplementedError()
- def save(self, context):
+ def save(self, context=None):
for attr_name, key_name in self._update_on_save.iteritems():
setattr(self._bdm_obj, attr_name, self[key_name or attr_name])
- self._bdm_obj.save(context)
+
+ if context:
+ self._bdm_obj.save(context)
+ else:
+ self._bdm_obj.save()
class DriverSwapBlockDevice(DriverBlockDevice):
@@ -210,6 +213,14 @@ def _transform(self):
except TypeError:
self['connection_info'] = None
+ def _preserve_multipath_id(self, connection_info):
+ if self['connection_info'] and 'data' in self['connection_info']:
+ if 'multipath_id' in self['connection_info']['data']:
+ connection_info['data']['multipath_id'] =\
+ self['connection_info']['data']['multipath_id']
+ LOG.info(_LI('preserve multipath_id %s'),
+ connection_info['data']['multipath_id'])
+
@update_db
def attach(self, context, instance, volume_api, virt_driver,
do_check_attach=True, do_driver_attach=False):
@@ -226,6 +237,7 @@ def attach(self, context, instance, volume_api, virt_driver,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
+ self._preserve_multipath_id(connection_info)
# If do_driver_attach is False, we will attach a volume to an instance
# at boot time. So actual attach is done by instance creation code.
@@ -252,8 +264,9 @@ def attach(self, context, instance, volume_api, virt_driver,
mode = 'rw'
if 'data' in connection_info:
mode = connection_info['data'].get('access_mode', 'rw')
- volume_api.attach(context, volume_id, instance['uuid'],
- self['mount_device'], mode=mode)
+ if volume['attach_status'] == "detached":
+ volume_api.attach(context, volume_id, instance['uuid'],
+ self['mount_device'], mode=mode)
@update_db
def refresh_connection_info(self, context, instance,
@@ -268,9 +281,10 @@ def refresh_connection_info(self, context, instance,
connector)
if 'serial' not in connection_info:
connection_info['serial'] = self.volume_id
+ self._preserve_multipath_id(connection_info)
self['connection_info'] = connection_info
- def save(self, context):
+ def save(self, context=None):
# NOTE(ndipanov): we might want to generalize this by adding it to the
# _update_on_save and adding a transformation function.
try:
@@ -287,7 +301,7 @@ class DriverSnapshotBlockDevice(DriverVolumeBlockDevice):
_proxy_as_attr = set(['volume_size', 'volume_id', 'snapshot_id'])
def attach(self, context, instance, volume_api,
- virt_driver, wait_func=None):
+ virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
snapshot = volume_api.get_snapshot(context,
@@ -300,8 +314,9 @@ def attach(self, context, instance, volume_api,
self.volume_id = vol['id']
# Call the volume attach now
- super(DriverSnapshotBlockDevice, self).attach(context, instance,
- volume_api, virt_driver)
+ super(DriverSnapshotBlockDevice, self).attach(
+ context, instance, volume_api, virt_driver,
+ do_check_attach=do_check_attach)
class DriverImageBlockDevice(DriverVolumeBlockDevice):
@@ -310,7 +325,7 @@ class DriverImageBlockDevice(DriverVolumeBlockDevice):
_proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])
def attach(self, context, instance, volume_api,
- virt_driver, wait_func=None):
+ virt_driver, wait_func=None, do_check_attach=True):
if not self.volume_id:
vol = volume_api.create(context, self.volume_size,
'', '', image_id=self.image_id)
@@ -319,8 +334,29 @@ def attach(self, context, instance, volume_api,
self.volume_id = vol['id']
- super(DriverImageBlockDevice, self).attach(context, instance,
- volume_api, virt_driver)
+ super(DriverImageBlockDevice, self).attach(
+ context, instance, volume_api, virt_driver,
+ do_check_attach=do_check_attach)
+
+
+class DriverBlankBlockDevice(DriverVolumeBlockDevice):
+
+ _valid_source = 'blank'
+ _proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])
+
+ def attach(self, context, instance, volume_api,
+ virt_driver, wait_func=None, do_check_attach=True):
+ if not self.volume_id:
+ vol_name = instance.uuid + '-blank-vol'
+ vol = volume_api.create(context, self.volume_size, vol_name, '')
+ if wait_func:
+ wait_func(context, vol['id'])
+
+ self.volume_id = vol['id']
+
+ super(DriverBlankBlockDevice, self).attach(
+ context, instance, volume_api, virt_driver,
+ do_check_attach=do_check_attach)
def _convert_block_devices(device_type, block_device_mapping):
@@ -354,6 +390,9 @@ def _is_transformable(bdm):
convert_images = functools.partial(_convert_block_devices,
DriverImageBlockDevice)
+convert_blanks = functools.partial(_convert_block_devices,
+ DriverBlankBlockDevice)
+
def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs):
def _log_and_attach(bdm):
@@ -416,7 +455,7 @@ def get_swap(transformed_list):
_IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice,
DriverVolumeBlockDevice, DriverSnapshotBlockDevice,
- DriverImageBlockDevice)
+ DriverImageBlockDevice, DriverBlankBlockDevice)
def is_implemented(bdm):
@@ -427,3 +466,9 @@ def is_implemented(bdm):
except _NotTransformable:
pass
return False
+
+
+def is_block_device_mapping(bdm):
+ return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank')
+ and bdm.destination_type == 'volume'
+ and is_implemented(bdm))
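
The new is_block_device_mapping() helper classifies a bdm as a volume-backed
mapping when its source is one of the convertible types, its destination is a
volume, and one of the Driver*BlockDevice classes can transform it. A
standalone sketch of the predicate (the namedtuple and the stubbed
is_implemented() are illustrative stand-ins for the Nova objects)::

    import collections

    BDM = collections.namedtuple('BDM', ['source_type', 'destination_type'])

    def is_implemented(bdm):
        # Stub: the real helper tries each Driver*BlockDevice class and
        # catches _NotTransformable.
        return True

    def is_block_device_mapping(bdm):
        return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank')
                and bdm.destination_type == 'volume'
                and is_implemented(bdm))

    print(is_block_device_mapping(BDM('blank', 'volume')))   # True
    print(is_block_device_mapping(BDM('blank', 'local')))    # False
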
diff --git a/nova/virt/diagnostics.py b/nova/virt/diagnostics.py
index f9fea5c572..2538ad1c09 100644
--- a/nova/virt/diagnostics.py
+++ b/nova/virt/diagnostics.py
@@ -15,6 +15,9 @@
import six
+from nova import exception
+from nova.i18n import _
+
class CpuDiagnostics(object):
@@ -119,19 +122,31 @@ def __init__(self, state=None, driver=None, hypervisor_os=None,
self.uptime = uptime
self.config_drive = config_drive
if cpu_details:
+ self._validate_type(cpu_details, CpuDiagnostics, 'cpu_details')
self.cpu_details = cpu_details
else:
self.cpu_details = []
if nic_details:
+ self._validate_type(nic_details, NicDiagnostics, 'nic_details')
self.nic_details = nic_details
else:
self.nic_details = []
if disk_details:
+ self._validate_type(disk_details, DiskDiagnostics, 'disk_details')
self.disk_details = disk_details
else:
self.disk_details = []
self.memory_details = MemoryDiagnostics()
+ def _validate_type(self, input, type, str_input):
+ if not isinstance(input, list):
+ reason = _("Invalid type for %s") % str_input
+ raise exception.InvalidInput(reason=reason)
+ for i in input:
+ if not isinstance(i, type):
+ reason = _("Invalid type for %s entry") % str_input
+ raise exception.InvalidInput(reason=reason)
+
def add_cpu(self, time=0):
self.cpu_details.append(CpuDiagnostics(time=time))
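
_validate_type() rejects both a non-list argument and list entries of the
wrong class, so malformed diagnostics fail fast instead of surfacing later.
The check in isolation (the exception class here is a simplified stand-in)::

    class InvalidInput(Exception):
        pass

    class CpuDiagnostics(object):
        def __init__(self, time=0):
            self.time = time

    def validate_type(value, expected, label):
        # The argument must be a list and every entry an instance of the
        # expected diagnostics class.
        if not isinstance(value, list):
            raise InvalidInput("Invalid type for %s" % label)
        for entry in value:
            if not isinstance(entry, expected):
                raise InvalidInput("Invalid type for %s entry" % label)

    validate_type([CpuDiagnostics()], CpuDiagnostics, 'cpu_details')  # ok
    validate_type(CpuDiagnostics(), CpuDiagnostics, 'cpu_details')    # raises
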
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index cdc103443a..e2f1e1f1cb 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -33,7 +33,9 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
@@ -75,8 +77,16 @@
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None
-_DEFAULT_FS_BY_OSTYPE = {'linux': 'ext3',
- 'windows': 'ntfs'}
+
+FS_FORMAT_EXT2 = "ext2"
+FS_FORMAT_EXT3 = "ext3"
+FS_FORMAT_EXT4 = "ext4"
+FS_FORMAT_XFS = "xfs"
+FS_FORMAT_NTFS = "ntfs"
+FS_FORMAT_VFAT = "vfat"
+
+_DEFAULT_FS_BY_OSTYPE = {'linux': FS_FORMAT_EXT3,
+ 'windows': FS_FORMAT_NTFS}
for s in CONF.virt_mkfs:
# NOTE(yamahata): mkfs command may includes '=' for its options.
@@ -92,7 +102,7 @@ def get_fs_type_for_os_type(os_type):
return os_type if _MKFS_COMMAND.get(os_type) else 'default'
-def mkfs(os_type, fs_label, target, run_as_root=True):
+def mkfs(os_type, fs_label, target, run_as_root=True, specified_fs=None):
"""Format a file or block device using
a user provided command for each os type.
If user has not provided any configuration,
@@ -106,10 +116,12 @@ def mkfs(os_type, fs_label, target, run_as_root=True):
if mkfs_command:
utils.execute(*mkfs_command.split(), run_as_root=run_as_root)
else:
- default_fs = CONF.default_ephemeral_format
- if not default_fs:
- default_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, 'ext3')
- utils.mkfs(default_fs, target, fs_label, run_as_root=run_as_root)
+ if not specified_fs:
+ specified_fs = CONF.default_ephemeral_format
+ if not specified_fs:
+ specified_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, 'ext3')
+
+ utils.mkfs(specified_fs, target, fs_label, run_as_root=run_as_root)
def resize2fs(image, check_exit_code=False, run_as_root=False):
@@ -351,8 +363,8 @@ def inject_data(image, key=None, net=None, metadata=None, admin_password=None,
inject_val = locals()[inject]
if inject_val:
raise
- LOG.warn(_('Ignoring error injecting data into image '
- '(%(e)s)'), {'e': e})
+ LOG.warn(_LW('Ignoring error injecting data into image %(image)s '
+ '(%(e)s)'), {'image': image, 'e': e})
return False
try:
@@ -373,7 +385,7 @@ def setup_container(image, container_dir, use_cow=False):
img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
dev = img.mount()
if dev is None:
- LOG.error(_("Failed to mount container filesystem '%(image)s' "
+ LOG.error(_LE("Failed to mount container filesystem '%(image)s' "
"on '%(target)s': %(errors)s"),
{"image": img, "target": container_dir,
"errors": img.errors})
@@ -441,8 +453,8 @@ def inject_data_into_fs(fs, key, net, metadata, admin_password, files,
except Exception as e:
if inject in mandatory:
raise
- LOG.warn(_('Ignoring error injecting %(inject)s into image '
- '(%(e)s)'), {'e': e, 'inject': inject})
+ LOG.warn(_LW('Ignoring error injecting %(inject)s into image '
+ '(%(e)s)'), {'inject': inject, 'e': e})
status = False
return status
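
mkfs() now resolves the filesystem in three steps: an explicit specified_fs
wins, then the default_ephemeral_format option, then the per-OS default. A
sketch of just that precedence (resolve_fs and conf_default are illustrative
names; the real code reads CONF.default_ephemeral_format)::

    FS_FORMAT_EXT3 = 'ext3'
    FS_FORMAT_NTFS = 'ntfs'
    _DEFAULT_FS_BY_OSTYPE = {'linux': FS_FORMAT_EXT3,
                             'windows': FS_FORMAT_NTFS}

    def resolve_fs(os_type, specified_fs=None, conf_default=None):
        # caller-specified fs > configured default > per-OS default
        if not specified_fs:
            specified_fs = conf_default
        if not specified_fs:
            specified_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, 'ext3')
        return specified_fs

    assert resolve_fs('windows') == 'ntfs'
    assert resolve_fs('windows', specified_fs='vfat') == 'vfat'
    assert resolve_fs('plan9') == 'ext3'
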
diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py
index 37c50b5450..066842b18e 100644
--- a/nova/virt/disk/mount/api.py
+++ b/nova/virt/disk/mount/api.py
@@ -16,7 +16,7 @@
import os
import time
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/disk/mount/loop.py b/nova/virt/disk/mount/loop.py
index 7a0321a153..d0a157e742 100644
--- a/nova/virt/disk/mount/loop.py
+++ b/nova/virt/disk/mount/loop.py
@@ -13,7 +13,7 @@
# under the License.
"""Support for mounting images with the loop device."""
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk.mount import api
diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py
index 7b2d0add94..80ab966058 100644
--- a/nova/virt/disk/mount/nbd.py
+++ b/nova/virt/disk/mount/nbd.py
@@ -20,7 +20,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk.mount import api
diff --git a/nova/virt/disk/vfs/api.py b/nova/virt/disk/vfs/api.py
index a98f12b4f8..d272968c00 100644
--- a/nova/virt/disk/vfs/api.py
+++ b/nova/virt/disk/vfs/api.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova.i18n import _LW
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
@@ -40,7 +41,8 @@ def instance_for_image(imgfile, imgfmt, partition):
"nova.virt.disk.vfs.guestfs.VFSGuestFS",
imgfile, imgfmt, partition)
else:
- LOG.debug("Falling back to VFSLocalFS")
+ LOG.warn(_LW("Unable to import guestfs, "
+ "falling back to VFSLocalFS"))
return importutils.import_object(
"nova.virt.disk.vfs.localfs.VFSLocalFS",
imgfile, imgfmt, partition)
diff --git a/nova/virt/disk/vfs/guestfs.py b/nova/virt/disk/vfs/guestfs.py
index c99dce829e..a829820ab9 100644
--- a/nova/virt/disk/vfs/guestfs.py
+++ b/nova/virt/disk/vfs/guestfs.py
@@ -15,7 +15,8 @@
from eventlet import tpool
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _, _LI
+from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.virt.disk.vfs import api as vfs
@@ -23,6 +24,18 @@
LOG = logging.getLogger(__name__)
guestfs = None
+forceTCG = False
+
+
+def force_tcg(force=True):
+ """Prevent libguestfs trying to use KVM acceleration
+
+ It is a good idea to call this if it is known that
+ KVM is not desired, even if technically available.
+ """
+
+ global forceTCG
+ forceTCG = force
class VFSGuestFS(vfs.VFS):
@@ -37,7 +50,7 @@ def __init__(self, imgfile, imgfmt='raw', partition=None):
global guestfs
if guestfs is None:
- guestfs = __import__('guestfs')
+ guestfs = importutils.import_module('guestfs')
self.handle = None
@@ -105,16 +118,28 @@ def setup(self):
LOG.debug("Setting up appliance for %(imgfile)s %(imgfmt)s",
{'imgfile': self.imgfile, 'imgfmt': self.imgfmt})
try:
- self.handle = tpool.Proxy(guestfs.GuestFS(close_on_exit=False))
+ self.handle = tpool.Proxy(
+ guestfs.GuestFS(python_return_dict=False,
+ close_on_exit=False))
except TypeError as e:
- if 'close_on_exit' in str(e):
+ if 'close_on_exit' in str(e) or 'python_return_dict' in str(e):
# NOTE(russellb) In case we're not using a version of
- # libguestfs new enough to support the close_on_exit parameter,
- # which was added in libguestfs 1.20.
+ # libguestfs new enough to support parameters close_on_exit
+ # and python_return_dict which were added in libguestfs 1.20.
self.handle = tpool.Proxy(guestfs.GuestFS())
else:
raise
+ try:
+ if forceTCG:
+ self.handle.set_backend_settings("force_tcg")
+ except AttributeError as ex:
+ # set_backend_settings method doesn't exist in older
+ # libguestfs versions, so nothing we can do but ignore
+ LOG.info(_LI("Unable to force TCG mode, libguestfs too old?"),
+ ex)
+ pass
+
try:
self.handle.add_drive_opts(self.imgfile, format=self.imgfmt)
self.handle.launch()
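
A caller that knows KVM is unavailable can flip the module-level flag before
any VFSGuestFS instance is set up; setup() then asks libguestfs for plain TCG
emulation and logs-and-continues on libguestfs versions too old to support it.
Usage, assuming only the module path from this patch::

    from nova.virt.disk.vfs import guestfs as vfs_guestfs

    # Prevent libguestfs from probing for /dev/kvm; useful when QEMU is
    # running without KVM and the probe would make launch() fail.
    vfs_guestfs.force_tcg()

    # ... VFSGuestFS.setup() will now call set_backend_settings("force_tcg")

    vfs_guestfs.force_tcg(False)   # restore the default behaviour
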
diff --git a/nova/virt/disk/vfs/localfs.py b/nova/virt/disk/vfs/localfs.py
index 314295c80a..242db7639e 100644
--- a/nova/virt/disk/vfs/localfs.py
+++ b/nova/virt/disk/vfs/localfs.py
@@ -16,8 +16,8 @@
import tempfile
from nova import exception
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.disk.mount import loop
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 95459d1eba..f12757ad6a 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -24,7 +24,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
@@ -33,9 +33,9 @@
driver_opts = [
cfg.StrOpt('compute_driver',
help='Driver to use for controlling virtualization. Options '
- 'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
- 'fake.FakeDriver, baremetal.BareMetalDriver, '
- 'vmwareapi.VMwareVCDriver'),
+ 'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
+ 'fake.FakeDriver, baremetal.BareMetalDriver, '
+ 'vmwareapi.VMwareVCDriver, hyperv.HyperVDriver'),
cfg.StrOpt('default_ephemeral_format',
help='The default format an ephemeral_volume will be '
'formatted with on creation.'),
@@ -319,7 +319,7 @@ def destroy(self, context, instance, network_info, block_device_info=None,
raise NotImplementedError()
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup the instance resources .
Instance should have been destroyed from the Hypervisor before calling
@@ -438,10 +438,13 @@ def detach_volume(self, connection_info, instance, mountpoint,
raise NotImplementedError()
def swap_volume(self, old_connection_info, new_connection_info,
- instance, mountpoint):
+ instance, mountpoint, resize_to):
"""Replace the disk attached to the instance.
:param instance: nova.objects.instance.Instance
+ :param resize_to: Indicates the new volume size, in gigabytes,
+ when the new volume is larger than the old one.
"""
raise NotImplementedError()
@@ -461,11 +464,15 @@ def detach_interface(self, instance, vif):
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
- block_device_info=None):
+ block_device_info=None,
+ timeout=0, retry_interval=0):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
:param instance: nova.objects.instance.Instance
+ :param timeout: time to wait for GuestOS to shut down
+ :param retry_interval: how often to signal guest while
+ waiting for it to shut down
"""
raise NotImplementedError()
@@ -479,6 +486,14 @@ def snapshot(self, context, instance, image_id, update_task_state):
"""
raise NotImplementedError()
+ def post_interrupted_snapshot_cleanup(self, context, instance):
+ """Cleans up any resources left after an interrupted snapshot.
+
+ :param context: security context
+ :param instance: nova.objects.instance.Instance
+ """
+ pass
+
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
@@ -589,10 +604,13 @@ def unrescue(self, instance, network_info):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def power_off(self, instance):
+ def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
+ :param timeout: time to wait for GuestOS to shut down
+ :param retry_interval: how often to signal guest while
+ waiting for it to shut down
"""
raise NotImplementedError()
@@ -695,6 +713,16 @@ def post_live_migration(self, context, instance, block_device_info,
"""
pass
+ def post_live_migration_at_source(self, context, instance, network_info):
+ """Unplug VIFs from networks at source.
+
+ :param context: security context
+ :param instance: instance object reference
+ :param network_info: instance network information
+ """
+ raise NotImplementedError(_("Hypervisor driver does not support "
+ "post_live_migration_at_source method"))
+
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
@@ -1025,11 +1053,13 @@ def get_host_cpu_stats(self):
"""Get the currently known host CPU stats.
:returns: a dict containing the CPU stat info, eg:
- {'kernel': kern,
- 'idle': idle,
- 'user': user,
- 'iowait': wait,
- 'frequency': freq},
+
+ | {'kernel': kern,
+ | 'idle': idle,
+ | 'user': user,
+ | 'iowait': wait,
+ | 'frequency': freq},
+
where kern and user indicate the cumulative CPU time
(nanoseconds) spent by kernel and user processes
respectively, idle indicates the cumulative idle CPU time
@@ -1037,6 +1067,7 @@ def get_host_cpu_stats(self):
time (nanoseconds), since the host is booting up; freq
indicates the current CPU frequency (MHz). All values are
long integers.
+
"""
raise NotImplementedError()
@@ -1078,6 +1109,10 @@ def interface_stats(self, instance_name, iface_id):
"""
raise NotImplementedError()
+ def deallocate_networks_on_reschedule(self, instance):
+ """Does the driver want networks deallocated on reschedule?"""
+ return False
+
def macs_for_instance(self, instance):
"""What MAC addresses must this instance have?
@@ -1121,13 +1156,15 @@ def dhcp_options_for_instance(self, instance):
client API.
:return: None, or a set of DHCP options, eg:
- [{'opt_name': 'bootfile-name',
- 'opt_value': '/tftpboot/path/to/config'},
- {'opt_name': 'server-ip-address',
- 'opt_value': '1.2.3.4'},
- {'opt_name': 'tftp-server',
- 'opt_value': '1.2.3.4'}
- ]
+
+ | [{'opt_name': 'bootfile-name',
+ | 'opt_value': '/tftpboot/path/to/config'},
+ | {'opt_name': 'server-ip-address',
+ | 'opt_value': '1.2.3.4'},
+ | {'opt_name': 'tftp-server',
+ | 'opt_value': '1.2.3.4'}
+ | ]
+
"""
pass
@@ -1145,7 +1182,7 @@ def manage_image_cache(self, context, all_instances):
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
- #NOTE(jogo) Currently only used for XenAPI-Pool
+ # NOTE(jogo) Currently only used for XenAPI-Pool
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
@@ -1308,6 +1345,18 @@ def default_device_names_for_instance(self, instance, root_device_name,
"""Default the missing device names in the block device mapping."""
raise NotImplementedError()
+ def is_supported_fs_format(self, fs_type):
+ """Check whether the file format is supported by this driver
+
+ :param fs_type: the file system type to be checked,
+ the valid values are defined in the disk API module.
+ """
+ # NOTE(jichenjc): Return False here so that every hypervisor
+ # needs to define its supported file system
+ # types and implement this function at its
+ # virt layer.
+ return False
+
def load_compute_driver(virtapi, compute_driver=None):
"""Load a compute driver module.
diff --git a/nova/virt/event.py b/nova/virt/event.py
index 51db4cd1b9..02b9cddbd5 100644
--- a/nova/virt/event.py
+++ b/nova/virt/event.py
@@ -22,7 +22,7 @@
import time
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
EVENT_LIFECYCLE_STARTED = 0
EVENT_LIFECYCLE_STOPPED = 1
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 21d74633b6..90549d2e07 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -31,7 +31,7 @@
from nova.compute import task_states
from nova import db
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
@@ -171,7 +171,8 @@ def poll_rebooting_instances(self, timeout, instances):
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
- block_device_info=None):
+ block_device_info=None,
+ timeout=0, retry_interval=0):
pass
def finish_revert_migration(self, context, instance, network_info,
@@ -184,7 +185,7 @@ def post_live_migration_at_destination(self, context, instance,
block_device_info=None):
pass
- def power_off(self, instance):
+ def power_off(self, instance, shutdown_timeout=0, shutdown_attempts=0):
pass
def power_on(self, context, instance, network_info, block_device_info):
@@ -219,7 +220,7 @@ def destroy(self, context, instance, network_info, block_device_info=None,
'inst': self.instances}, instance=instance)
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True):
pass
def attach_volume(self, context, connection_info, instance, mountpoint,
@@ -239,7 +240,7 @@ def detach_volume(self, connection_info, instance, mountpoint,
pass
def swap_volume(self, old_connection_info, new_connection_info,
- instance, mountpoint):
+ instance, mountpoint, resize_to):
"""Replace the disk attached to the instance."""
instance_name = instance['name']
if instance_name not in self._mounts:
@@ -248,14 +249,16 @@ def swap_volume(self, old_connection_info, new_connection_info,
def attach_interface(self, instance, image_meta, vif):
if vif['id'] in self._interfaces:
- raise exception.InterfaceAttachFailed('duplicate')
+ raise exception.InterfaceAttachFailed(
+ instance_uuid=instance['uuid'])
self._interfaces[vif['id']] = vif
def detach_interface(self, instance, vif):
try:
del self._interfaces[vif['id']]
except KeyError:
- raise exception.InterfaceDetachFailed('not attached')
+ raise exception.InterfaceDetachFailed(
+ instance_uuid=instance['uuid'])
def get_info(self, instance):
if instance['name'] not in self.instances:
@@ -382,7 +385,7 @@ def get_available_resource(self, nodename):
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
- 'hypervisor_version': '1.0',
+ 'hypervisor_version': utils.convert_version_to_int('1.0'),
'hypervisor_hostname': nodename,
'disk_available_least': 0,
'cpu_info': '?',
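
The fake driver now reports hypervisor_version as an integer, matching the
real drivers. A rough sketch of what a dotted-version-to-int conversion can
look like (illustrative, not necessarily the exact nova.utils helper)::

    from functools import reduce

    def convert_version_to_int(version):
        # Pack each dotted component into base-1000 digits so versions
        # compare numerically: '1.0' -> 1000, '6.2.0' -> 6002000.
        parts = [int(p) for p in str(version).split('.')]
        return reduce(lambda acc, part: acc * 1000 + part, parts)

    assert convert_version_to_int('1.0') == 1000
    assert convert_version_to_int('6.2.0') == 6002000
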
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index cd3c75b66c..15e65ed9c7 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -19,12 +19,10 @@
from nova.compute import utils as compute_utils
from nova import context
+from nova.i18n import _
+from nova.i18n import _LI
from nova.network import linux_net
from nova import objects
-from nova.objects import security_group as security_group_obj
-from nova.objects import security_group_rule as security_group_rule_obj
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LI
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
@@ -346,7 +344,7 @@ def instance_rules(self, instance, network_info):
# Set up rules to allow traffic to/from DHCP server
self._do_dhcp_rules(ipv4_rules, network_info)
- #Allow project network traffic
+ # Allow project network traffic
if CONF.allow_same_net_traffic:
self._do_project_network_rules(ipv4_rules, ipv6_rules,
network_info)
@@ -357,13 +355,13 @@ def instance_rules(self, instance, network_info):
# Allow RA responses
self._do_ra_rules(ipv6_rules, network_info)
- security_groups = security_group_obj.SecurityGroupList.get_by_instance(
+ security_groups = objects.SecurityGroupList.get_by_instance(
ctxt, instance)
# then, security group chains and rules
for security_group in security_groups:
- rules_cls = security_group_rule_obj.SecurityGroupRuleList
- rules = rules_cls.get_by_security_group(ctxt, security_group)
+ rules = objects.SecurityGroupRuleList.get_by_security_group(
+ ctxt, security_group)
for rule in rules:
if not rule['cidr']:
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 2e5a1deca2..0f378fbdd4 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -17,7 +17,8 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
virt_cpu_opts = [
@@ -480,3 +481,303 @@ def get_best_config(flavor, image_meta, allow_threads=True):
return VirtCPUTopology.get_desirable_configs(flavor,
image_meta,
allow_threads)[0]
+
+
+class VirtNUMATopologyCell(object):
+ """Class for reporting NUMA resources in a cell
+
+ The VirtNUMATopologyCell class represents the
+ hardware resources present in a NUMA cell.
+ """
+
+ def __init__(self, id, cpuset, memory):
+ """Create a new NUMA Cell
+
+ :param id: integer identifier of cell
+ :param cpuset: set containing list of CPU indexes
+ :param memory: RAM measured in KiB
+
+ Creates a new NUMA cell object to record the hardware
+ resources.
+
+ :returns: a new NUMA cell object
+ """
+
+ super(VirtNUMATopologyCell, self).__init__()
+
+ self.id = id
+ self.cpuset = cpuset
+ self.memory = memory
+
+ def _to_dict(self):
+ return {'cpus': format_cpu_spec(self.cpuset, allow_ranges=False),
+ 'mem': {'total': self.memory},
+ 'id': self.id}
+
+ @classmethod
+ def _from_dict(cls, data_dict):
+ cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
+ memory = data_dict.get('mem', {}).get('total', 0)
+ cell_id = data_dict.get('id')
+ return cls(cell_id, cpuset, memory)
+
+
+class VirtNUMATopologyCellUsage(VirtNUMATopologyCell):
+ """Class for reporting NUMA resources and usage in a cell
+
+ The VirtNUMATopologyCellUsage class specializes
+ VirtNUMATopologyCell to include information about the
+ utilization of hardware resources in a NUMA cell.
+ """
+
+ def __init__(self, id, cpuset, memory, cpu_usage=0, memory_usage=0):
+ """Create a new NUMA Cell with usage
+
+ :param id: integer identifier of cell
+ :param cpuset: set containing list of CPU indexes
+ :param memory: RAM measured in KiB
+ :param cpu_usage: number of CPUs allocated
+ :param memory_usage: RAM allocated in KiB
+
+ Creates a new NUMA cell object to record the hardware
+ resources and utilization. The number of CPUs specified
+ by the @cpu_usage parameter may be larger than the number
+ of bits set in @cpuset if CPU overcommit is used. Likewise
+ the amount of RAM specified by the @memory_usage parameter
+ may be larger than the available RAM in @memory if RAM
+ overcommit is used.
+
+ :returns: a new NUMA cell object
+ """
+
+ super(VirtNUMATopologyCellUsage, self).__init__(
+ id, cpuset, memory)
+
+ self.cpu_usage = cpu_usage
+ self.memory_usage = memory_usage
+
+ def _to_dict(self):
+ data_dict = super(VirtNUMATopologyCellUsage, self)._to_dict()
+ data_dict['mem']['used'] = self.memory_usage
+ data_dict['cpu_usage'] = self.cpu_usage
+ return data_dict
+
+ @classmethod
+ def _from_dict(cls, data_dict):
+ cpuset = parse_cpu_spec(data_dict.get('cpus', ''))
+ cpu_usage = data_dict.get('cpu_usage', 0)
+ memory = data_dict.get('mem', {}).get('total', 0)
+ memory_usage = data_dict.get('mem', {}).get('used', 0)
+ cell_id = data_dict.get('id')
+ return cls(cell_id, cpuset, memory, cpu_usage, memory_usage)
+
+
+class VirtNUMATopology(object):
+ """Base class for tracking NUMA topology information
+
+ The VirtNUMATopology class represents the NUMA hardware
+ topology for memory and CPUs in any machine. It is
+ later specialized for handling either guest instance
+ or compute host NUMA topology.
+ """
+
+ def __init__(self, cells=None):
+ """Create a new NUMA topology object
+
+ :param cells: list of VirtNUMATopologyCell instances
+
+ """
+
+ super(VirtNUMATopology, self).__init__()
+
+ self.cells = cells or []
+
+ def __len__(self):
+ """Defined so that boolean testing works the same as for lists."""
+ return len(self.cells)
+
+ def __repr__(self):
+ return "<%s: %s>" % (self.__class__.__name__, str(self._to_dict()))
+
+ def _to_dict(self):
+ return {'cells': [cell._to_dict() for cell in self.cells]}
+
+ @classmethod
+ def _from_dict(cls, data_dict):
+ return cls(cells=[cls.cell_class._from_dict(cell_dict)
+ for cell_dict in data_dict.get('cells', [])])
+
+ def to_json(self):
+ return jsonutils.dumps(self._to_dict())
+
+ @classmethod
+ def from_json(cls, json_string):
+ return cls._from_dict(jsonutils.loads(json_string))
+
+
+class VirtNUMAInstanceTopology(VirtNUMATopology):
+ """Class to represent the topology configured for a guest
+ instance. It provides helper APIs to determine configuration
+ from the metadata specified against the flavor and/or
+ disk image.
+ """
+
+ cell_class = VirtNUMATopologyCell
+
+ @staticmethod
+ def _get_flavor_or_image_prop(flavor, image_meta, propname):
+ flavor_val = flavor.get('extra_specs', {}).get("hw:" + propname)
+ image_val = image_meta.get("hw_" + propname)
+
+ if flavor_val is not None:
+ if image_val is not None:
+ raise exception.ImageNUMATopologyForbidden(
+ name='hw_' + propname)
+
+ return flavor_val
+ else:
+ return image_val
+
+ @classmethod
+ def _get_constraints_manual(cls, nodes, flavor, image_meta):
+ cells = []
+ totalmem = 0
+
+ availcpus = set(range(flavor.vcpus))
+
+ for node in range(nodes):
+ cpus = cls._get_flavor_or_image_prop(
+ flavor, image_meta, "numa_cpus.%d" % node)
+ mem = cls._get_flavor_or_image_prop(
+ flavor, image_meta, "numa_mem.%d" % node)
+
+ # We're expecting both properties set, so
+ # raise an error if either is missing
+ if cpus is None or mem is None:
+ raise exception.ImageNUMATopologyIncomplete()
+
+ mem = int(mem)
+ cpuset = parse_cpu_spec(cpus)
+
+ for cpu in cpuset:
+ if cpu > (flavor.vcpus - 1):
+ raise exception.ImageNUMATopologyCPUOutOfRange(
+ cpunum=cpu, cpumax=(flavor.vcpus - 1))
+
+ if cpu not in availcpus:
+ raise exception.ImageNUMATopologyCPUDuplicates(
+ cpunum=cpu)
+
+ availcpus.remove(cpu)
+
+ cells.append(VirtNUMATopologyCell(node, cpuset, mem))
+ totalmem = totalmem + mem
+
+ if availcpus:
+ raise exception.ImageNUMATopologyCPUsUnassigned(
+ cpuset=str(availcpus))
+
+ if totalmem != flavor.memory_mb:
+ raise exception.ImageNUMATopologyMemoryOutOfRange(
+ memsize=totalmem,
+ memtotal=flavor.memory_mb)
+
+ return cls(cells)
+
+ @classmethod
+ def _get_constraints_auto(cls, nodes, flavor, image_meta):
+ if ((flavor.vcpus % nodes) > 0 or
+ (flavor.memory_mb % nodes) > 0):
+ raise exception.ImageNUMATopologyAsymmetric()
+
+ cells = []
+ for node in range(nodes):
+ cpus = cls._get_flavor_or_image_prop(
+ flavor, image_meta, "numa_cpus.%d" % node)
+ mem = cls._get_flavor_or_image_prop(
+ flavor, image_meta, "numa_mem.%d" % node)
+
+ # We're not expecting any properties set, so
+ # raise an error if there are any
+ if cpus is not None or mem is not None:
+ raise exception.ImageNUMATopologyIncomplete()
+
+ ncpus = int(flavor.vcpus / nodes)
+ mem = int(flavor.memory_mb / nodes)
+ start = node * ncpus
+ cpuset = set(range(start, start + ncpus))
+
+ cells.append(VirtNUMATopologyCell(node, cpuset, mem))
+
+ return cls(cells)
+
+ @classmethod
+ def get_constraints(cls, flavor, image_meta):
+ nodes = cls._get_flavor_or_image_prop(
+ flavor, image_meta, "numa_nodes")
+
+ if nodes is None:
+ return None
+
+ nodes = int(nodes)
+
+ # We'll pick what path to go down based on whether
+ # anything is set for the first node. Both paths
+ # have logic to cope with inconsistent property usage
+ auto = cls._get_flavor_or_image_prop(
+ flavor, image_meta, "numa_cpus.0") is None
+
+ if auto:
+ return cls._get_constraints_auto(
+ nodes, flavor, image_meta)
+ else:
+ return cls._get_constraints_manual(
+ nodes, flavor, image_meta)
+
+
+class VirtNUMAHostTopology(VirtNUMATopology):
+
+ """Class represents the NUMA configuration and utilization
+ of a compute node. As well as exposing the overall topology
+ it tracks the utilization of the resources by guest instances
+ """
+
+ cell_class = VirtNUMATopologyCellUsage
+
+ @classmethod
+ def usage_from_instances(cls, host, instances, free=False):
+ """Get host topology usage
+
+ :param host: VirtNUMAHostTopology with usage information
+ :param instances: list of VirtNUMAInstanceTopology
+ :param free: If True, usage of the host will be decreased
+
+ Sum the usage from all @instances to report the overall
+ host topology usage
+
+ :returns: VirtNUMAHostTopology including usage information
+ """
+
+ if host is None:
+ return
+
+ instances = instances or []
+ cells = []
+ sign = -1 if free else 1
+ for hostcell in host.cells:
+ memory_usage = hostcell.memory_usage
+ cpu_usage = hostcell.cpu_usage
+ for instance in instances:
+ for instancecell in instance.cells:
+ if instancecell.id == hostcell.id:
+ memory_usage = (
+ memory_usage + sign * instancecell.memory)
+ cpu_usage = cpu_usage + sign * len(instancecell.cpuset)
+
+ cell = cls.cell_class(
+ hostcell.id, hostcell.cpuset, hostcell.memory,
+ max(0, cpu_usage), max(0, memory_usage))
+
+ cells.append(cell)
+
+ return cls(cells)
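
usage_from_instances() folds instance cells into the matching host cells by
id, adding (or, with free=True, subtracting) CPU counts and memory. A small
sketch using only the classes added above (values are illustrative; this runs
inside Nova's tree)::

    from nova.virt import hardware as hw

    host = hw.VirtNUMAHostTopology(cells=[
        hw.VirtNUMATopologyCellUsage(0, set([0, 1]), 512),
        hw.VirtNUMATopologyCellUsage(1, set([2, 3]), 512)])

    instance = hw.VirtNUMAInstanceTopology(cells=[
        hw.VirtNUMATopologyCell(0, set([0, 1]), 256)])

    used = hw.VirtNUMAHostTopology.usage_from_instances(host, [instance])
    # Cell 0 now reports cpu_usage=2 and mem 'used'=256; cell 1 is untouched.
    print(used.to_json())

    # free=True subtracts the same usage again, clamped at zero.
    freed = hw.VirtNUMAHostTopology.usage_from_instances(
        used, [instance], free=True)
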
diff --git a/nova/virt/hyperv/__init__.py b/nova/virt/hyperv/__init__.py
index e69de29bb2..475333111b 100644
--- a/nova/virt/hyperv/__init__.py
+++ b/nova/virt/hyperv/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) 2014 Cloudbase Solutions Srl
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.virt.hyperv import driver
+
+HyperVDriver = driver.HyperVDriver
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index 359417e500..c074d879d8 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -28,7 +28,7 @@
import wmi
from nova import block_device
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt import driver
@@ -70,8 +70,9 @@ def get_iscsi_initiator(self):
except Exception:
LOG.info(_("The ISCSI initiator name can't be found. "
"Choosing the default one"))
- computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
+ if computer_system.PartofDomain:
+ initiator_name += '.' + computer_system.Domain.lower()
return initiator_name
def volume_in_mapping(self, mount_device, block_device_info):
diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py
index fc12ca9c75..4aaaf2ffa8 100644
--- a/nova/virt/hyperv/constants.py
+++ b/nova/virt/hyperv/constants.py
@@ -21,12 +21,14 @@
HYPERV_VM_STATE_ENABLED = 2
HYPERV_VM_STATE_DISABLED = 3
+HYPERV_VM_STATE_SHUTTING_DOWN = 4
HYPERV_VM_STATE_REBOOT = 10
HYPERV_VM_STATE_PAUSED = 32768
HYPERV_VM_STATE_SUSPENDED = 32769
HYPERV_POWER_STATE = {
HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN,
+ HYPERV_VM_STATE_SHUTTING_DOWN: power_state.SHUTDOWN,
HYPERV_VM_STATE_ENABLED: power_state.RUNNING,
HYPERV_VM_STATE_PAUSED: power_state.PAUSED,
HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED
@@ -66,10 +68,19 @@
VM_SUMMARY_UPTIME = 105
IDE_DISK = "VHD"
+IDE_DISK_FORMAT = IDE_DISK
IDE_DVD = "DVD"
+IDE_DVD_FORMAT = "ISO"
+
+DISK_FORMAT_MAP = {
+ IDE_DISK_FORMAT.lower(): IDE_DISK,
+ IDE_DVD_FORMAT.lower(): IDE_DVD
+}
DISK_FORMAT_VHD = "VHD"
DISK_FORMAT_VHDX = "VHDX"
VHD_TYPE_FIXED = 2
VHD_TYPE_DYNAMIC = 3
+
+SCSI_CONTROLLER_SLOTS_NUMBER = 64
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index daea18b959..cacb165231 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -17,7 +17,7 @@
A Hyper-V Nova Compute driver.
"""
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt.hyperv import hostops
@@ -64,7 +64,7 @@ def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
@@ -109,7 +109,8 @@ def suspend(self, instance):
def resume(self, context, instance, network_info, block_device_info=None):
self._vmops.resume(instance)
- def power_off(self, instance):
+ def power_off(self, instance, timeout=0, retry_interval=0):
+ # TODO(PhilDay): Add support for timeout (clean shutdown)
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
@@ -164,6 +165,9 @@ def check_can_live_migrate_source(self, context, instance,
return self._livemigrationops.check_can_live_migrate_source(
context, instance, dest_check_data)
+ def get_instance_disk_info(self, instance_name, block_device_info=None):
+ pass
+
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
msg = _("VIF plugging is not supported by the Hyper-V driver.")
@@ -183,7 +187,9 @@ def unfilter_instance(self, instance, network_info):
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
- block_device_info=None):
+ block_device_info=None,
+ timeout=0, retry_interval=0):
+ # TODO(PhilDay): Add support for timeout (clean shutdown)
return self._migrationops.migrate_disk_and_power_off(context,
instance, dest,
flavor,
@@ -210,5 +216,8 @@ def finish_migration(self, context, migration, instance, disk_info,
def get_host_ip_addr(self):
return self._hostops.get_host_ip_addr()
+ def get_host_uptime(self, host):
+ return self._hostops.get_host_uptime()
+
def get_rdp_console(self, context, instance):
return self._rdpconsoleops.get_rdp_console(instance)
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index 994efc8b67..e5f42009d7 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -16,8 +16,10 @@
"""
Management class for host operations.
"""
+import datetime
import os
import platform
+import time
from oslo.config import cfg
@@ -177,3 +179,23 @@ def get_host_ip_addr(self):
host_ip = self._hostutils.get_local_ips()[0]
LOG.debug("Host IP address is: %s", host_ip)
return host_ip
+
+ def get_host_uptime(self):
+ """Returns the host uptime."""
+
+ tick_count64 = self._hostutils.get_host_tick_count64()
+
+ # format the string to match libvirt driver uptime
+ # Libvirt uptime returns a combination of the following
+ # - current host time
+ # - time since host is up
+ # - number of logged in users
+ # - cpu load
+ # Since the Windows function GetTickCount64 returns only
+ # the time since the host is up, returning 0s for cpu load
+ # and number of logged in users.
+ # This is done to ensure the format of the returned
+ # value is the same as in libvirt.
+ return "%s up %s, 0 users, load average: 0, 0, 0" % (
+ str(time.strftime("%H:%M:%S")),
+ str(datetime.timedelta(milliseconds=long(tick_count64))))
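
GetTickCount64 returns milliseconds since boot; the string above mimics the
uptime(1)-style value the libvirt driver returns, zero-filling the fields
Windows does not track. The formatting in isolation::

    import datetime
    import time

    def format_uptime(tick_count_ms):
        # current time, time since boot, then zeroed users/load fields
        return "%s up %s, 0 users, load average: 0, 0, 0" % (
            time.strftime("%H:%M:%S"),
            datetime.timedelta(milliseconds=int(tick_count_ms)))

    # e.g. "12:34:56 up 1 day, 2:03:04, 0 users, load average: 0, 0, 0"
    print(format_uptime(93784000))
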
diff --git a/nova/virt/hyperv/hostutils.py b/nova/virt/hyperv/hostutils.py
index e7f4534132..e7fc9b68e8 100644
--- a/nova/virt/hyperv/hostutils.py
+++ b/nova/virt/hyperv/hostutils.py
@@ -75,3 +75,6 @@ def get_local_ips(self):
# Returns IPv4 and IPv6 addresses, ordered by protocol family
addr_info.sort()
return [a[4][0] for a in addr_info]
+
+ def get_host_tick_count64(self):
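+ # GetTickCount64 returns the number of milliseconds elapsed
+ # since the system was started and, unlike GetTickCount, does
+ # not wrap around after 49.7 days.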
+ return ctypes.windll.kernel32.GetTickCount64()
diff --git a/nova/virt/hyperv/imagecache.py b/nova/virt/hyperv/imagecache.py
index c4b86e2b6c..88eacb14fd 100644
--- a/nova/virt/hyperv/imagecache.py
+++ b/nova/virt/hyperv/imagecache.py
@@ -20,8 +20,8 @@
from oslo.config import cfg
from nova.compute import flavors
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import utils
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index d8bc81e7f1..5411967498 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -20,8 +20,8 @@
from oslo.config import cfg
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
diff --git a/nova/virt/hyperv/livemigrationutils.py b/nova/virt/hyperv/livemigrationutils.py
index 219b4e10ca..4ff16fc2ab 100644
--- a/nova/virt/hyperv/livemigrationutils.py
+++ b/nova/virt/hyperv/livemigrationutils.py
@@ -19,7 +19,7 @@
import wmi
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import vmutilsv2
@@ -181,7 +181,6 @@ def _get_vhd_setting_data(self, vm):
for sasd in sasds:
if (sasd.ResourceType == 31 and sasd.ResourceSubType ==
"Microsoft:Hyper-V:Virtual Hard Disk"):
- #sasd.PoolId = ""
new_resource_setting_data.append(sasd.GetText_(1))
return new_resource_setting_data
diff --git a/nova/virt/hyperv/migrationops.py b/nova/virt/hyperv/migrationops.py
index e883fba1e6..a803d4f797 100644
--- a/nova/virt/hyperv/migrationops.py
+++ b/nova/virt/hyperv/migrationops.py
@@ -18,10 +18,12 @@
"""
import os
+from nova import exception
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
+from nova.virt import configdrive
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vmops
@@ -101,11 +103,13 @@ def _check_target_flavor(self, instance, flavor):
curr_root_gb = instance['root_gb']
if new_root_gb < curr_root_gb:
- raise vmutils.VHDResizeException(
- _("Cannot resize the root disk to a smaller size. Current "
- "size: %(curr_root_gb)s GB. Requested size: "
- "%(new_root_gb)s GB") %
- {'curr_root_gb': curr_root_gb, 'new_root_gb': new_root_gb})
+ raise exception.InstanceFaultRollback(
+ vmutils.VHDResizeException(
+ _("Cannot resize the root disk to a smaller size. "
+ "Current size: %(curr_root_gb)s GB. Requested size: "
+ "%(new_root_gb)s GB") %
+ {'curr_root_gb': curr_root_gb,
+ 'new_root_gb': new_root_gb}))
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
@@ -143,6 +147,17 @@ def _revert_migration_files(self, instance_name):
instance_name)
self._pathutils.rename(revert_path, instance_path)
+ def _check_and_attach_config_drive(self, instance):
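+ # The config drive, when required, has to follow the instance
+ # through migrations and resize reverts: look it up in the
+ # instance directory and reattach it, failing if it is missing.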
+ if configdrive.required_by(instance):
+ configdrive_path = self._pathutils.lookup_configdrive_path(
+ instance.name)
+ if configdrive_path:
+ self._vmops.attach_config_drive(instance, configdrive_path)
+ else:
+ raise vmutils.HyperVException(
+ _("Config drive is required by instance: %s, "
+ "but it does not exist.") % instance.name)
+
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("finish_revert_migration called", instance=instance)
@@ -160,6 +175,8 @@ def finish_revert_migration(self, context, instance, network_info,
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path)
+ self._check_and_attach_config_drive(instance)
+
if power_on:
self._vmops.power_on(instance)
@@ -268,5 +285,8 @@ def finish_migration(self, context, migration, instance, disk_info,
self._vmops.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path)
+
+ self._check_and_attach_config_drive(instance)
+
if power_on:
self._vmops.power_on(instance)
diff --git a/nova/virt/hyperv/networkutils.py b/nova/virt/hyperv/networkutils.py
index 2b45343b8a..07ad489187 100644
--- a/nova/virt/hyperv/networkutils.py
+++ b/nova/virt/hyperv/networkutils.py
@@ -23,7 +23,7 @@
if sys.platform == 'win32':
import wmi
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.virt.hyperv import vmutils
@@ -48,7 +48,7 @@ def get_external_vswitch(self, vswitch_name):
def create_vswitch_port(self, vswitch_path, port_name):
switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0]
- #Create a port on the vswitch.
+ # Create a port on the vswitch.
(new_port, ret_val) = switch_svc.CreateSwitchPort(
Name=str(uuid.uuid4()),
FriendlyName=port_name,
diff --git a/nova/virt/hyperv/networkutilsv2.py b/nova/virt/hyperv/networkutilsv2.py
index c3ec6a3497..558f7c44cd 100644
--- a/nova/virt/hyperv/networkutilsv2.py
+++ b/nova/virt/hyperv/networkutilsv2.py
@@ -24,7 +24,7 @@
if sys.platform == 'win32':
import wmi
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.virt.hyperv import networkutils
from nova.virt.hyperv import vmutils
diff --git a/nova/virt/hyperv/pathutils.py b/nova/virt/hyperv/pathutils.py
index 853281899d..4e4a83a366 100644
--- a/nova/virt/hyperv/pathutils.py
+++ b/nova/virt/hyperv/pathutils.py
@@ -18,9 +18,10 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
+from nova.virt.hyperv import constants
LOG = logging.getLogger(__name__)
@@ -132,6 +133,15 @@ def _lookup_vhd_path(self, instance_name, vhd_path_func):
def lookup_root_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name, self.get_root_vhd_path)
+ def lookup_configdrive_path(self, instance_name):
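+ # Probe each file extension known to constants.DISK_FORMAT_MAP
+ # (e.g. 'vhd' or 'iso') and return the first config drive path
+ # that exists on disk, or None.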
+ configdrive_path = None
+ for format_ext in constants.DISK_FORMAT_MAP:
+ test_path = self.get_configdrive_path(instance_name, format_ext)
+ if self.exists(test_path):
+ configdrive_path = test_path
+ break
+ return configdrive_path
+
def lookup_ephemeral_vhd_path(self, instance_name):
return self._lookup_vhd_path(instance_name,
self.get_ephemeral_vhd_path)
@@ -140,6 +150,10 @@ def get_root_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'root.' + format_ext.lower())
+ def get_configdrive_path(self, instance_name, format_ext):
+ instance_path = self.get_instance_dir(instance_name)
+ return os.path.join(instance_path, 'configdrive.' + format_ext.lower())
+
def get_ephemeral_vhd_path(self, instance_name, format_ext):
instance_path = self.get_instance_dir(instance_name)
return os.path.join(instance_path, 'ephemeral.' + format_ext.lower())
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index a103557579..0c604b46fc 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -21,8 +21,8 @@
from oslo.config import cfg
from nova.compute import task_states
+from nova.i18n import _
from nova.image import glance
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import utilsfactory
diff --git a/nova/virt/hyperv/utilsfactory.py b/nova/virt/hyperv/utilsfactory.py
index 6259586160..88778008f8 100644
--- a/nova/virt/hyperv/utilsfactory.py
+++ b/nova/virt/hyperv/utilsfactory.py
@@ -15,6 +15,7 @@
from oslo.config import cfg
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import hostutils
from nova.virt.hyperv import livemigrationutils
@@ -57,20 +58,30 @@ def _get_class(v1_class, v2_class, force_v1_flag):
return cls
+def _get_virt_utils_class(v1_class, v2_class):
+ # The "root/virtualization" WMI namespace is no longer supported on
+ # Windows Server / Hyper-V Server 2012 R2 / Windows 8.1
+ # (kernel version 6.3) or above.
+ if (CONF.hyperv.force_hyperv_utils_v1 and
+ get_hostutils().check_min_windows_version(6, 3)):
+ raise vmutils.HyperVException(
+ _('The "force_hyperv_utils_v1" option cannot be set to "True" '
+ 'on Windows Server / Hyper-V Server 2012 R2 or above as the WMI '
+ '"root/virtualization" namespace is no longer supported.'))
+ return _get_class(v1_class, v2_class, CONF.hyperv.force_hyperv_utils_v1)
+
+
def get_vmutils(host='.'):
- return _get_class(vmutils.VMUtils, vmutilsv2.VMUtilsV2,
- CONF.hyperv.force_hyperv_utils_v1)(host)
+ return _get_virt_utils_class(vmutils.VMUtils, vmutilsv2.VMUtilsV2)(host)
def get_vhdutils():
- return _get_class(vhdutils.VHDUtils, vhdutilsv2.VHDUtilsV2,
- CONF.hyperv.force_hyperv_utils_v1)()
+ return _get_virt_utils_class(vhdutils.VHDUtils, vhdutilsv2.VHDUtilsV2)()
def get_networkutils():
- return _get_class(networkutils.NetworkUtils,
- networkutilsv2.NetworkUtilsV2,
- CONF.hyperv.force_hyperv_utils_v1)()
+ return _get_virt_utils_class(networkutils.NetworkUtils,
+ networkutilsv2.NetworkUtilsV2)()
def get_hostutils():
@@ -91,6 +102,5 @@ def get_livemigrationutils():
def get_rdpconsoleutils():
- return _get_class(rdpconsoleutils.RDPConsoleUtils,
- rdpconsoleutilsv2.RDPConsoleUtilsV2,
- CONF.hyperv.force_hyperv_utils_v1)()
+ return _get_virt_utils_class(rdpconsoleutils.RDPConsoleUtils,
+ rdpconsoleutilsv2.RDPConsoleUtilsV2)()
diff --git a/nova/virt/hyperv/vhdutils.py b/nova/virt/hyperv/vhdutils.py
index a7c6502720..55c3a45658 100644
--- a/nova/virt/hyperv/vhdutils.py
+++ b/nova/virt/hyperv/vhdutils.py
@@ -31,7 +31,7 @@
from xml.etree import ElementTree
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
@@ -72,7 +72,10 @@ def create_dynamic_vhd(self, path, max_internal_size, format):
Path=path, MaxInternalSize=max_internal_size)
self._vmutils.check_ret_val(ret_val, job_path)
- def create_differencing_vhd(self, path, parent_path):
+ def create_differencing_vhd(self, path, parent_path, size=None):
+ if size is not None:
+ raise vmutils.HyperVException(_('VHD differencing disks cannot be '
+ 'resized'))
image_man_svc = self._conn.Msvm_ImageManagementService()[0]
(job_path, ret_val) = image_man_svc.CreateDifferencingVirtualHardDisk(
@@ -115,21 +118,21 @@ def resize_vhd(self, vhd_path, new_max_size, is_file_max_size=True):
def get_internal_vhd_size_by_file_size(self, vhd_path, new_vhd_file_size):
"""Fixed VHD size = Data Block size + 512 bytes
- Dynamic_VHD_size = Dynamic Disk Header
- + Copy of hard disk footer
- + Hard Disk Footer
- + Data Block
- + BAT
- Dynamic Disk header fields
- Copy of hard disk footer (512 bytes)
- Dynamic Disk Header (1024 bytes)
- BAT (Block Allocation table)
- Data Block 1
- Data Block 2
- Data Block n
- Hard Disk Footer (512 bytes)
- Default block size is 2M
- BAT entry size is 4byte
+ | Dynamic_VHD_size = Dynamic Disk Header
+ | + Copy of hard disk footer
+ | + Hard Disk Footer
+ | + Data Block
+ | + BAT
+ | Dynamic Disk header fields
+ | Copy of hard disk footer (512 bytes)
+ | Dynamic Disk Header (1024 bytes)
+ | BAT (Block Allocation table)
+ | Data Block 1
+ | Data Block 2
+ | Data Block n
+ | Hard Disk Footer (512 bytes)
+ | Default block size is 2M
+ | BAT entry size is 4 bytes
"""
base_vhd_info = self.get_vhd_info(vhd_path)
vhd_type = base_vhd_info['Type']
@@ -148,9 +151,9 @@ def get_internal_vhd_size_by_file_size(self, vhd_path, new_vhd_file_size):
(hs + ddhs + fs)) * bs / (bes + bs)
return max_internal_size
else:
- raise vmutils.HyperVException(_("The %(vhd_type)s type VHD "
- "is not supported") %
- {"vhd_type": vhd_type})
+ vhd_parent = self.get_vhd_parent_path(vhd_path)
+ return self.get_internal_vhd_size_by_file_size(vhd_parent,
+ new_vhd_file_size)
def _get_vhd_dynamic_blk_size(self, vhd_path):
blk_size_offset = VHD_BLK_SIZE_OFFSET
diff --git a/nova/virt/hyperv/vhdutilsv2.py b/nova/virt/hyperv/vhdutilsv2.py
index 44a0f7663f..9c26861cc1 100644
--- a/nova/virt/hyperv/vhdutilsv2.py
+++ b/nova/virt/hyperv/vhdutilsv2.py
@@ -26,7 +26,7 @@
from xml.etree import ElementTree
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import units
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutils
@@ -67,11 +67,12 @@ def create_dynamic_vhd(self, path, max_internal_size, format):
self._create_vhd(self._VHD_TYPE_DYNAMIC, vhd_format, path,
max_internal_size=max_internal_size)
- def create_differencing_vhd(self, path, parent_path):
+ def create_differencing_vhd(self, path, parent_path, size=None):
parent_vhd_info = self.get_vhd_info(parent_path)
self._create_vhd(self._VHD_TYPE_DIFFERENCING,
parent_vhd_info["Format"],
- path, parent_path=parent_path)
+ path, parent_path=parent_path,
+ max_internal_size=size)
def _create_vhd(self, vhd_type, format, path, max_internal_size=None,
parent_path=None):
@@ -132,8 +133,9 @@ def get_internal_vhd_size_by_file_size(self, vhd_path,
vhd_info = self.get_vhd_info(vhd_path)
vhd_type = vhd_info['Type']
if vhd_type == self._VHD_TYPE_DIFFERENCING:
- raise vmutils.HyperVException(_("Differencing VHDX images "
- "are not supported"))
+ vhd_parent = self.get_vhd_parent_path(vhd_path)
+ return self.get_internal_vhd_size_by_file_size(vhd_parent,
+ new_vhd_file_size)
else:
try:
with open(vhd_path, 'rb') as f:
diff --git a/nova/virt/hyperv/vif.py b/nova/virt/hyperv/vif.py
index 3b64010c6e..2fa9fe83ac 100644
--- a/nova/virt/hyperv/vif.py
+++ b/nova/virt/hyperv/vif.py
@@ -78,5 +78,5 @@ def plug(self, instance, vif):
self._vmutils.set_nic_connection(vm_name, vif['id'], vswitch_data)
def unplug(self, instance, vif):
- #TODO(alepilotti) Not implemented
+ # TODO(alepilotti) Not implemented
pass
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index ca5286d9f9..c44cda09d9 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -20,14 +20,16 @@
import functools
import os
+from eventlet import timeout as etimeout
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import exception
+from nova.i18n import _, _LI, _LW
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
+from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova import utils
@@ -69,7 +71,12 @@
'the ratio between the total RAM assigned to an '
'instance and its startup RAM amount. For example a '
'ratio of 2.0 for an instance with 1024MB of RAM '
- 'implies 512MB of RAM allocated at startup')
+ 'implies 512MB of RAM allocated at startup'),
+ cfg.IntOpt('wait_soft_reboot_seconds',
+ default=60,
+ help='Number of seconds to wait for instance to shut down after'
+ ' soft reboot request is made. We fall back to hard reboot'
+ ' if instance does not shut down within this window.'),
]
CONF = cfg.CONF
@@ -77,6 +84,10 @@
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('network_api_class', 'nova.network')
+SHUTDOWN_TIME_INCREMENT = 5
+REBOOT_TYPE_SOFT = 'SOFT'
+REBOOT_TYPE_HARD = 'HARD'
+
def check_admin_permissions(function):
@functools.wraps(function)
@@ -136,9 +147,12 @@ def get_info(self, instance):
def _create_root_vhd(self, context, instance):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
+ base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
+ base_vhd_size = base_vhd_info['MaxInternalSize']
format_ext = base_vhd_path.split('.')[-1]
root_vhd_path = self._pathutils.get_root_vhd_path(instance['name'],
format_ext)
+ root_vhd_size = instance['root_gb'] * units.Gi
try:
if CONF.use_cow_images:
@@ -147,8 +161,24 @@ def _create_root_vhd(self, context, instance):
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path},
instance=instance)
- self._vhdutils.create_differencing_vhd(root_vhd_path,
- base_vhd_path)
+ vhd_type = self._vhdutils.get_vhd_format(base_vhd_path)
+ if vhd_type == constants.DISK_FORMAT_VHDX:
+ # Differencing vhdx images can be resized, so we use
+ # the flavor size when creating the root image
+ root_vhd_internal_size = (
+ self._vhdutils.get_internal_vhd_size_by_file_size(
+ base_vhd_path, root_vhd_size))
+ if not self._is_resize_needed(root_vhd_path, base_vhd_size,
+ root_vhd_internal_size,
+ instance):
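+ # No resize is required: pass None so the differencing disk
+ # simply inherits the parent's virtual size.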
+ root_vhd_internal_size = None
+
+ self._vhdutils.create_differencing_vhd(
+ root_vhd_path, base_vhd_path, root_vhd_internal_size)
+ else:
+ # The base image had already been resized
+ self._vhdutils.create_differencing_vhd(root_vhd_path,
+ base_vhd_path)
else:
LOG.debug("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s",
@@ -157,27 +187,13 @@ def _create_root_vhd(self, context, instance):
instance=instance)
self._pathutils.copyfile(base_vhd_path, root_vhd_path)
- base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
- base_vhd_size = base_vhd_info['MaxInternalSize']
- root_vhd_size = instance['root_gb'] * units.Gi
-
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
root_vhd_path, root_vhd_size))
- if root_vhd_internal_size < base_vhd_size:
- error_msg = _("Cannot resize a VHD to a smaller size, the"
- " original size is %(base_vhd_size)s, the"
- " newer size is %(root_vhd_size)s"
- ) % {'base_vhd_size': base_vhd_size,
- 'root_vhd_size': root_vhd_internal_size}
- raise vmutils.HyperVException(error_msg)
- elif root_vhd_internal_size > base_vhd_size:
- LOG.debug("Resizing VHD %(root_vhd_path)s to new "
- "size %(root_vhd_size)s",
- {'root_vhd_size': root_vhd_internal_size,
- 'root_vhd_path': root_vhd_path},
- instance=instance)
+ if self._is_resize_needed(root_vhd_path, base_vhd_size,
+ root_vhd_internal_size,
+ instance):
self._vhdutils.resize_vhd(root_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
@@ -188,6 +204,23 @@ def _create_root_vhd(self, context, instance):
return root_vhd_path
+ def _is_resize_needed(self, vhd_path, old_size, new_size, instance):
+ if new_size < old_size:
+ error_msg = _("Cannot resize a VHD to a smaller size, the"
+ " original size is %(old_size)s, the"
+ " newer size is %(new_size)s"
+ ) % {'old_size': old_size,
+ 'new_size': new_size}
+ raise vmutils.VHDResizeException(error_msg)
+ elif new_size > old_size:
+ LOG.debug("Resizing VHD %(vhd_path)s to new "
+ "size %(new_size)s" %
+ {'new_size': new_size,
+ 'vhd_path': vhd_path},
+ instance=instance)
+ return True
+ return False
+
def create_ephemeral_vhd(self, instance):
eph_vhd_size = instance.get('ephemeral_gb', 0) * units.Gi
if eph_vhd_size:
@@ -224,8 +257,10 @@ def spawn(self, context, instance, image_meta, injected_files,
root_vhd_path, eph_vhd_path)
if configdrive.required_by(instance):
- self._create_config_drive(instance, injected_files,
- admin_password)
+ configdrive_path = self._create_config_drive(instance,
+ injected_files,
+ admin_password)
+ self.attach_config_drive(instance, configdrive_path)
self.power_on(instance)
except Exception:
@@ -305,7 +340,6 @@ def _create_config_drive(self, instance, injected_files, admin_password):
e, instance=instance)
if not CONF.hyperv.config_drive_cdrom:
- drive_type = constants.IDE_DISK
configdrive_path = os.path.join(instance_path,
'configdrive.vhd')
utils.execute(CONF.hyperv.qemu_img_cmd,
@@ -319,11 +353,19 @@ def _create_config_drive(self, instance, injected_files, admin_password):
attempts=1)
self._pathutils.remove(configdrive_path_iso)
else:
- drive_type = constants.IDE_DVD
configdrive_path = configdrive_path_iso
- self._vmutils.attach_ide_drive(instance['name'], configdrive_path,
- 1, 0, drive_type)
+ return configdrive_path
+
+ def attach_config_drive(self, instance, configdrive_path):
+ configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):]
+ # Attach the config drive, raising InvalidDiskFormat if the file
+ # extension is not listed in constants.DISK_FORMAT_MAP.
+ try:
+ self._vmutils.attach_ide_drive(instance.name, configdrive_path,
+ 1, 0, constants.DISK_FORMAT_MAP[configdrive_ext])
+ except KeyError:
+ raise exception.InvalidDiskFormat(disk_format=configdrive_ext)
def _disconnect_volumes(self, volume_drives):
for volume_drive in volume_drives:
@@ -341,7 +383,7 @@ def destroy(self, instance, network_info=None, block_device_info=None,
try:
if self._vmutils.vm_exists(instance_name):
- #Stop the VM first.
+ # Stop the VM first.
self.power_off(instance)
storage = self._vmutils.get_vm_storage_paths(instance_name)
@@ -362,9 +404,39 @@ def destroy(self, instance, network_info=None, block_device_info=None,
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug("Rebooting instance", instance=instance)
+
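+ # A soft reboot is attempted as a clean guest shutdown followed
+ # by a power on; if the shutdown fails or times out, fall
+ # through to a hard reboot via the Hyper-V state change.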
+ if reboot_type == REBOOT_TYPE_SOFT:
+ if self._soft_shutdown(instance):
+ self.power_on(instance)
+ return
+
self._set_vm_state(instance['name'],
constants.HYPERV_VM_STATE_REBOOT)
+ def _soft_shutdown(self, instance,
+ timeout=CONF.hyperv.wait_soft_reboot_seconds):
+ """Perform a soft shutdown on the VM.
+
+ :return: True if the instance was shut down within the time limit,
+ False otherwise.
+ """
+ LOG.debug("Performing Soft shutdown on instance", instance=instance)
+
+ try:
+ self._vmutils.soft_shutdown_vm(instance.name)
+ if self._wait_for_power_off(instance.name, timeout):
+ LOG.info(_LI("Soft shutdown succeded."), instance=instance)
+ return True
+ except vmutils.HyperVException as e:
+ # Exception is raised when trying to shutdown the instance
+ # while it is still booting.
+ LOG.warning(_LW("Soft shutdown failed: %s"), e, instance=instance)
+ return False
+
+ LOG.warning(_LW("Timed out while waiting for soft shutdown."),
+ instance=instance)
+ return False
+
def pause(self, instance):
"""Pause VM instance."""
LOG.debug("Pause instance", instance=instance)
@@ -412,3 +484,36 @@ def _set_vm_state(self, vm_name, req_state):
LOG.error(_("Failed to change vm state of %(vm_name)s"
" to %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
+
+ def _get_vm_state(self, instance_name):
+ summary_info = self._vmutils.get_vm_summary_info(instance_name)
+ return summary_info['EnabledState']
+
+ def _wait_for_power_off(self, instance_name, time_limit):
+ """Waiting for a VM to be in a disabled state.
+
+ :return: True if the instance is shutdown within time_limit,
+ False otherwise.
+ """
+
+ desired_vm_states = [constants.HYPERV_VM_STATE_DISABLED]
+
+ def _check_vm_status(instance_name):
+ if self._get_vm_state(instance_name) in desired_vm_states:
+ raise loopingcall.LoopingCallDone()
+
+ periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status,
+ instance_name)
+
+ try:
+ # add a timeout to the periodic call.
+ periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT)
+ etimeout.with_timeout(time_limit, periodic_call.wait)
+ except etimeout.Timeout:
+ # VM did not shut down within the expected time_limit.
+ return False
+ finally:
+ # stop the periodic call, in case of exceptions or Timeout.
+ periodic_call.stop()
+
+ return True
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
old mode 100644
new mode 100755
index e036ba89ff..c9621fe748
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -28,7 +28,7 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.hyperv import constants
@@ -80,8 +80,11 @@ class VMUtils(object):
'Msvm_SyntheticEthernetPortSettingData'
_AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
+ _SHUTDOWN_COMPONENT = "Msvm_ShutdownComponent"
+
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
+ constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
constants.HYPERV_VM_STATE_REBOOT: 10,
constants.HYPERV_VM_STATE_PAUSED: 32768,
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
@@ -111,7 +114,7 @@ def get_vm_summary_info(self, vm_name):
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
- #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
+ # See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
@@ -130,7 +133,13 @@ def get_vm_summary_info(self, vm_name):
if si.UpTime is not None:
up_time = long(si.UpTime)
- enabled_state = self._enabled_states_map[si.EnabledState]
+ # Nova requires a valid state to be returned. Hyper-V has more
+ # states than Nova, typically intermediate ones, and since there
+ # is no direct mapping for those, ENABLED is the only reasonable
+ # option, considering that in all the non-mappable states the
+ # instance is running.
+ enabled_state = self._enabled_states_map.get(si.EnabledState,
+ constants.HYPERV_VM_STATE_ENABLED)
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
@@ -266,7 +275,7 @@ def get_vm_ide_controller(self, vm_name, ctrller_addr):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_ide_controller(vm, ctrller_addr)
- def get_attached_disks_count(self, scsi_controller_path):
+ def get_attached_disks(self, scsi_controller_path):
volumes = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
@@ -277,7 +286,7 @@ def get_attached_disks_count(self, scsi_controller_path):
self._PHYS_DISK_RES_SUB_TYPE,
'parent':
scsi_controller_path.replace("'", "''")})
- return len(volumes)
+ return volumes
def _get_new_setting_data(self, class_name):
return self._conn.query("SELECT * FROM %s WHERE InstanceID "
@@ -309,10 +318,10 @@ def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive = self._get_new_resource_setting_data(res_sub_type)
- #Set the IDE ctrller as parent.
+ # Set the IDE ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
- #Add the cloned disk drive object to the vm.
+ # Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
@@ -322,11 +331,11 @@ def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
res_sub_type = self._IDE_DVD_RES_SUB_TYPE
res = self._get_new_resource_setting_data(res_sub_type)
- #Set the new drive as the parent.
+ # Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
- #Add the new vhd object as a virtual hard disk to the vm.
+ # Add the new vhd object as a virtual hard disk to the vm.
self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
@@ -366,28 +375,43 @@ def _get_nic_data_by_name(self, name):
def create_nic(self, vm_name, nic_name, mac_address):
"""Create a (synthetic) nic and attach it to the vm."""
- #Create a new nic
+ # Create a new nic
new_nic_data = self._get_new_setting_data(
self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
- #Configure the nic
+ # Configure the nic
new_nic_data.ElementName = nic_name
new_nic_data.Address = mac_address.replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
- #Add the new nic to the vm
+ # Add the new nic to the vm
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(new_nic_data, vm.path_())
+ def soft_shutdown_vm(self, vm_name):
+ vm = self._lookup_vm_check(vm_name)
+ shutdown_component = vm.associators(
+ wmi_result_class=self._SHUTDOWN_COMPONENT)
+
+ if not shutdown_component:
+ # If no shutdown_component is found, it means the VM is already
+ # in a shutdown state.
+ return
+
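+ # Request a graceful shutdown through the Hyper-V guest
+ # integration services (Force=False allows running applications
+ # to block the shutdown).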
+ reason = 'Soft shutdown requested by OpenStack Nova.'
+ (ret_val, ) = shutdown_component[0].InitiateShutdown(Force=False,
+ Reason=reason)
+ self.check_ret_val(ret_val, None)
+
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
vm = self._lookup_vm_check(vm_name)
(job_path,
ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
- #Invalid state for current operation (32775) typically means that
- #the VM is already in the state requested
+ # Invalid state for current operation (32775) typically means that
+ # the VM is already in the state requested
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s",
@@ -430,7 +454,7 @@ def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
- #Remove the VM. Does not destroy disks.
+ # Remove the VM. Does not destroy disks.
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
diff --git a/nova/virt/hyperv/vmutilsv2.py b/nova/virt/hyperv/vmutilsv2.py
index ed2c0788cc..f5d0d50110 100644
--- a/nova/virt/hyperv/vmutilsv2.py
+++ b/nova/virt/hyperv/vmutilsv2.py
@@ -58,6 +58,7 @@ class VMUtilsV2(vmutils.VMUtils):
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
+ constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
constants.HYPERV_VM_STATE_REBOOT: 11,
constants.HYPERV_VM_STATE_PAUSED: 9,
constants.HYPERV_VM_STATE_SUSPENDED: 6}
@@ -104,11 +105,11 @@ def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive = self._get_new_resource_setting_data(res_sub_type)
- #Set the IDE ctrller as parent.
+ # Set the IDE ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
drive.AddressOnParent = drive_addr
- #Add the cloned disk drive object to the vm.
+ # Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
@@ -157,7 +158,7 @@ def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
- #Remove the VM. It does not destroy any associated virtual disk.
+ # Remove the VM. It does not destroy any associated virtual disk.
(job_path, ret_val) = vs_man_svc.DestroySystem(vm.path_())
self.check_ret_val(ret_val, job_path)
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 7b3b0598fb..c72af82fee 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -22,11 +22,13 @@
from oslo.config import cfg
from nova import exception
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt import driver
+from nova.virt.hyperv import constants
from nova.virt.hyperv import utilsfactory
+from nova.virt.hyperv import vmutils
LOG = logging.getLogger(__name__)
@@ -121,18 +123,18 @@ def attach_volume(self, connection_info, instance_name, ebs_root=False):
target_lun = data['target_lun']
target_iqn = data['target_iqn']
- #Getting the mounted disk
+ # Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
if ebs_root:
- #Find the IDE controller for the vm.
+ # Find the IDE controller for the vm.
ctrller_path = self._vmutils.get_vm_ide_controller(
instance_name, 0)
- #Attaching to the first slot
+ # Attaching to the first slot
slot = 0
else:
- #Find the SCSI controller for the vm
+ # Find the SCSI controller for the vm
ctrller_path = self._vmutils.get_vm_scsi_controller(
instance_name)
slot = self._get_free_controller_slot(ctrller_path)
@@ -149,8 +151,13 @@ def attach_volume(self, connection_info, instance_name, ebs_root=False):
self._volutils.logout_storage_target(target_iqn)
def _get_free_controller_slot(self, scsi_controller_path):
- #Slots starts from 0, so the length of the disks gives us the free slot
- return self._vmutils.get_attached_disks_count(scsi_controller_path)
+ attached_disks = self._vmutils.get_attached_disks(scsi_controller_path)
+ used_slots = [int(disk.AddressOnParent) for disk in attached_disks]
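+ # Volumes can be detached out of order, leaving gaps, so pick
+ # the first unoccupied SCSI slot instead of assuming that the
+ # attached disk count equals the next free slot.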
+
+ for slot in xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
+ if slot not in used_slots:
+ return slot
+ raise vmutils.HyperVException("Exceeded the maximum number of slots")
def detach_volumes(self, block_device_info, instance_name):
mapping = driver.block_device_info_get_mapping(block_device_info)
@@ -172,7 +179,7 @@ def detach_volume(self, connection_info, instance_name):
target_lun = data['target_lun']
target_iqn = data['target_iqn']
- #Getting the mounted disk
+ # Getting the mounted disk
mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn,
target_lun)
@@ -217,7 +224,7 @@ def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
LOG.debug('Device number: %(device_number)s, '
'target lun: %(target_lun)s',
{'device_number': device_number, 'target_lun': target_lun})
- #Finding Mounted disk drive
+ # Finding Mounted disk drive
for i in range(0, CONF.hyperv.volume_attach_retry_count):
mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number(
device_number)
@@ -231,10 +238,10 @@ def _get_mounted_disk_from_lun(self, target_iqn, target_lun,
return mounted_disk_path
def disconnect_volume(self, physical_drive_path):
- #Get the session_id of the ISCSI connection
+ # Get the session_id of the ISCSI connection
session_id = self._volutils.get_session_id_from_mounted_disk(
physical_drive_path)
- #Logging out the target
+ # Logging out the target
self._volutils.execute_log_out(session_id)
def get_target_from_disk_path(self, physical_drive_path):
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index 2740da2911..05be31af90 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -28,7 +28,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.hyperv import basevolumeutils
@@ -64,7 +64,7 @@ def _login_target_portal(self, target_portal):
self.execute('iscsicli.exe', 'RefreshTargetPortal',
target_address, target_port)
else:
- #Adding target portal to iscsi initiator. Sending targets
+ # Adding target portal to iscsi initiator. Sending targets
self.execute('iscsicli.exe', 'AddTargetPortal',
target_address, target_port,
'*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*',
@@ -74,7 +74,7 @@ def login_storage_target(self, target_lun, target_iqn, target_portal):
"""Ensure that the target is logged in."""
self._login_target_portal(target_portal)
- #Listing targets
+ # Listing targets
self.execute('iscsicli.exe', 'ListTargets')
retry_count = CONF.hyperv.volume_attach_retry_count
diff --git a/nova/virt/hyperv/volumeutilsv2.py b/nova/virt/hyperv/volumeutilsv2.py
index be97b1a4ab..ae2a7f6b2e 100644
--- a/nova/virt/hyperv/volumeutilsv2.py
+++ b/nova/virt/hyperv/volumeutilsv2.py
@@ -26,7 +26,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.hyperv import basevolumeutils
diff --git a/nova/virt/imagecache.py b/nova/virt/imagecache.py
index eb551ed609..4045683a3c 100644
--- a/nova/virt/imagecache.py
+++ b/nova/virt/imagecache.py
@@ -78,26 +78,24 @@ def _list_running_instances(self, context, all_instances):
# NOTE(mikal): "instance name" here means "the name of a directory
# which might contain an instance" and therefore needs to include
# historical permutations as well as the current one.
- instance_names.add(instance['name'])
- instance_names.add(instance['uuid'])
-
- if (instance['task_state'] in self.resize_states or
- instance['vm_state'] == vm_states.RESIZED):
- instance_names.add(instance['name'] + '_resize')
- instance_names.add(instance['uuid'] + '_resize')
+ instance_names.add(instance.name)
+ instance_names.add(instance.uuid)
+ if (instance.task_state in self.resize_states or
+ instance.vm_state == vm_states.RESIZED):
+ instance_names.add(instance.name + '_resize')
+ instance_names.add(instance.uuid + '_resize')
for image_key in ['image_ref', 'kernel_id', 'ramdisk_id']:
- try:
- image_ref_str = str(instance[image_key])
- except KeyError:
+ image_ref_str = getattr(instance, image_key)
+ if image_ref_str is None:
continue
local, remote, insts = used_images.get(image_ref_str,
(0, 0, []))
- if instance['host'] == CONF.host:
+ if instance.host == CONF.host:
local += 1
else:
remote += 1
- insts.append(instance['name'])
+ insts.append(instance.name)
used_images[image_ref_str] = (local, remote, insts)
image_popularity.setdefault(image_ref_str, 0)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 23b2a52426..fc6cd8423c 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -24,9 +24,9 @@
from oslo.config import cfg
from nova import exception
+from nova.i18n import _
from nova import image
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import imageutils
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template
index ce5ec05a34..0ec7a5564d 100644
--- a/nova/virt/interfaces.template
+++ b/nova/virt/interfaces.template
@@ -20,6 +20,14 @@ iface {{ ifc.name }} inet static
dns-nameservers {{ ifc.dns }}
{% endif %}
{% if use_ipv6 %}
+{% if libvirt_virt_type == 'lxc' %}
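+{# For LXC guests the IPv6 address and default route are added with
+   post-up ip commands instead of a separate inet6 stanza. #}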
+{% if ifc.address_v6 %}
+ post-up ip -6 addr add {{ ifc.address_v6 }}/{{ ifc.netmask_v6 }} dev ${IFACE}
+{% endif %}
+{% if ifc.gateway_v6 %}
+ post-up ip -6 route add default via {{ ifc.gateway_v6 }} dev ${IFACE}
+{% endif %}
+{% else %}
iface {{ ifc.name }} inet6 static
address {{ ifc.address_v6 }}
netmask {{ ifc.netmask_v6 }}
@@ -27,4 +35,5 @@ iface {{ ifc.name }} inet6 static
gateway {{ ifc.gateway_v6 }}
{% endif %}
{% endif %}
+{% endif %}
{% endfor %}
diff --git a/nova/virt/ironic/__init__.py b/nova/virt/ironic/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index 746bc6908f..401e83eb7b 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -77,8 +77,8 @@
from nova import block_device
from nova.compute import flavors
from nova import exception
+from nova.i18n import _
from nova.objects import base as obj_base
-from nova.openstack.common.gettextutils import _
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import driver
@@ -442,6 +442,8 @@ def default_device_names(virt_type, context, instance, root_device_name,
driver_block_device.convert_volumes(
block_device_mapping) +
driver_block_device.convert_snapshots(
+ block_device_mapping) +
+ driver_block_device.convert_blanks(
block_device_mapping))
}
@@ -501,23 +503,6 @@ def get_disk_mapping(virt_type, instance,
driver.block_device_info_get_mapping(block_device_info))
if get_device_name(bdm)]
- if virt_type == "lxc":
- # NOTE(zul): This information is not used by the libvirt driver
- # however we need to populate mapping so the image can be
- # created when the instance is started. This can
- # be removed when we convert LXC to use block devices.
- root_disk_bus = disk_bus
- root_device_type = 'disk'
-
- root_info = get_next_disk_info(mapping,
- root_disk_bus,
- root_device_type,
- boot_index=1)
- mapping['root'] = root_info
- mapping['disk'] = root_info
-
- return mapping
-
if rescue:
rescue_info = get_next_disk_info(mapping,
disk_bus, boot_index=1)
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 74d8816a10..98db0942e0 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -23,6 +23,8 @@
helpers for populating up config object instances.
"""
+import time
+
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import units
@@ -33,6 +35,9 @@
LOG = logging.getLogger(__name__)
+# Namespace to use for Nova specific metadata items in XML
+NOVA_NS = "http://openstack.org/xmlns/libvirt/nova/1.0"
+
class LibvirtConfigObject(object):
@@ -43,18 +48,21 @@ def __init__(self, **kwargs):
self.ns_prefix = kwargs.get('ns_prefix')
self.ns_uri = kwargs.get('ns_uri')
- @staticmethod
- def _text_node(name, value):
- child = etree.Element(name)
+ def _new_node(self, name, **kwargs):
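+ # Build the element in this object's XML namespace when one is
+ # configured, so that children created via _text_node carry the
+ # same prefix (e.g. <nova:package>).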
+ if self.ns_uri is None:
+ return etree.Element(name, **kwargs)
+ else:
+ return etree.Element("{" + self.ns_uri + "}" + name,
+ nsmap={self.ns_prefix: self.ns_uri},
+ **kwargs)
+
+ def _text_node(self, name, value, **kwargs):
+ child = self._new_node(name, **kwargs)
child.text = str(value)
return child
def format_dom(self):
- if self.ns_uri is None:
- return etree.Element(self.root_name)
- else:
- return etree.Element("{" + self.ns_uri + "}" + self.root_name,
- nsmap={self.ns_prefix: self.ns_uri})
+ return self._new_node(self.root_name)
def parse_str(self, xmlstr):
self.parse_dom(etree.fromstring(xmlstr))
@@ -104,6 +112,118 @@ def format_dom(self):
return caps
+class LibvirtConfigCapsNUMATopology(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigCapsNUMATopology, self).__init__(
+ root_name="topology",
+ **kwargs)
+
+ self.cells = []
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigCapsNUMATopology, self).parse_dom(xmldoc)
+
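+ # The single child of <topology> is <cells num="...">; each of
+ # its children is a <cell> element describing one NUMA node.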
+ xmlcells = xmldoc.getchildren()[0]
+ for xmlcell in xmlcells.getchildren():
+ cell = LibvirtConfigCapsNUMACell()
+ cell.parse_dom(xmlcell)
+ self.cells.append(cell)
+
+ def format_dom(self):
+ topo = super(LibvirtConfigCapsNUMATopology, self).format_dom()
+
+ cells = etree.Element("cells")
+ cells.set("num", str(len(self.cells)))
+ topo.append(cells)
+
+ for cell in self.cells:
+ cells.append(cell.format_dom())
+
+ return topo
+
+
+class LibvirtConfigCapsNUMACell(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigCapsNUMACell, self).__init__(root_name="cell",
+ **kwargs)
+
+ self.id = None
+ self.memory = None
+ self.cpus = []
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigCapsNUMACell, self).parse_dom(xmldoc)
+
+ self.id = int(xmldoc.get("id"))
+ for c in xmldoc.getchildren():
+ if c.tag == "memory":
+ self.memory = int(c.text)
+ elif c.tag == "cpus":
+ for c2 in c.getchildren():
+ cpu = LibvirtConfigCapsNUMACPU()
+ cpu.parse_dom(c2)
+ self.cpus.append(cpu)
+
+ def format_dom(self):
+ cell = super(LibvirtConfigCapsNUMACell, self).format_dom()
+
+ cell.set("id", str(self.id))
+
+ mem = etree.Element("memory")
+ mem.set("unit", "KiB")
+ mem.text = str(self.memory)
+ cell.append(mem)
+
+ cpus = etree.Element("cpus")
+ cpus.set("num", str(len(self.cpus)))
+ for cpu in self.cpus:
+ cpus.append(cpu.format_dom())
+ cell.append(cpus)
+
+ return cell
+
+
+class LibvirtConfigCapsNUMACPU(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigCapsNUMACPU, self).__init__(root_name="cpu",
+ **kwargs)
+
+ self.id = None
+ self.socket_id = None
+ self.core_id = None
+ self.siblings = None
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigCapsNUMACPU, self).parse_dom(xmldoc)
+
+ self.id = int(xmldoc.get("id"))
+ if xmldoc.get("socket_id") is not None:
+ self.socket_id = int(xmldoc.get("socket_id"))
+ if xmldoc.get("core_id") is not None:
+ self.core_id = int(xmldoc.get("core_id"))
+
+ if xmldoc.get("siblings") is not None:
+ self.siblings = hardware.parse_cpu_spec(
+ xmldoc.get("siblings"))
+
+ def format_dom(self):
+ cpu = super(LibvirtConfigCapsNUMACPU, self).format_dom()
+
+ cpu.set("id", str(self.id))
+ if self.socket_id is not None:
+ cpu.set("socket_id", str(self.socket_id))
+ if self.core_id is not None:
+ cpu.set("core_id", str(self.core_id))
+ if self.siblings is not None:
+ cpu.set("siblings",
+ hardware.format_cpu_spec(self.siblings))
+
+ return cpu
+
+
class LibvirtConfigCapsHost(LibvirtConfigObject):
def __init__(self, **kwargs):
@@ -112,6 +232,7 @@ def __init__(self, **kwargs):
self.cpu = None
self.uuid = None
+ self.topology = None
def parse_dom(self, xmldoc):
super(LibvirtConfigCapsHost, self).parse_dom(xmldoc)
@@ -123,6 +244,9 @@ def parse_dom(self, xmldoc):
self.cpu = cpu
elif c.tag == "uuid":
self.uuid = c.text
+ elif c.tag == "topology":
+ self.topology = LibvirtConfigCapsNUMATopology()
+ self.topology.parse_dom(c)
def format_dom(self):
caps = super(LibvirtConfigCapsHost, self).format_dom()
@@ -131,6 +255,8 @@ def format_dom(self):
caps.append(self._text_node("uuid", self.uuid))
if self.cpu:
caps.append(self.cpu.format_dom())
+ if self.topology:
+ caps.append(self.topology.format_dom())
return caps
@@ -339,6 +465,63 @@ def format_dom(self):
return ft
+class LibvirtConfigGuestCPUNUMACell(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestCPUNUMACell, self).__init__(root_name="cell",
+ **kwargs)
+ self.id = None
+ self.cpus = None
+ self.memory = None
+
+ def parse_dom(self, xmldoc):
+ if xmldoc.get("id") is not None:
+ self.id = int(xmldoc.get("id"))
+ if xmldoc.get("memory") is not None:
+ self.memory = int(xmldoc.get("memory"))
+ if xmldoc.get("cpus") is not None:
+ self.cpus = hardware.parse_cpu_spec(xmldoc.get("cpus"))
+
+ def format_dom(self):
+ cell = super(LibvirtConfigGuestCPUNUMACell, self).format_dom()
+
+ if self.id is not None:
+ cell.set("id", str(self.id))
+ if self.cpus is not None:
+ cell.set("cpus",
+ hardware.format_cpu_spec(self.cpus))
+ if self.memory is not None:
+ cell.set("memory", str(self.memory))
+
+ return cell
+
+
+class LibvirtConfigGuestCPUNUMA(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestCPUNUMA, self).__init__(root_name="numa",
+ **kwargs)
+
+ self.cells = []
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigGuestCPUNUMA, self).parse_dom(xmldoc)
+
+ for child in xmldoc.getchildren():
+ if child.tag == "cell":
+ cell = LibvirtConfigGuestCPUNUMACell()
+ cell.parse_dom(child)
+ self.cells.append(cell)
+
+ def format_dom(self):
+ numa = super(LibvirtConfigGuestCPUNUMA, self).format_dom()
+
+ for cell in self.cells:
+ numa.append(cell.format_dom())
+
+ return numa
+
+
class LibvirtConfigGuestCPU(LibvirtConfigCPU):
def __init__(self, **kwargs):
@@ -346,11 +529,17 @@ def __init__(self, **kwargs):
self.mode = None
self.match = "exact"
+ self.numa = None
def parse_dom(self, xmldoc):
super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc)
self.mode = xmldoc.get('mode')
self.match = xmldoc.get('match')
+ for child in xmldoc.getchildren():
+ if child.tag == "numa":
+ numa = LibvirtConfigGuestCPUNUMA()
+ numa.parse_dom(child)
+ self.numa = numa
def format_dom(self):
cpu = super(LibvirtConfigGuestCPU, self).format_dom()
@@ -358,6 +547,8 @@ def format_dom(self):
if self.mode:
cpu.set("mode", self.mode)
cpu.set("match", self.match)
+ if self.numa is not None:
+ cpu.append(self.numa.format_dom())
return cpu
@@ -496,6 +687,7 @@ def __init__(self, **kwargs):
self.physical_block_size = None
self.readonly = False
self.snapshot = None
+ self.backing_store = None
def format_dom(self):
dev = super(LibvirtConfigGuestDisk, self).format_dom()
@@ -623,15 +815,57 @@ def parse_dom(self, xmldoc):
elif c.tag == 'serial':
self.serial = c.text
-
- for c in xmldoc.getchildren():
- if c.tag == 'target':
+ elif c.tag == 'target':
if self.source_type == 'mount':
self.target_path = c.get('dir')
else:
self.target_dev = c.get('dev')
self.target_bus = c.get('bus', None)
+ elif c.tag == 'backingStore':
+ b = LibvirtConfigGuestDiskBackingStore()
+ b.parse_dom(c)
+ self.backing_store = b
+
+
+class LibvirtConfigGuestDiskBackingStore(LibvirtConfigObject):
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestDiskBackingStore, self).__init__(
+ root_name="backingStore", **kwargs)
+
+ self.index = None
+ self.source_type = None
+ self.source_file = None
+ self.source_protocol = None
+ self.source_name = None
+ self.source_hosts = []
+ self.source_ports = []
+ self.driver_name = None
+ self.driver_format = None
+ self.backing_store = None
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigGuestDiskBackingStore, self).parse_dom(xmldoc)
+
+ self.source_type = xmldoc.get('type')
+ self.index = xmldoc.get('index')
+
+ for c in xmldoc.getchildren():
+ if c.tag == 'driver':
+ self.driver_name = c.get('name')
+ self.driver_format = c.get('type')
+ elif c.tag == 'source':
+ self.source_file = c.get('file')
+ self.source_protocol = c.get('protocol')
+ self.source_name = c.get('name')
+ for d in c.getchildren():
+ if d.tag == 'host':
+ self.source_hosts.append(d.get('name'))
+ self.source_ports.append(d.get('port'))
+ elif c.tag == 'backingStore':
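+ # A nested, non-empty <backingStore> describes the next image
+ # in the backing chain; an empty element terminates the chain.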
+ if c.getchildren():
+ self.backing_store = LibvirtConfigGuestDiskBackingStore()
+ self.backing_store.parse_dom(c)
class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject):
@@ -789,6 +1023,9 @@ def __init__(self, **kwargs):
**kwargs)
self.net_type = None
+ self.vhostuser_type = None
+ self.vhostuser_path = None
+ self.vhostuser_mode = None
self.target_dev = None
self.model = None
self.mac_addr = None
@@ -824,6 +1061,10 @@ def format_dom(self):
elif self.net_type == "direct":
dev.append(etree.Element("source", dev=self.source_dev,
mode=self.source_mode))
+ elif self.net_type == "vhostuser":
+ dev.append(etree.Element("source", type=self.vhostuser_type,
+ path=self.vhostuser_path,
+ mode=self.vhostuser_mode))
else:
dev.append(etree.Element("source", bridge=self.source_dev))
@@ -1054,16 +1295,23 @@ def __init__(self, **kwargs):
self.type = "pty"
self.source_path = None
+ self.listen_port = None
+ self.listen_host = None
def format_dom(self):
dev = super(LibvirtConfigGuestCharBase, self).format_dom()
dev.set("type", self.type)
+
if self.type == "file":
dev.append(etree.Element("source", path=self.source_path))
elif self.type == "unix":
dev.append(etree.Element("source", mode="bind",
path=self.source_path))
+ elif self.type == "tcp":
+ dev.append(etree.Element("source", mode="bind",
+ host=self.listen_host,
+ service=str(self.listen_port)))
return dev
@@ -1135,6 +1383,27 @@ def format_dom(self):
return dev
+class LibvirtConfigGuestCPUTuneVCPUPin(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestCPUTuneVCPUPin, self).__init__(
+ root_name="vcpupin",
+ **kwargs)
+
+ self.id = None
+ self.cpuset = None
+
+ def format_dom(self):
+ root = super(LibvirtConfigGuestCPUTuneVCPUPin, self).format_dom()
+
+ root.set("vcpu", str(self.id))
+ if self.cpuset is not None:
+ root.set("cpuset",
+ hardware.format_cpu_spec(self.cpuset))
+
+ return root
+
+
class LibvirtConfigGuestCPUTune(LibvirtConfigObject):
def __init__(self, **kwargs):
@@ -1143,6 +1412,7 @@ def __init__(self, **kwargs):
self.shares = None
self.quota = None
self.period = None
+ self.vcpupin = []
def format_dom(self):
root = super(LibvirtConfigGuestCPUTune, self).format_dom()
@@ -1154,6 +1424,66 @@ def format_dom(self):
if self.period is not None:
root.append(self._text_node("period", str(self.period)))
+ for vcpu in self.vcpupin:
+ root.append(vcpu.format_dom())
+
+ return root
+
+
+class LibvirtConfigGuestMemoryBacking(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestMemoryBacking, self).__init__(
+ root_name="memoryBacking", **kwargs)
+
+ self.hugepages = False
+ self.sharedpages = True
+ self.locked = False
+
+ def format_dom(self):
+ root = super(LibvirtConfigGuestMemoryBacking, self).format_dom()
+
+ if self.hugepages:
+ root.append(etree.Element("hugepages"))
+ if not self.sharedpages:
+ root.append(etree.Element("nosharedpages"))
+ if self.locked:
+ root.append(etree.Element("locked"))
+
+ return root
+
+
+class LibvirtConfigGuestMemoryTune(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestMemoryTune, self).__init__(
+ root_name="memtune", **kwargs)
+
+ self.hard_limit = None
+ self.soft_limit = None
+ self.swap_hard_limit = None
+ self.min_guarantee = None
+
+ def format_dom(self):
+ root = super(LibvirtConfigGuestMemoryTune, self).format_dom()
+
+ if self.hard_limit is not None:
+ root.append(self._text_node("hard_limit",
+ str(self.hard_limit),
+ units="K"))
+ if self.soft_limit is not None:
+ root.append(self._text_node("soft_limit",
+ str(self.soft_limit),
+ units="K"))
+ if self.swap_hard_limit is not None:
+ root.append(self._text_node("swap_hard_limit",
+ str(self.swap_hard_limit),
+ units="K"))
+ if self.min_guarantee is not None:
+ root.append(self._text_node("min_guarantee",
+ str(self.min_guarantee),
+ units="K"))
+
return root
@@ -1167,6 +1497,8 @@ def __init__(self, **kwargs):
self.uuid = None
self.name = None
self.memory = 500 * units.Mi
+ self.membacking = None
+ self.memtune = None
self.vcpus = 1
self.cpuset = None
self.cpu = None
@@ -1186,11 +1518,16 @@ def __init__(self, **kwargs):
self.os_smbios = None
self.os_mach_type = None
self.devices = []
+ self.metadata = []
def _format_basic_props(self, root):
root.append(self._text_node("uuid", self.uuid))
root.append(self._text_node("name", self.name))
root.append(self._text_node("memory", self.memory))
+ if self.membacking is not None:
+ root.append(self.membacking.format_dom())
+ if self.memtune is not None:
+ root.append(self.memtune.format_dom())
if self.cpuset is not None:
vcpu = self._text_node("vcpu", self.vcpus)
vcpu.set("cpuset", hardware.format_cpu_spec(self.cpuset))
@@ -1198,6 +1535,12 @@ def _format_basic_props(self, root):
else:
root.append(self._text_node("vcpu", self.vcpus))
+ if len(self.metadata) > 0:
+ metadata = etree.Element("metadata")
+ for m in self.metadata:
+ metadata.append(m.format_dom())
+ root.append(metadata)
+
def _format_os(self, root):
os = etree.Element("os")
type_node = self._text_node("type", self.os_type)
@@ -1435,3 +1778,102 @@ def format_dom(self):
dev.append(backend)
return dev
+
+
+class LibvirtConfigGuestMetaNovaInstance(LibvirtConfigObject):
+
+ def __init__(self):
+ super(LibvirtConfigGuestMetaNovaInstance,
+ self).__init__(root_name="instance",
+ ns_prefix="nova",
+ ns_uri=NOVA_NS)
+
+ self.package = None
+ self.flavor = None
+ self.name = None
+ self.creationTime = None
+ self.owner = None
+ self.roottype = None
+ self.rootid = None
+
+ def format_dom(self):
+ meta = super(LibvirtConfigGuestMetaNovaInstance, self).format_dom()
+
+ pkg = self._new_node("package")
+ pkg.set("version", self.package)
+ meta.append(pkg)
+ if self.name is not None:
+ meta.append(self._text_node("name", self.name))
+ if self.creationTime is not None:
+ timestr = time.strftime("%Y-%m-%d %H:%M:%S",
+ time.gmtime(self.creationTime))
+ meta.append(self._text_node("creationTime", timestr))
+ if self.flavor is not None:
+ meta.append(self.flavor.format_dom())
+ if self.owner is not None:
+ meta.append(self.owner.format_dom())
+
+ if self.roottype is not None and self.rootid is not None:
+ root = self._new_node("root")
+ root.set("type", self.roottype)
+ root.set("uuid", str(self.rootid))
+ meta.append(root)
+
+ return meta
+
+
+class LibvirtConfigGuestMetaNovaFlavor(LibvirtConfigObject):
+
+ def __init__(self):
+ super(LibvirtConfigGuestMetaNovaFlavor,
+ self).__init__(root_name="flavor",
+ ns_prefix="nova",
+ ns_uri=NOVA_NS)
+
+ self.name = None
+ self.memory = None
+ self.disk = None
+ self.swap = None
+ self.ephemeral = None
+ self.vcpus = None
+
+ def format_dom(self):
+ meta = super(LibvirtConfigGuestMetaNovaFlavor, self).format_dom()
+ meta.set("name", self.name)
+ if self.memory is not None:
+ meta.append(self._text_node("memory", str(self.memory)))
+ if self.disk is not None:
+ meta.append(self._text_node("disk", str(self.disk)))
+ if self.swap is not None:
+ meta.append(self._text_node("swap", str(self.swap)))
+ if self.ephemeral is not None:
+ meta.append(self._text_node("ephemeral", str(self.ephemeral)))
+ if self.vcpus is not None:
+ meta.append(self._text_node("vcpus", str(self.vcpus)))
+ return meta
+
+
+class LibvirtConfigGuestMetaNovaOwner(LibvirtConfigObject):
+
+ def __init__(self):
+ super(LibvirtConfigGuestMetaNovaOwner,
+ self).__init__(root_name="owner",
+ ns_prefix="nova",
+ ns_uri=NOVA_NS)
+
+ self.userid = None
+ self.username = None
+ self.projectid = None
+ self.projectname = None
+
+ def format_dom(self):
+ meta = super(LibvirtConfigGuestMetaNovaOwner, self).format_dom()
+ if self.userid is not None and self.username is not None:
+ user = self._text_node("user", self.username)
+ user.set("uuid", self.userid)
+ meta.append(user)
+ if self.projectid is not None and self.projectname is not None:
+ project = self._text_node("project", self.projectname)
+ project.set("uuid", self.projectid)
+ meta.append(project)
+ return meta
diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py
index 25e2a1c401..62c82aa591 100644
--- a/nova/virt/libvirt/designer.py
+++ b/nova/virt/libvirt/designer.py
@@ -56,6 +56,18 @@ def set_vif_host_backend_ethernet_config(conf, tapname):
conf.script = ""
+def set_vif_host_backend_vhostuser_config(conf, mode, path=None):
+ """Populate a LibvirtConfigGuestInterface instance
+ with vhostuser socket details.
+ """
+
+ conf.net_type = "vhostuser"
+ # unix is the only supported type in libvirt
+ conf.vhostuser_type = "unix"
+ conf.vhostuser_path = path or "/var/lib/libvirt/qemu/vhostuser"
+ conf.vhostuser_mode = mode
+
+
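
A hedged usage sketch of the new helper (the mode values "client" and "server" follow libvirt's vhostuser interface semantics, and the socket path is illustrative):

    conf = vconfig.LibvirtConfigGuestInterface()   # assumed config class
    set_vif_host_backend_vhostuser_config(conf, mode="client",
                                          path="/var/run/vhostuser/sock0")
    # conf now carries net_type="vhostuser", vhostuser_type="unix",
    # vhostuser_mode="client" and the socket path; with no path
    # argument the helper falls back to /var/lib/libvirt/qemu/vhostuser
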
def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None):
"""Populate a LibvirtConfigGuestInterface instance
with host backend details for an OpenVSwitch bridge.
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 914ea95f7b..f8208833e1 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -46,6 +46,7 @@
from eventlet import util as eventlet_util
from lxml import etree
from oslo.config import cfg
+import six
from nova.api.metadata import base as instance_metadata
from nova import block_device
@@ -56,21 +57,20 @@
from nova.compute import vm_mode
from nova import context as nova_context
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LI
+from nova.i18n import _LW
from nova import image
from nova import objects
-from nova.objects import flavor as flavor_obj
-from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LE
-from nova.openstack.common.gettextutils import _LI
-from nova.openstack.common.gettextutils import _LW
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
+from nova.openstack.common import timeutils
from nova.openstack.common import units
from nova.openstack.common import xmlutils
from nova.pci import pci_manager
@@ -81,7 +81,9 @@
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
+from nova.virt import diagnostics
from nova.virt.disk import api as disk
+from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import firewall
@@ -92,7 +94,9 @@
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import lvm
+from nova.virt.libvirt import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
+from nova.virt.libvirt import vif as libvirt_vif
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova import volume
@@ -106,11 +110,6 @@
LOG = logging.getLogger(__name__)
libvirt_opts = [
- cfg.StrOpt('version_cap',
- default='1.2.2', # Must always match the version in the gate
- help='Limit use of features from newer libvirt versions. '
- 'Defaults to the version that is used for automated '
- 'testing of OpenStack.'),
cfg.StrOpt('rescue_image_id',
help='Rescue ami image. This will not be used if an image id '
'is provided by the user.'),
@@ -147,10 +146,12 @@
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
- default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER',
+ default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
@@ -160,11 +161,6 @@
help='Snapshot image format (valid options are : '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
- cfg.StrOpt('vif_driver',
- default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver',
- help='DEPRECATED. The libvirt VIF driver to configure the VIFs.'
- 'This option is deprecated and will be removed in the '
- 'Juno release.'),
cfg.ListOpt('volume_drivers',
default=[
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
@@ -218,6 +214,20 @@
help='A path to a device that will be used as source of '
'entropy on the host. Permitted options are: '
'/dev/random or /dev/hwrng'),
+ cfg.ListOpt('hw_machine_type',
+ help='For qemu or KVM guests, set this option to specify '
+ 'a default machine type per host architecture. '
+ 'You can find a list of supported machine types '
+ 'in your environment by checking the output of '
+ 'the "virsh capabilities" command. The format of the '
+ 'value for this config option is host-arch=machine-type. '
+ 'For example: x86_64=machinetype1,armv7l=machinetype2'),
+ cfg.StrOpt('sysinfo_serial',
+ default='auto',
+ help='The data source used to populate the host "serial" '
+ 'UUID exposed to guest in the virtual BIOS. Permitted '
+ 'options are "hardware", "os", "none" or "auto" '
+ '(default).'),
]
CONF = cfg.CONF
@@ -290,10 +300,9 @@ def repr_method(self):
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
-MIN_LIBVIRT_VERSION = (0, 9, 6)
+MIN_LIBVIRT_VERSION = (0, 9, 11)
# When the above version matches/exceeds this version
# delete it & corresponding code using it
-MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10)
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
@@ -306,6 +315,9 @@ def repr_method(self):
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
+# Relative block commit (feature is detected,
+# this version is only used for messaging)
+MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION = (1, 2, 7)
def libvirt_error_handler(context, err):
@@ -325,7 +337,7 @@ def __init__(self, virtapi, read_only=False):
global libvirt
if libvirt is None:
- libvirt = __import__('libvirt')
+ libvirt = importutils.import_module('libvirt')
self._skip_list_all_domains = False
self._host_state = None
@@ -342,8 +354,8 @@ def __init__(self, virtapi, read_only=False):
self.virtapi,
get_connection=self._get_connection)
- vif_class = importutils.import_class(CONF.libvirt.vif_driver)
- self.vif_driver = vif_class(self._get_connection)
+ self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver(
+ self._get_connection)
self.volume_drivers = driver.driver_dict_from_config(
CONF.libvirt.volume_drivers, self)
@@ -380,6 +392,23 @@ def __init__(self, virtapi, read_only=False):
self._volume_api = volume.API()
self._image_api = image.API()
+ sysinfo_serial_funcs = {
+ 'none': lambda: None,
+ 'hardware': self._get_host_sysinfo_serial_hardware,
+ 'os': self._get_host_sysinfo_serial_os,
+ 'auto': self._get_host_sysinfo_serial_auto,
+ }
+
+ self._sysinfo_serial_func = sysinfo_serial_funcs.get(
+ CONF.libvirt.sysinfo_serial)
+ if not self._sysinfo_serial_func:
+ raise exception.NovaException(
+ _("Unexpected sysinfo_serial setting '%(actual)s'. "
+ "Permitted values are %(expect)s'") %
+ {'actual': CONF.libvirt.sysinfo_serial,
+ 'expect': ', '.join("'%s'" % k for k in
+ sysinfo_serial_funcs.keys())})
+
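
The serial source is resolved once at driver initialization through a small dispatch table keyed by the sysinfo_serial option; a self-contained sketch of the same lookup-and-validate pattern (placeholder values, not the real serial sources):

    sysinfo_serial_funcs = {
        'none': lambda: None,
        'hardware': lambda: 'uuid-from-smbios',       # placeholder
        'os': lambda: 'uuid-from-etc-machine-id',     # placeholder
    }
    func = sysinfo_serial_funcs.get('bogus')          # unknown setting
    if not func:
        # mirrors the NovaException raised above, listing permitted keys
        raise ValueError("Permitted values are %s"
                         % ', '.join("'%s'" % k for k in sysinfo_serial_funcs))
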
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
@@ -418,14 +447,6 @@ def _conn_has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None):
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
-
- if CONF.libvirt.version_cap:
- libvirt_version_cap = utils.convert_version_to_int(
- utils.convert_version_to_tuple(
- CONF.libvirt.version_cap))
- if libvirt_version > libvirt_version_cap:
- libvirt_version = libvirt_version_cap
-
if libvirt_version < utils.convert_version_to_int(lv_ver):
return False
@@ -632,6 +653,12 @@ def init_host(self, host):
libvirt.virEventRegisterDefaultImpl()
self._do_quality_warnings()
+ # Stop libguestfs from using KVM unless we're also configured
+ # to use it. This solves a problem where people need to stop
+ # Nova using KVM because nested-virt is broken
+ if CONF.libvirt.virt_type != "kvm":
+ guestfs.force_tcg()
+
if not self._has_min_version(MIN_LIBVIRT_VERSION):
major = MIN_LIBVIRT_VERSION[0]
minor = MIN_LIBVIRT_VERSION[1]
@@ -770,10 +797,6 @@ def _connect(uri, read_only):
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
- def get_num_instances(self):
- """Efficient override of base get_num_instances method."""
- return self._conn.numOfDomains()
-
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
@@ -854,12 +877,6 @@ def _list_instance_domains(self, only_running=True, only_guests=True):
return doms
- # TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
- def _list_instance_ids(self):
- if self._conn.numOfDomains() == 0:
- return []
- return self._conn.listDomainsID()
-
def list_instances(self):
names = []
for dom in self._list_instance_domains(only_running=False):
@@ -894,8 +911,8 @@ def unplug_vifs(self, instance, network_info):
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
- container_root_device = instance.get('root_device_name')
- disk.teardown_container(container_dir, container_root_device)
+ rootfs_dev = instance.system_metadata.get('rootfs_device_name')
+ disk.teardown_container(container_dir, rootfs_dev)
def _destroy(self, instance):
try:
@@ -1017,9 +1034,11 @@ def _undefine_domain(self, instance):
{'errcode': errcode, 'e': e}, instance=instance)
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True):
self._undefine_domain(instance)
- self._unplug_vifs(instance, network_info, True)
+ if destroy_vifs:
+ self._unplug_vifs(instance, network_info, True)
+
retry = True
while retry:
try:
@@ -1082,7 +1101,7 @@ def cleanup(self, context, instance, network_info, block_device_info=None,
with excutils.save_and_reraise_exception() as ctxt:
if destroy_disks:
# Don't block on Volume errors if we're trying to
- # delete the instance as we may be patially created
+ # delete the instance as we may be partially created
# or deleted
ctxt.reraise = False
LOG.warn(_LW("Ignoring Volume Error on vol %(vol_id)s "
@@ -1097,22 +1116,19 @@ def cleanup(self, context, instance, network_info, block_device_info=None,
if destroy_disks:
self._cleanup_lvm(instance)
- #NOTE(haomai): destroy volumes if needed
+ # NOTE(haomai): destroy volumes if needed
if CONF.libvirt.images_type == 'rbd':
self._cleanup_rbd(instance)
- def _cleanup_rbd(self, instance):
- pool = CONF.libvirt.images_rbd_pool
- volumes = libvirt_utils.list_rbd_volumes(pool)
- pattern = instance['uuid']
-
- def belongs_to_instance(disk):
- return disk.startswith(pattern)
-
- volumes = filter(belongs_to_instance, volumes)
+ @staticmethod
+ def _get_rbd_driver():
+ return rbd_utils.RBDDriver(
+ pool=CONF.libvirt.images_rbd_pool,
+ ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
+ rbd_user=CONF.libvirt.rbd_user)
- if volumes:
- libvirt_utils.remove_rbd_volumes(pool, *volumes)
+ def _cleanup_rbd(self, instance):
+ LibvirtDriver._get_rbd_driver().cleanup_volumes(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
@@ -1298,10 +1314,12 @@ def attach_volume(self, context, connection_info, instance, mountpoint,
with excutils.save_and_reraise_exception():
self._disconnect_volume(connection_info, disk_dev)
- def _swap_volume(self, domain, disk_path, new_path):
+ def _swap_volume(self, domain, disk_path, new_path, resize_to):
"""Swap existing disk with a new block device."""
- # Save a copy of the domain's running XML file
- xml = domain.XMLDesc(0)
+ # Save a copy of the domain's persistent XML file
+ xml = domain.XMLDesc(
+ libvirt.VIR_DOMAIN_XML_INACTIVE |
+ libvirt.VIR_DOMAIN_XML_SECURE)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
@@ -1329,11 +1347,19 @@ def _swap_volume(self, domain, disk_path, new_path):
domain.blockJobAbort(disk_path,
libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
+ if resize_to:
+ # NOTE(alex_xu): domain.blockJobAbort isn't a synchronous
+ # call. This is a bug in libvirt, so we need to wait for
+ # the pivot to finish. See libvirt bug #1119173
+ while self._wait_for_block_job(domain, disk_path,
+ wait_for_job_clean=True):
+ time.sleep(0.5)
+ domain.blockResize(disk_path, resize_to * units.Gi / units.Ki)
finally:
self._conn.defineXML(xml)
def swap_volume(self, old_connection_info,
- new_connection_info, instance, mountpoint):
+ new_connection_info, instance, mountpoint, resize_to):
instance_name = instance['name']
virt_dom = self._lookup_by_name(instance_name)
disk_dev = mountpoint.rpartition("/")[2]
@@ -1351,7 +1377,7 @@ def swap_volume(self, old_connection_info,
self._disconnect_volume(new_connection_info, disk_dev)
raise NotImplementedError(_("Swap only supports host devices"))
- self._swap_volume(virt_dom, disk_dev, conf.source_path)
+ self._swap_volume(virt_dom, disk_dev, conf.source_path, resize_to)
self._disconnect_volume(old_connection_info, disk_dev)
@staticmethod
@@ -1428,13 +1454,13 @@ def detach_volume(self, connection_info, instance, mountpoint,
def attach_interface(self, instance, image_meta, vif):
virt_dom = self._lookup_by_name(instance['name'])
- flavor = flavor_obj.Flavor.get_by_id(
+ flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
- flavor)
+ flavor, CONF.libvirt.virt_type)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = LIBVIRT_POWER_STATE[virt_dom.info()[0]]
@@ -1445,14 +1471,16 @@ def attach_interface(self, instance, image_meta, vif):
LOG.error(_LE('attaching network adapter failed.'),
instance=instance)
self.vif_driver.unplug(instance, vif)
- raise exception.InterfaceAttachFailed(instance)
+ raise exception.InterfaceAttachFailed(
+ instance_uuid=instance['uuid'])
def detach_interface(self, instance, vif):
virt_dom = self._lookup_by_name(instance['name'])
- flavor = flavor_obj.Flavor.get_by_id(
+ flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
- cfg = self.vif_driver.get_config(instance, vif, None, flavor)
+ cfg = self.vif_driver.get_config(instance, vif, None, flavor,
+ CONF.libvirt.virt_type)
try:
self.vif_driver.unplug(instance, vif)
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
@@ -1469,7 +1497,8 @@ def detach_interface(self, instance, vif):
else:
LOG.error(_LE('detaching network adapter failed.'),
instance=instance)
- raise exception.InterfaceDetachFailed(instance)
+ raise exception.InterfaceDetachFailed(
+ instance_uuid=instance['uuid'])
def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):
metadata = {'is_public': False,
@@ -1619,7 +1648,8 @@ def snapshot(self, context, instance, image_id, update_task_state):
instance=instance)
@staticmethod
- def _wait_for_block_job(domain, disk_path, abort_on_error=False):
+ def _wait_for_block_job(domain, disk_path, abort_on_error=False,
+ wait_for_job_clean=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
@@ -1640,15 +1670,19 @@ def _wait_for_block_job(domain, disk_path, abort_on_error=False):
except Exception:
return False
- if cur == end:
- return False
+ if wait_for_job_clean:
+ job_ended = not status
else:
- return True
+ job_ended = cur == end
+
+ return not job_ended
def _live_snapshot(self, domain, disk_path, out_path, image_format):
"""Snapshot an instance without downtime."""
- # Save a copy of the domain's running XML file
- xml = domain.XMLDesc(0)
+ # Save a copy of the domain's persistent XML file
+ xml = domain.XMLDesc(
+ libvirt.VIR_DOMAIN_XML_INACTIVE |
+ libvirt.VIR_DOMAIN_XML_SECURE)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
@@ -1740,27 +1774,27 @@ def _volume_snapshot_create(self, context, instance, domain,
network_disks_to_snap = [] # network disks (netfs, gluster, etc.)
disks_to_skip = [] # local disks not snapshotted
- for disk in device_info.devices:
- if (disk.root_name != 'disk'):
+ for guest_disk in device_info.devices:
+ if (guest_disk.root_name != 'disk'):
continue
- if (disk.target_dev is None):
+ if (guest_disk.target_dev is None):
continue
- if (disk.serial is None or disk.serial != volume_id):
- disks_to_skip.append(disk.target_dev)
+ if (guest_disk.serial is None or guest_disk.serial != volume_id):
+ disks_to_skip.append(guest_disk.target_dev)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
- 'dev': disk.target_dev,
- 'serial': disk.serial,
- 'current_file': disk.source_path,
- 'source_protocol': disk.source_protocol,
- 'source_name': disk.source_name,
- 'source_hosts': disk.source_hosts,
- 'source_ports': disk.source_ports
+ 'dev': guest_disk.target_dev,
+ 'serial': guest_disk.serial,
+ 'current_file': guest_disk.source_path,
+ 'source_protocol': guest_disk.source_protocol,
+ 'source_name': guest_disk.source_name,
+ 'source_hosts': guest_disk.source_hosts,
+ 'source_ports': guest_disk.source_ports
}
# Determine path for new_file based on current path
@@ -1852,8 +1886,8 @@ def volume_snapshot_create(self, context, instance, volume_id,
- snapshot_id : ID of snapshot
- type : qcow2 /
- new_file : qcow2 file created by Cinder which
- becomes the VM's active image after
- the snapshot is complete
+ becomes the VM's active image after
+ the snapshot is complete
"""
LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
@@ -1943,8 +1977,9 @@ def _volume_snapshot_delete(self, context, instance, volume_id,
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
- ##### Find dev name
+ # Find dev name
my_dev = None
+ active_disk = None
xml = virt_dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
@@ -1952,17 +1987,24 @@ def _volume_snapshot_delete(self, context, instance, volume_id,
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
- for disk in device_info.devices:
- if (disk.root_name != 'disk'):
+ active_disk_object = None
+
+ for guest_disk in device_info.devices:
+ if (guest_disk.root_name != 'disk'):
continue
- if (disk.target_dev is None or disk.serial is None):
+ if (guest_disk.target_dev is None or guest_disk.serial is None):
continue
- if disk.serial == volume_id:
- my_dev = disk.target_dev
+ if guest_disk.serial == volume_id:
+ my_dev = guest_disk.target_dev
+
+ active_disk = guest_disk.source_path
+ active_protocol = guest_disk.source_protocol
+ active_disk_object = guest_disk
+ break
- if my_dev is None:
+ if my_dev is None or (active_disk is None and active_protocol is None):
msg = _('Disk with id: %s '
'not found attached to instance.') % volume_id
LOG.debug('Domain XML: %s', xml)
@@ -1970,15 +2012,57 @@ def _volume_snapshot_delete(self, context, instance, volume_id,
LOG.debug("found device at %s", my_dev)
+ def _get_snap_dev(filename, backing_store):
+ if filename is None:
+ msg = _('filename cannot be None')
+ raise exception.NovaException(msg)
+
+ # libgfapi delete
+ LOG.debug("XML: %s", xml)
+
+ LOG.debug("active disk object: %s", active_disk_object)
+
+ # determine reference within backing store for desired image
+ filename_to_merge = filename
+ matched_name = None
+ b = backing_store
+ index = None
+
+ current_filename = active_disk_object.source_name.split('/')[1]
+ if current_filename == filename_to_merge:
+ return my_dev + '[0]'
+
+ while b is not None:
+ source_filename = b.source_name.split('/')[1]
+ if source_filename == filename_to_merge:
+ LOG.debug('found match: %s', b.source_name)
+ matched_name = b.source_name
+ index = b.index
+ break
+
+ b = b.backing_store
+
+ if matched_name is None:
+ msg = _('no match found for %s') % (filename_to_merge)
+ raise exception.NovaException(msg)
+
+ LOG.debug('index of match (%s) is %s', b.source_name, index)
+
+ my_snap_dev = '%s[%s]' % (my_dev, index)
+ return my_snap_dev
+
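
_get_snap_dev maps a snapshot filename onto libvirt's indexed device notation, which the blockRebase()/blockCommit() calls below accept for their base and top arguments; per the code above, index 0 names the active image and deeper indexes come from the parsed backing chain. A short illustration (device name and index are illustrative):

    my_dev = 'vda'
    # 'vda[0]'  -> the active image (the early-return case above)
    # 'vda[2]'  -> the backing-chain element whose parsed index is 2
    my_snap_dev = '%s[%s]' % (my_dev, 2)   # "vda[2]"
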
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
- rebase_base = delete_info['file_to_merge']
- rebase_bw = 0
rebase_flags = 0
+ rebase_base = delete_info['file_to_merge'] # often None
+ if active_protocol is not None:
+ rebase_base = _get_snap_dev(delete_info['file_to_merge'],
+ active_disk_object.backing_store)
+ rebase_bw = 0
LOG.debug('disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, flags: %(flags)s',
@@ -1993,27 +2077,53 @@ def _volume_snapshot_delete(self, context, instance, volume_id,
if result == 0:
LOG.debug('blockRebase started successfully')
- while self._wait_for_block_job(virt_dom, rebase_disk,
+ while self._wait_for_block_job(virt_dom, my_dev,
abort_on_error=True):
LOG.debug('waiting for blockRebase job completion')
time.sleep(0.5)
else:
# commit with blockCommit()
-
+ my_snap_base = None
+ my_snap_top = None
commit_disk = my_dev
- commit_base = delete_info['merge_target_file']
- commit_top = delete_info['file_to_merge']
+ commit_flags = 0
+
+ if active_protocol is not None:
+ my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
+ active_disk_object.backing_store)
+ my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
+ active_disk_object.backing_store)
+ try:
+ commit_flags |= libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
+ except AttributeError:
+ ver = '.'.join(
+ [str(x) for x in
+ MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION])
+ msg = _("Relative blockcommit support was not detected. "
+ "Libvirt '%s' or later is required for online "
+ "deletion of network storage-backed volume "
+ "snapshots.") % ver
+ raise exception.Invalid(msg)
+
+ commit_base = my_snap_base or delete_info['merge_target_file']
+ commit_top = my_snap_top or delete_info['file_to_merge']
bandwidth = 0
- flags = 0
+
+ LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
+ 'commit_base=%(commit_base)s '
+ 'commit_top=%(commit_top)s',
+ {'commit_disk': commit_disk,
+ 'commit_base': commit_base,
+ 'commit_top': commit_top})
result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,
- bandwidth, flags)
+ bandwidth, commit_flags)
if result == 0:
LOG.debug('blockCommit started successfully')
- while self._wait_for_block_job(virt_dom, commit_disk,
+ while self._wait_for_block_job(virt_dom, my_dev,
abort_on_error=True):
LOG.debug('waiting for blockCommit job completion')
time.sleep(0.5)
@@ -2142,6 +2252,7 @@ def _hard_reboot(self, context, instance, network_info,
# does we need to (re)generate the xml after the images
# are in place.
xml = self._get_guest_xml(context, instance, network_info, disk_info,
+ image_meta=image_meta,
block_device_info=block_device_info,
write_to_disk=True)
@@ -2182,8 +2293,85 @@ def unpause(self, instance):
dom = self._lookup_by_name(instance['name'])
dom.resume()
- def power_off(self, instance):
+ def _clean_shutdown(self, instance, timeout, retry_interval):
+ """Attempt to shutdown the instance gracefully.
+
+ :param instance: The instance to be shutdown
+ :param timeout: How long to wait in seconds for the instance to
+ shutdown
+ :param retry_interval: How often in seconds to signal the instance
+ to shutdown while waiting
+
+ :returns: True if the shutdown succeeded
+ """
+
+ # List of states that represent a shutdown instance
+ SHUTDOWN_STATES = [power_state.SHUTDOWN,
+ power_state.CRASHED]
+
+ try:
+ dom = self._lookup_by_name(instance["name"])
+ except exception.InstanceNotFound:
+ # If the instance has gone then we don't need to
+ # wait for it to shutdown
+ return True
+
+ (state, _max_mem, _mem, _cpus, _t) = dom.info()
+ state = LIBVIRT_POWER_STATE[state]
+ if state in SHUTDOWN_STATES:
+ LOG.info(_LI("Instance already shutdown."),
+ instance=instance)
+ return True
+
+ LOG.debug("Shutting down instance from state %s", state,
+ instance=instance)
+ dom.shutdown()
+ retry_countdown = retry_interval
+
+ for sec in six.moves.range(timeout):
+
+ dom = self._lookup_by_name(instance["name"])
+ (state, _max_mem, _mem, _cpus, _t) = dom.info()
+ state = LIBVIRT_POWER_STATE[state]
+
+ if state in SHUTDOWN_STATES:
+ LOG.info(_LI("Instance shutdown successfully after %d "
+ "seconds."), sec, instance=instance)
+ return True
+
+ # Note(PhilD): We can't assume that the Guest was able to process
+ # any previous shutdown signal (for example it may
+ # have still been starting up), so within the overall
+ # timeout we re-trigger the shutdown every
+ # retry_interval.
+ if retry_countdown == 0:
+ retry_countdown = retry_interval
+ # Instance could shutdown at any time, in which case we
+ # will get an exception when we call shutdown
+ try:
+ LOG.debug("Instance in state %s after %d seconds - "
+ "resending shutdown", state, sec,
+ instance=instance)
+ dom.shutdown()
+ except libvirt.libvirtError:
+ # Assume this is because it's now shut down, so loop
+ # one more time to clean up.
+ LOG.debug("Ignoring libvirt exception from shutdown "
+ "request.", instance=instance)
+ continue
+ else:
+ retry_countdown -= 1
+
+ time.sleep(1)
+
+ LOG.info(_LI("Instance failed to shutdown in %d seconds."),
+ timeout, instance=instance)
+ return False
+
+ def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
+ if timeout:
+ self._clean_shutdown(instance, timeout, retry_interval)
self._destroy(instance)
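
A hedged usage sketch of the reworked power-off path (the timeout and retry_interval values are illustrative):

    # Ask the guest to shut down cleanly, re-signalling every 10 seconds
    # for up to 60 seconds, then force the power-off regardless.
    driver.power_off(instance, timeout=60, retry_interval=10)
    # With the default timeout=0 the graceful phase is skipped entirely
    # and _destroy() is called immediately, preserving old behaviour.
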
def power_on(self, context, instance, network_info,
@@ -2515,12 +2703,13 @@ def _create_local(target, local_size, unit='G',
def _create_ephemeral(self, target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
- max_size=None):
+ max_size=None, specified_fs=None):
if not is_block_dev:
self._create_local(target, ephemeral_size)
# Run as root only for block devices.
- disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev)
+ disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
+ specified_fs=specified_fs)
@staticmethod
def _create_swap(target, swap_mb, max_size=None):
@@ -2534,9 +2723,9 @@ def _get_console_log_path(instance):
'console.log')
@staticmethod
- def _get_disk_config_path(instance):
+ def _get_disk_config_path(instance, suffix=''):
return os.path.join(libvirt_utils.get_instance_path(instance),
- 'disk.config')
+ 'disk.config' + suffix)
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
@@ -2559,7 +2748,7 @@ def _is_booted_from_volume(instance, disk_mapping):
or 'disk' not in disk_mapping)
def _inject_data(self, instance, network_info, admin_pass, files, suffix):
- """Injects data in an disk image
+ """Injects data in a disk image
Helper used for injecting data in a disk image file system.
@@ -2568,7 +2757,7 @@ def _inject_data(self, instance, network_info, admin_pass, files, suffix):
network_info -- a dict that refers to network specifications
admin_pass -- a string used to set an admin password
files -- a list of files needs to be injected
- suffix -- a string used as a image name suffix
+ suffix -- a string used as an image name suffix
"""
# Handles the partition need to be used.
target_partition = None
@@ -2590,7 +2779,8 @@ def _inject_data(self, instance, network_info, admin_pass, files, suffix):
admin_pass = None
# Handles the network injection.
- net = netutils.get_injected_network_template(network_info)
+ net = netutils.get_injected_network_template(
+ network_info, libvirt_virt_type=CONF.libvirt.virt_type)
# Handles the metadata injection
metadata = instance.get('metadata')
@@ -2603,17 +2793,17 @@ def _inject_data(self, instance, network_info, admin_pass, files, suffix):
image_type)
img_id = instance['image_ref']
+ if not injection_image.check_image_exists():
+ LOG.warn(_LW('Image %s not found on disk storage. '
+ 'Continue without injecting data'),
+ injection_image.path, instance=instance)
+ return
try:
- if injection_image.check_image_exists():
- disk.inject_data(injection_image.path,
- key, net, metadata, admin_pass, files,
- partition=target_partition,
- use_cow=CONF.use_cow_images,
- mandatory=('files',))
- else:
- LOG.warn(_LW('Image %s not found on disk storage. '
- 'Continue without injecting data'),
- injection_image.path, instance=instance)
+ disk.inject_data(injection_image.path,
+ key, net, metadata, admin_pass, files,
+ partition=target_partition,
+ use_cow=CONF.use_cow_images,
+ mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error injecting data into image '
@@ -2689,13 +2879,23 @@ def raw(fname):
if size == 0 or suffix == '.rescue':
size = None
- image('disk').cache(fetch_func=libvirt_utils.fetch_image,
- context=context,
- filename=root_fname,
- size=size,
- image_id=disk_images['image_id'],
- user_id=instance['user_id'],
- project_id=instance['project_id'])
+ backend = image('disk')
+ if backend.SUPPORTS_CLONE:
+ def clone_fallback_to_fetch(*args, **kwargs):
+ try:
+ backend.clone(context, disk_images['image_id'])
+ except exception.ImageUnacceptable:
+ libvirt_utils.fetch_image(*args, **kwargs)
+ fetch_func = clone_fallback_to_fetch
+ else:
+ fetch_func = libvirt_utils.fetch_image
+ backend.cache(fetch_func=fetch_func,
+ context=context,
+ filename=root_fname,
+ size=size,
+ image_id=disk_images['image_id'],
+ user_id=instance['user_id'],
+ project_id=instance['project_id'])
# Lookup the filesystem type if required
os_type_with_default = disk.get_fs_type_for_os_type(
@@ -2718,6 +2918,12 @@ def raw(fname):
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
+
+ specified_fs = eph.get('guest_format')
+ if specified_fs and not self.is_supported_fs_format(specified_fs):
+ msg = _("%s format is not supported") % specified_fs
+ raise exception.InvalidBDMFormat(details=msg)
+
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance["os_type"],
@@ -2728,7 +2934,8 @@ def raw(fname):
fetch_func=fn,
filename=fname,
size=size,
- ephemeral_size=eph['size'])
+ ephemeral_size=eph['size'],
+ specified_fs=specified_fs)
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
@@ -2759,7 +2966,7 @@ def raw(fname):
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
- configdrive_path = self._get_disk_config_path(instance)
+ configdrive_path = self._get_disk_config_path(instance, suffix)
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
@@ -2771,6 +2978,16 @@ def raw(fname):
'with error: %s'),
e, instance=instance)
+ def dummy_fetch_func(target, *args, **kwargs):
+ # NOTE(sileht): this is never called because the
+ # target has already been created by the
+ # cdb.make_drive call
+ pass
+
+ raw('disk.config').cache(fetch_func=dummy_fetch_func,
+ context=context,
+ filename='disk.config' + suffix)
+
# File injection only if needed
elif inject_files and CONF.libvirt.inject_partition != -2:
if booted_from_volume:
@@ -2880,7 +3097,7 @@ def _set_host_enabled(self, enabled,
ctx = nova_context.get_admin_context()
try:
- service = service_obj.Service.get_by_compute_host(ctx, CONF.host)
+ service = objects.Service.get_by_compute_host(ctx, CONF.host)
if service.disabled != disable_service:
# Note(jang): this is a quick fix to stop operator-
@@ -2947,29 +3164,6 @@ def _get_host_uuid(self):
caps = self._get_host_capabilities()
return caps.host.uuid
- def _get_host_cpu_for_guest(self):
- """Returns an instance of config.LibvirtConfigGuestCPU
- representing the host's CPU model & topology with
- policy for configuring a guest to match
- """
-
- caps = self._get_host_capabilities()
- hostcpu = caps.host.cpu
- guestcpu = vconfig.LibvirtConfigGuestCPU()
-
- guestcpu.model = hostcpu.model
- guestcpu.vendor = hostcpu.vendor
- guestcpu.arch = hostcpu.arch
-
- guestcpu.match = "exact"
-
- for hostfeat in hostcpu.features:
- guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name)
- guestfeat.policy = "require"
- guestcpu.add_feature(guestfeat)
-
- return guestcpu
-
def _get_guest_cpu_model_config(self):
mode = CONF.libvirt.cpu_mode
model = CONF.libvirt.cpu_model
@@ -3003,22 +3197,9 @@ def _get_guest_cpu_model_config(self):
LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
{'mode': mode, 'model': (model or "")})
- # TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is
- # updated to be at least this new, we can kill off the elif
- # blocks here
- if self._has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION):
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.mode = mode
- cpu.model = model
- elif mode == "custom":
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.model = model
- elif mode == "host-model":
- cpu = self._get_host_cpu_for_guest()
- elif mode == "host-passthrough":
- msg = _("Passthrough of the host CPU was requested but "
- "this libvirt version does not support this feature")
- raise exception.NovaException(msg)
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.mode = mode
+ cpu.model = model
return cpu
@@ -3124,14 +3305,22 @@ def _get_guest_storage_config(self, instance, image_meta,
cfg = self._connect_volume(connection_info, info)
devices.append(cfg)
vol['connection_info'] = connection_info
- vol.save(nova_context.get_admin_context())
+ vol.save()
if 'disk.config' in disk_mapping:
+ # NOTE(sileht): a configdrive is a raw image.
+ # It works well with rbd, lvm and raw images_type,
+ # but we must force the image_type to raw if the
+ # desired images_type is qcow2
+ if CONF.libvirt.images_type not in ['rbd', 'lvm']:
+ image_type = "raw"
+ else:
+ image_type = None
diskconfig = self._get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
- 'raw')
+ image_type)
devices.append(diskconfig)
for d in devices:
@@ -3147,6 +3336,35 @@ def _get_guest_storage_config(self, instance, image_meta,
return devices
+ def _get_host_sysinfo_serial_hardware(self):
+ """Get a UUID from the host hardware
+
+ Get a UUID for the host hardware reported by libvirt.
+ This is typically from the SMBIOS data, unless it has
+ been overridden in /etc/libvirt/libvirtd.conf
+ """
+ return self._get_host_uuid()
+
+ def _get_host_sysinfo_serial_os(self):
+ """Get a UUID from the host operating system
+
+ Get a UUID for the host operating system. Modern Linux
+ distros based on systemd provide a /etc/machine-id
+ file containing a UUID. This is also provided inside
+ systemd based containers and can be provided by other
+ init systems too, since it is just a plain text file.
+ """
+ with open("/etc/machine-id") as f:
+ # We want to have '-' in the right place
+ # so we parse & reformat the value
+ return str(uuid.UUID(f.read().split()[0]))
+
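
/etc/machine-id stores the UUID as 32 hex digits without dashes; a minimal, runnable sketch of the normalization performed above (the file content is illustrative):

    import uuid
    raw = "2d1a6b80c3b44c7f9d9d3b9f1a2b3c4d\n"   # example /etc/machine-id
    print(str(uuid.UUID(raw.split()[0])))
    # -> 2d1a6b80-c3b4-4c7f-9d9d-3b9f1a2b3c4d
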
+ def _get_host_sysinfo_serial_auto(self):
+ if os.path.exists("/etc/machine-id"):
+ return self._get_host_sysinfo_serial_os()
+ else:
+ return self._get_host_sysinfo_serial_hardware()
+
def _get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
@@ -3154,7 +3372,7 @@ def _get_guest_config_sysinfo(self, instance):
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
- sysinfo.system_serial = self._get_host_uuid()
+ sysinfo.system_serial = self._sysinfo_serial_func()
sysinfo.system_uuid = instance['uuid']
return sysinfo
@@ -3173,8 +3391,74 @@ def _get_guest_pci_device(self, pci_device):
return dev
+ def _get_guest_config_meta(self, context, instance, flavor):
+ """Get metadata config for guest."""
+
+ meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
+ meta.package = version.version_string_with_package()
+ meta.name = instance["display_name"]
+ meta.creationTime = time.time()
+
+ if instance["image_ref"] not in ("", None):
+ meta.roottype = "image"
+ meta.rootid = instance["image_ref"]
+
+ if context is not None:
+ ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
+ ometa.userid = context.user_id
+ ometa.username = context.user_name
+ ometa.projectid = context.project_id
+ ometa.projectname = context.project_name
+ meta.owner = ometa
+
+ fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
+ fmeta.name = flavor.name
+ fmeta.memory = flavor.memory_mb
+ fmeta.vcpus = flavor.vcpus
+ fmeta.ephemeral = flavor.ephemeral_gb
+ fmeta.disk = flavor.root_gb
+ fmeta.swap = flavor.swap
+
+ meta.flavor = fmeta
+
+ return meta
+
+ def _machine_type_mappings(self):
+ mappings = {}
+ for mapping in CONF.libvirt.hw_machine_type:
+ host_arch, _, machine_type = mapping.partition('=')
+ mappings[host_arch] = machine_type
+ return mappings
+
+ def _get_machine_type(self, image_meta, caps):
+ # The underlying machine type can be set as an image attribute,
+ # or otherwise based on some architecture specific defaults
+
+ mach_type = None
+
+ if (image_meta is not None and image_meta.get('properties') and
+ image_meta['properties'].get('hw_machine_type')
+ is not None):
+ mach_type = image_meta['properties']['hw_machine_type']
+ else:
+ # For ARM systems we will default to vexpress-a15 for armv7
+ # and virt for aarch64
+ if caps.host.cpu.arch == "armv7l":
+ mach_type = "vexpress-a15"
+
+ if caps.host.cpu.arch == "aarch64":
+ mach_type = "virt"
+
+ # If set in the config, use that as the default.
+ if CONF.libvirt.hw_machine_type:
+ mappings = self._machine_type_mappings()
+ mach_type = mappings.get(caps.host.cpu.arch)
+
+ return mach_type
+
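
A runnable sketch of the hw_machine_type parse performed by _machine_type_mappings, using the example value from the option's help text:

    hw_machine_type = ['x86_64=machinetype1', 'armv7l=machinetype2']
    mappings = {}
    for mapping in hw_machine_type:
        host_arch, _, machine_type = mapping.partition('=')
        mappings[host_arch] = machine_type
    assert mappings == {'x86_64': 'machinetype1', 'armv7l': 'machinetype2'}
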
def _get_guest_config(self, instance, network_info, image_meta,
- disk_info, rescue=None, block_device_info=None):
+ disk_info, rescue=None, block_device_info=None,
+ context=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
@@ -3182,7 +3466,7 @@ def _get_guest_config(self, instance, network_info, image_meta,
'kernel_id' if a kernel is needed for the rescue image.
"""
- flavor = flavor_obj.Flavor.get_by_id(
+ flavor = objects.Flavor.get_by_id(
nova_context.get_admin_context(read_deleted='yes'),
instance['instance_type_id'])
inst_path = libvirt_utils.get_instance_path(instance)
@@ -3200,6 +3484,10 @@ def _get_guest_config(self, instance, network_info, image_meta,
guest.vcpus = flavor.vcpus
guest.cpuset = hardware.get_vcpu_pin_set()
+ guest.metadata.append(self._get_guest_config_meta(context,
+ instance,
+ flavor))
+
cputuning = ['shares', 'period', 'quota']
for name in cputuning:
key = "quota:cpu_" + name
@@ -3243,22 +3531,7 @@ def _get_guest_config(self, instance, network_info, image_meta,
if caps.host.cpu.arch in ("i686", "x86_64"):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
-
- # The underlying machine type can be set as an image attribute,
- # or otherwise based on some architecture specific defaults
- if (image_meta is not None and image_meta.get('properties') and
- image_meta['properties'].get('hw_machine_type')
- is not None):
- guest.os_mach_type = \
- image_meta['properties']['hw_machine_type']
- else:
- # For ARM systems we will default to vexpress-a15 for armv7
- # and virt for aarch64
- if caps.host.cpu.arch == "armv7l":
- guest.os_mach_type = "vexpress-a15"
-
- if caps.host.cpu.arch == "aarch64":
- guest.os_mach_type = "virt"
+ guest.os_mach_type = self._get_machine_type(image_meta, caps)
if CONF.libvirt.virt_type == "lxc":
guest.os_init_path = "/sbin/init"
@@ -3342,20 +3615,19 @@ def _get_guest_config(self, instance, network_info, image_meta,
tmhpet.present = False
clk.add_timer(tmhpet)
- for cfg in self._get_guest_storage_config(instance,
+ for config in self._get_guest_storage_config(instance,
image_meta,
disk_info,
rescue,
block_device_info,
flavor):
- guest.add_device(cfg)
+ guest.add_device(config)
for vif in network_info:
- cfg = self.vif_driver.get_config(instance,
- vif,
- image_meta,
- flavor)
- guest.add_device(cfg)
+ config = self.vif_driver.get_config(
+ instance, vif, image_meta,
+ flavor, CONF.libvirt.virt_type)
+ guest.add_device(config)
if ((CONF.libvirt.virt_type == "qemu" or
CONF.libvirt.virt_type == "kvm")):
@@ -3517,7 +3789,7 @@ def _get_guest_config(self, instance, network_info, image_meta,
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
- # We should get image metadata every time for generating xml
+
if image_meta is None:
image_ref = instance['image_ref']
image_meta = compute_utils.get_image_metadata(
@@ -3538,7 +3810,8 @@ def _get_guest_xml(self, context, instance, network_info, disk_info,
# need to sanitize the password in the message.
LOG.debug(logging.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
- disk_info, rescue, block_device_info)
+ disk_info, rescue, block_device_info,
+ context)
xml = conf.to_xml()
if write_to_disk:
@@ -3609,6 +3882,43 @@ def get_info(self, instance):
'cpu_time': dom_info[4],
'id': virt_dom.ID()}
+ def _create_domain_setup_lxc(self, instance):
+ inst_path = libvirt_utils.get_instance_path(instance)
+ container_dir = os.path.join(inst_path, 'rootfs')
+ fileutils.ensure_tree(container_dir)
+
+ image = self.image_backend.image(instance, 'disk')
+ rootfs_dev = disk.setup_container(image.path,
+ container_dir=container_dir,
+ use_cow=CONF.use_cow_images)
+
+ try:
+ # Save rootfs device to disconnect it when deleting the instance
+ if rootfs_dev:
+ instance.system_metadata['rootfs_device_name'] = rootfs_dev
+ instance.save()
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self._create_domain_cleanup_lxc(instance)
+
+ def _create_domain_cleanup_lxc(self, instance):
+ inst_path = libvirt_utils.get_instance_path(instance)
+ container_dir = os.path.join(inst_path, 'rootfs')
+
+ try:
+ state = self.get_info(instance)['state']
+ except exception.InstanceNotFound:
+ # The domain may not be present if the instance failed to start
+ state = None
+
+ if state == power_state.RUNNING:
+ # NOTE(uni): Now the container is running with its own private
+ # mount namespace and so there is no need to keep the container
+ # rootfs mounted in the host namespace
+ disk.clean_lxc_namespace(container_dir=container_dir)
+ else:
+ disk.teardown_container(container_dir=container_dir)
+
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
"""Create a domain.
@@ -3616,64 +3926,30 @@ def _create_domain(self, xml=None, domain=None,
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
- inst_path = None
- if instance:
- inst_path = libvirt_utils.get_instance_path(instance)
-
- if CONF.libvirt.virt_type == 'lxc':
- if not inst_path:
- inst_path = None
-
- container_dir = os.path.join(inst_path, 'rootfs')
- fileutils.ensure_tree(container_dir)
- image = self.image_backend.image(instance, 'disk')
- container_root_device = disk.setup_container(image.path,
- container_dir=container_dir,
- use_cow=CONF.use_cow_images)
-
- #Note(GuanQiang): save container root device name here, used for
- # detaching the linked image device when deleting
- # the lxc instance.
- if container_root_device:
- instance.root_device_name = container_root_device
- instance.save()
-
- if xml:
- try:
+ err = None
+ if instance and CONF.libvirt.virt_type == 'lxc':
+ self._create_domain_setup_lxc(instance)
+ try:
+ if xml:
+ err = _LE('Error defining a domain with XML: %s') % xml
domain = self._conn.defineXML(xml)
- except Exception as e:
- LOG.error(_LE("An error occurred while trying to define "
- "a domain with xml: %s"), xml)
- raise e
- if power_on:
- try:
+ if power_on:
+ err = _LE('Error launching a defined domain with XML: %s') \
+ % domain.XMLDesc(0)
domain.createWithFlags(launch_flags)
- except Exception as e:
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("An error occurred while trying to launch a "
- "defined domain with xml: %s"),
- domain.XMLDesc(0))
- if not utils.is_neutron():
- try:
+ if not utils.is_neutron():
+ err = _LE('Error enabling hairpin mode with XML: %s') \
+ % domain.XMLDesc(0)
self._enable_hairpin(domain.XMLDesc(0))
- except Exception:
- with excutils.save_and_reraise_exception():
- LOG.error(_LE("An error occurred while enabling hairpin "
- "mode on domain with xml: %s"),
- domain.XMLDesc(0))
-
- # NOTE(uni): Now the container is running with its own private mount
- # namespace and so there is no need to keep the container rootfs
- # mounted in the host namespace
- if CONF.libvirt.virt_type == 'lxc':
- state = self.get_info(instance)['state']
- container_dir = os.path.join(inst_path, 'rootfs')
- if state == power_state.RUNNING:
- disk.clean_lxc_namespace(container_dir=container_dir)
- else:
- disk.teardown_container(container_dir=container_dir)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ if err:
+ LOG.error(err)
+ finally:
+ if instance and CONF.libvirt.virt_type == 'lxc':
+ self._create_domain_cleanup_lxc(instance)
return domain
@@ -3708,12 +3984,13 @@ def _create_domain_and_network(self, context, xml, instance, network_info,
conf = self._connect_volume(connection_info, disk_info)
# cache device_path in connection_info -- required by encryptors
- if (not reboot and 'data' in connection_info and
- 'volume_id' in connection_info['data']):
+ if 'data' in connection_info:
connection_info['data']['device_path'] = conf.source_path
vol['connection_info'] = connection_info
vol.save(context)
+ if (not reboot and 'data' in connection_info and
+ 'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
@@ -3874,6 +4151,8 @@ def _get_local_gb_info():
if CONF.libvirt.images_type == 'lvm':
info = lvm.get_volume_group_info(
CONF.libvirt.images_volume_group)
+ elif CONF.libvirt.images_type == 'rbd':
+ info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
@@ -4094,7 +4373,7 @@ def _get_device_type(cfgdev):
"vendor_id": cfgdev.pci_capability.vendor_id[2:6],
}
- #requirement by DataBase Model
+ # requirement by DataBase Model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev))
return device
@@ -4204,9 +4483,9 @@ def interface_stats(self, instance_name, iface_id):
return domain.interfaceStats(iface_id)
def get_console_pool_info(self, console_type):
- #TODO(mdragon): console proxy should be implemented for libvirt,
- # in case someone wants to use it with kvm or
- # such. For now return fake data.
+ # TODO(mdragon): console proxy should be implemented for libvirt,
+ # in case someone wants to use it with kvm or
+ # such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
@@ -4724,13 +5003,6 @@ def pre_live_migration(self, context, instance, block_device_info,
is_block_migration = migrate_data.get('block_migration', True)
instance_relative_path = migrate_data.get('instance_relative_path')
- if not (is_shared_instance_path and is_shared_block_storage):
- # NOTE(mikal): live migration of instances using config drive is
- # not supported because of a bug in libvirt (read only devices
- # are not copied by libvirt). See bug/1246201
- if configdrive.required_by(instance):
- raise exception.NoLiveMigrationForConfigDriveInLibVirt()
-
if not is_shared_instance_path:
# NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
# because we are ensuring that the same instance directory name
@@ -4745,10 +5017,10 @@ def pre_live_migration(self, context, instance, block_device_info,
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
- if not is_shared_block_storage:
- # Ensure images and backing files are present.
- self._create_images_and_backing(context, instance,
- instance_dir, disk_info)
+ if not is_shared_block_storage:
+ # Ensure images and backing files are present.
+ self._create_images_and_backing(context, instance,
+ instance_dir, disk_info)
if not (is_block_migration or is_shared_instance_path):
# NOTE(angdraug): when block storage is shared between source and
@@ -4868,6 +5140,15 @@ def post_live_migration(self, context, instance, block_device_info,
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
+ def post_live_migration_at_source(self, context, instance, network_info):
+ """Unplug VIFs from networks at source.
+
+ :param context: security context
+ :param instance: instance object reference
+ :param network_info: instance network information
+ """
+ self.unplug_vifs(instance, network_info)
+
def post_live_migration_at_destination(self, context,
instance,
network_info,
@@ -4999,7 +5280,7 @@ def _get_disk_over_committed_size_total(self):
'but disk file was removed by concurrent '
'operations such as resize.'),
{'i_name': dom.name()})
- if e.errno == errno.EACCES:
+ elif e.errno == errno.EACCES:
LOG.warn(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but access is denied. It is most likely '
@@ -5035,7 +5316,7 @@ def get_host_cpu_stats(self):
def get_host_uptime(self, host):
"""Returns the result of calling "uptime"."""
- #NOTE(dprince): host seems to be ignored for this call and in
+ # NOTE(dprince): host seems to be ignored for this call and in
# other compute drivers as well. Perhaps we should remove it?
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
@@ -5079,9 +5360,10 @@ def _is_storage_shared_with(self, dest, inst_base):
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
- block_device_info=None):
+ block_device_info=None,
+ timeout=0, retry_interval=0):
LOG.debug("Starting migrate_disk_and_power_off",
- instance=instance)
+ instance=instance)
# Checks if the migration needs a disk resize down.
for kind in ('root_gb', 'ephemeral_gb'):
@@ -5094,6 +5376,12 @@ def migrate_disk_and_power_off(self, context, instance, dest,
block_device_info=block_device_info)
disk_info = jsonutils.loads(disk_info_text)
+ # NOTE(dgenin): Migration is not implemented for LVM backed instances.
+ if (CONF.libvirt.images_type == 'lvm' and
+ not self._is_booted_from_volume(instance, disk_info_text)):
+ reason = "Migration is not supported for LVM backed instances"
+ raise exception.MigrationPreCheckError(reason)
+
# copy disks to destination
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
@@ -5107,7 +5395,7 @@ def migrate_disk_and_power_off(self, context, instance, dest,
if not shared_storage:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
- self.power_off(instance)
+ self.power_off(instance, timeout, retry_interval)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
@@ -5218,7 +5506,7 @@ def _disk_resize(self, info, size):
"""
# If we have a non partitioned image that we can extend
# then ensure we're in 'raw' format so we can extend file system.
- fmt = info['type']
+ fmt, org = [info['type']] * 2
pth = info['path']
if (size and fmt == 'qcow2' and
disk.can_resize_image(pth, size) and
@@ -5230,7 +5518,7 @@ def _disk_resize(self, info, size):
use_cow = fmt == 'qcow2'
disk.extend(pth, size, use_cow=use_cow)
- if fmt == 'raw' and CONF.use_cow_images:
+ if fmt != org:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
self._disk_raw_to_qcow2(pth)
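
The fmt, org = [info['type']] * 2 idiom keeps the original format in org so the method can detect whether it flattened a qcow2 image to raw for the resize; a short sketch of the round trip:

    fmt, org = ['qcow2'] * 2   # both start as the on-disk format
    # the image is converted to raw so the filesystem can be extended
    fmt = 'raw'
    # ... disk.extend() runs against the raw image ...
    if fmt != org:
        pass  # convert back to qcow2 (no backing file) for snapshots
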
@@ -5244,7 +5532,10 @@ def finish_migration(self, context, migration, instance, disk_info,
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
size = self._disk_size_from_instance(instance, info)
- self._disk_resize(info, size)
+ if resize_instance:
+ self._disk_resize(info, size)
+ if info['type'] == 'raw' and CONF.use_cow_images:
+ self._disk_raw_to_qcow2(info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
@@ -5310,24 +5601,25 @@ def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
- def get_diagnostics(self, instance):
- def get_io_devices(xml_doc):
- """get the list of io devices from the xml document."""
- result = {"volumes": [], "ifaces": []}
- try:
- doc = etree.fromstring(xml_doc)
- except Exception:
- return result
- blocks = [('./devices/disk', 'volumes'),
- ('./devices/interface', 'ifaces')]
- for block, key in blocks:
- section = doc.findall(block)
- for node in section:
- for child in node.getchildren():
- if child.tag == 'target' and child.get('dev'):
- result[key].append(child.get('dev'))
+ @staticmethod
+ def _get_io_devices(xml_doc):
+ """get the list of io devices from the xml document."""
+ result = {"volumes": [], "ifaces": []}
+ try:
+ doc = etree.fromstring(xml_doc)
+ except Exception:
return result
+ blocks = [('./devices/disk', 'volumes'),
+ ('./devices/interface', 'ifaces')]
+ for block, key in blocks:
+ section = doc.findall(block)
+ for node in section:
+ for child in node.getchildren():
+ if child.tag == 'target' and child.get('dev'):
+ result[key].append(child.get('dev'))
+ return result
+ def get_diagnostics(self, instance):
domain = self._lookup_by_name(instance['name'])
output = {}
# get cpu time, might launch an exception if the method
@@ -5341,18 +5633,18 @@ def get_io_devices(xml_doc):
pass
# get io status
xml = domain.XMLDesc(0)
- dom_io = get_io_devices(xml)
- for disk in dom_io["volumes"]:
+ dom_io = LibvirtDriver._get_io_devices(xml)
+ for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
- stats = domain.blockStats(disk)
- output[disk + "_read_req"] = stats[0]
- output[disk + "_read"] = stats[1]
- output[disk + "_write_req"] = stats[2]
- output[disk + "_write"] = stats[3]
- output[disk + "_errors"] = stats[4]
+ stats = domain.blockStats(guest_disk)
+ output[guest_disk + "_read_req"] = stats[0]
+ output[guest_disk + "_read"] = stats[1]
+ output[guest_disk + "_write_req"] = stats[2]
+ output[guest_disk + "_write"] = stats[3]
+ output[guest_disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
@@ -5383,6 +5675,76 @@ def get_io_devices(xml_doc):
pass
return output
+ def get_instance_diagnostics(self, instance):
+ domain = self._lookup_by_name(instance['name'])
+ xml = domain.XMLDesc(0)
+ xml_doc = etree.fromstring(xml)
+
+ (state, max_mem, mem, num_cpu, cpu_time) = domain.info()
+ config_drive = configdrive.required_by(instance)
+ launched_at = timeutils.normalize_time(instance['launched_at'])
+ uptime = timeutils.delta_seconds(launched_at,
+ timeutils.utcnow())
+ diags = diagnostics.Diagnostics(state=power_state.STATE_MAP[state],
+ driver='libvirt',
+ config_drive=config_drive,
+ hypervisor_os='linux',
+ uptime=uptime)
+ diags.memory_details.maximum = max_mem / units.Mi
+ diags.memory_details.used = mem / units.Mi
+
+ # get cpu time, might launch an exception if the method
+ # is not supported by the underlying hypervisor being
+ # used by libvirt
+ try:
+ cputime = domain.vcpus()[0]
+ num_cpus = len(cputime)
+ for i in range(num_cpus):
+ diags.add_cpu(time=cputime[i][2])
+ except libvirt.libvirtError:
+ pass
+ # get io status
+ dom_io = LibvirtDriver._get_io_devices(xml)
+ for guest_disk in dom_io["volumes"]:
+ try:
+ # blockStats might launch an exception if the method
+ # is not supported by the underlying hypervisor being
+ # used by libvirt
+ stats = domain.blockStats(guest_disk)
+ diags.add_disk(read_bytes=stats[1],
+ read_requests=stats[0],
+ write_bytes=stats[3],
+ write_requests=stats[2])
+ except libvirt.libvirtError:
+ pass
+ for interface in dom_io["ifaces"]:
+ try:
+ # interfaceStats might launch an exception if the method
+ # is not supported by the underlying hypervisor being
+ # used by libvirt
+ stats = domain.interfaceStats(interface)
+ diags.add_nic(rx_octets=stats[0],
+ rx_errors=stats[2],
+ rx_drop=stats[3],
+ rx_packets=stats[1],
+ tx_octets=stats[4],
+ tx_errors=stats[6],
+ tx_drop=stats[7],
+ tx_packets=stats[5])
+ except libvirt.libvirtError:
+ pass
+
+ # Update mac addresses of interface if stats have been reported
+ if len(diags.nic_details) > 0:
+ ret = xml_doc.findall('./devices/interface')
+ index = 0
+ for node in ret:
+ for child in node.getchildren():
+ if child.tag == 'mac':
+ diags.nic_details[index].mac_address = child.get(
+ 'address')
+ # move to the next recorded NIC so each interface
+ # updates its own entry instead of always the first
+ index += 1
+ return diags
+
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
@@ -5409,23 +5771,59 @@ def _delete_instance_files(self, instance):
def delete_instance_files(self, instance):
target = libvirt_utils.get_instance_path(instance)
- if os.path.exists(target):
- LOG.info(_LI('Deleting instance files %s'), target,
+ # A resize may be in progress
+ target_resize = target + '_resize'
+ # Other threads may attempt to rename the path, so rename it to
+ # target + '_del' (rename is atomic) and iterate twice to cover the
+ # unlikely event that a concurrent rename occurs between the two
+ # rename attempts in this method. In general this method should be
+ # fairly thread-safe without these additional checks, since other
+ # operations involving renames are not permitted when the task state
+ # is not None, and the task state should be set to something other
+ # than None by the time this method is invoked.
+ target_del = target + '_del'
+ for i in six.moves.range(2):
+ try:
+ utils.execute('mv', target, target_del)
+ break
+ except Exception:
+ pass
+ try:
+ utils.execute('mv', target_resize, target_del)
+ break
+ except Exception:
+ pass
+ # Either the target or target_resize path may still exist if all
+ # rename attempts failed.
+ remaining_path = None
+ for p in (target, target_resize):
+ if os.path.exists(p):
+ remaining_path = p
+ break
+
+ # A previous delete attempt may have been interrupted, so target_del
+ # may exist even if all rename attempts during the present method
+ # invocation failed due to the absence of both target and
+ # target_resize.
+ if not remaining_path and os.path.exists(target_del):
+ LOG.info(_LI('Deleting instance files %s'), target_del,
instance=instance)
+ remaining_path = target_del
try:
- shutil.rmtree(target)
+ shutil.rmtree(target_del)
except OSError as e:
LOG.error(_LE('Failed to cleanup directory %(target)s: '
- '%(e)s'), {'target': target, 'e': e},
+ '%(e)s'), {'target': target_del, 'e': e},
instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
- if os.path.exists(target):
- LOG.info(_LI('Deletion of %s failed'), target, instance=instance)
+ if remaining_path and os.path.exists(remaining_path):
+ LOG.info(_LI('Deletion of %s failed'), remaining_path,
+ instance=instance)
return False
- LOG.info(_LI('Deletion of %s complete'), target, instance=instance)
+ LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
return True
@property
@@ -5453,6 +5851,10 @@ def default_device_names_for_instance(self, instance, root_device_name,
ephemerals, swap,
block_device_mapping)
+ def is_supported_fs_format(self, fs_type):
+ return fs_type in [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
+ disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
+
class HostState(object):
"""Manages information about the compute node through libvirt."""
@@ -5494,7 +5896,7 @@ def _get_disk_available_least():
disk_info_dict = self.driver._get_local_gb_info()
data = {}
- #NOTE(dprince): calling capabilities before getVersion works around
+ # NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index f36e7946ff..c8922fc278 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -18,8 +18,9 @@
from oslo.config import cfg
from nova.cloudpipe import pipelib
-from nova.openstack.common.gettextutils import _LI
-from nova.openstack.common.gettextutils import _LW
+from nova.i18n import _LI
+from nova.i18n import _LW
+from nova.openstack.common import importutils
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
from nova.virt import netutils
@@ -44,7 +45,7 @@ def __init__(self, virtapi, get_connection, **kwargs):
global libvirt
if libvirt is None:
try:
- libvirt = __import__('libvirt')
+ libvirt = importutils.import_module('libvirt')
except ImportError:
LOG.warn(_LW("Libvirt module could not be loaded. "
"NWFilterFirewall will not work correctly."))
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 4d73d536d1..ba226f9284 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -21,10 +21,11 @@
import six
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LE
+from nova import image
from nova.openstack.common import excutils
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LE
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
@@ -33,17 +34,9 @@
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import lvm
+from nova.virt.libvirt import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
-
-try:
- import rados
- import rbd
-except ImportError:
- rados = None
- rbd = None
-
-
__imagebackend_opts = [
cfg.StrOpt('images_type',
default='default',
@@ -76,13 +69,18 @@
CONF.register_opts(__imagebackend_opts, 'libvirt')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('preallocate_images', 'nova.virt.driver')
+CONF.import_opt('rbd_user', 'nova.virt.libvirt.volume', group='libvirt')
+CONF.import_opt('rbd_secret_uuid', 'nova.virt.libvirt.volume', group='libvirt')
LOG = logging.getLogger(__name__)
+IMAGE_API = image.API()
@six.add_metaclass(abc.ABCMeta)
class Image(object):
+ SUPPORTS_CLONE = False
+
def __init__(self, source_type, driver_format, is_block_dev=False):
"""Image initialization.
@@ -112,9 +110,10 @@ def create_image(self, prepare_template, base, size, *args, **kwargs):
Contains specific behavior for each image type.
:prepare_template: function, that creates template.
- Should accept `target` argument.
+ Should accept a `target` argument.
:base: Template name
:size: Size of created image in bytes
+
"""
pass
@@ -203,8 +202,7 @@ def _can_fallocate(self):
'path': self.path})
return can_fallocate
- @staticmethod
- def verify_base_size(base, size, base_size=0):
+ def verify_base_size(self, base, size, base_size=0):
"""Check that the base image is not larger than size.
Since images can't be generally shrunk, enforce this
constraint taking account of virtual image size.
@@ -223,7 +221,7 @@ def verify_base_size(base, size, base_size=0):
return
if size and not base_size:
- base_size = disk.get_disk_size(base)
+ base_size = self.get_disk_size(base)
if size < base_size:
msg = _LE('%(base)s virtual size %(base_size)s '
@@ -233,6 +231,9 @@ def verify_base_size(base, size, base_size=0):
'size': size})
raise exception.FlavorDiskTooSmall()
+ def get_disk_size(self, name):
+ return disk.get_disk_size(name)
+
def snapshot_extract(self, target, out_format):
raise NotImplementedError()
@@ -301,6 +302,21 @@ def is_shared_block_storage():
"""True if the backend puts images on a shared block storage."""
return False
+ def clone(self, context, image_id_or_uri):
+ """Clone an image.
+
+ Note that the clone operation is backend-dependent. The backend may
+ ask the image API for a list of image "locations" and select one or
+ more of those locations to clone the image from.
+
+ :param image_id_or_uri: The ID or URI of an image to clone.
+
+ :raises: exception.ImageUnacceptable if it cannot be cloned
+ """
+ reason = _('clone() is not implemented')
+ raise exception.ImageUnacceptable(image_id=image_id_or_uri,
+ reason=reason)
+
class Raw(Image):
def __init__(self, instance=None, disk_name=None, path=None):
@@ -336,7 +352,7 @@ def copy_raw_image(base, target, size):
generating = 'image_id' not in kwargs
if generating:
if not self.check_image_exists():
- #Generating image in place
+ # Generating image in place
prepare_template(target=self.path, *args, **kwargs)
else:
if not os.path.exists(base):
@@ -463,7 +479,7 @@ def create_lvm_image(base, size):
generated = 'ephemeral_size' in kwargs
- #Generate images with specified size right on volume
+ # Generate images with specified size right on volume
if generated and size:
lvm.create_volume(self.vg, self.lv,
size, sparse=self.sparse)
@@ -488,52 +504,10 @@ def snapshot_extract(self, target, out_format):
run_as_root=True)
-class RBDVolumeProxy(object):
- """Context manager for dealing with an existing rbd volume.
-
- This handles connecting to rados and opening an ioctx automatically, and
- otherwise acts like a librbd Image object.
-
- The underlying librados client and ioctx can be accessed as the attributes
- 'client' and 'ioctx'.
- """
- def __init__(self, driver, name, pool=None):
- client, ioctx = driver._connect_to_rados(pool)
- try:
- self.volume = driver.rbd.Image(ioctx, str(name), snapshot=None)
- except driver.rbd.Error:
- LOG.exception(_LE("error opening rbd image %s"), name)
- driver._disconnect_from_rados(client, ioctx)
- raise
- self.driver = driver
- self.client = client
- self.ioctx = ioctx
-
- def __enter__(self):
- return self
-
- def __exit__(self, type_, value, traceback):
- try:
- self.volume.close()
- finally:
- self.driver._disconnect_from_rados(self.client, self.ioctx)
-
- def __getattr__(self, attrib):
- return getattr(self.volume, attrib)
-
-
-def ascii_str(s):
- """Convert a string to ascii, or return None if the input is None.
-
- This is useful when a parameter is None by default, or a string. LibRBD
- only accepts ascii, hence the need for conversion.
- """
- if s is None:
- return s
- return str(s)
+class Rbd(Image):
+ SUPPORTS_CLONE = True
-class Rbd(Image):
def __init__(self, instance=None, disk_name=None, path=None, **kwargs):
super(Rbd, self).__init__("block", "rbd", is_block_dev=True)
if path:
@@ -549,10 +523,13 @@ def __init__(self, instance=None, disk_name=None, path=None, **kwargs):
' images_rbd_pool'
' flag to use rbd images.'))
self.pool = CONF.libvirt.images_rbd_pool
- self.ceph_conf = ascii_str(CONF.libvirt.images_rbd_ceph_conf)
- self.rbd_user = ascii_str(CONF.libvirt.rbd_user)
- self.rbd = kwargs.get('rbd', rbd)
- self.rados = kwargs.get('rados', rados)
+ self.rbd_user = CONF.libvirt.rbd_user
+ self.ceph_conf = CONF.libvirt.images_rbd_ceph_conf
+
+ self.driver = rbd_utils.RBDDriver(
+ pool=self.pool,
+ ceph_conf=self.ceph_conf,
+ rbd_user=self.rbd_user)
self.path = 'rbd:%s/%s' % (self.pool, self.rbd_name)
if self.rbd_user:
@@ -560,52 +537,6 @@ def __init__(self, instance=None, disk_name=None, path=None, **kwargs):
if self.ceph_conf:
self.path += ':conf=' + self.ceph_conf
- def _connect_to_rados(self, pool=None):
- client = self.rados.Rados(rados_id=self.rbd_user,
- conffile=self.ceph_conf)
- try:
- client.connect()
- pool_to_open = str(pool or self.pool)
- ioctx = client.open_ioctx(pool_to_open)
- return client, ioctx
- except self.rados.Error:
- # shutdown cannot raise an exception
- client.shutdown()
- raise
-
- def _disconnect_from_rados(self, client, ioctx):
- # closing an ioctx cannot raise an exception
- ioctx.close()
- client.shutdown()
-
- def _supports_layering(self):
- return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
-
- def _ceph_args(self):
- args = []
- if self.rbd_user:
- args.extend(['--id', self.rbd_user])
- if self.ceph_conf:
- args.extend(['--conf', self.ceph_conf])
- return args
-
- def _get_mon_addrs(self):
- args = ['ceph', 'mon', 'dump', '--format=json'] + self._ceph_args()
- out, _ = utils.execute(*args)
- lines = out.split('\n')
- if lines[0].startswith('dumped monmap epoch'):
- lines = lines[1:]
- monmap = jsonutils.loads('\n'.join(lines))
- addrs = [mon['addr'] for mon in monmap['mons']]
- hosts = []
- ports = []
- for addr in addrs:
- host_port = addr[:addr.rindex('/')]
- host, port = host_port.rsplit(':', 1)
- hosts.append(host.strip('[]'))
- ports.append(port)
- return hosts, ports
-
def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
extra_specs, hypervisor_version):
"""Get `LibvirtConfigGuestDisk` filled for this image.
@@ -618,8 +549,8 @@ def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode,
"""
info = vconfig.LibvirtConfigGuestDisk()
- hosts, ports = self._get_mon_addrs()
- info.device_type = device_type
+ hosts, ports = self.driver.get_mon_addrs()
+ info.source_device = device_type
info.driver_format = 'raw'
info.driver_cache = cache_mode
info.target_bus = disk_bus
@@ -644,38 +575,30 @@ def _can_fallocate(self):
return False
def check_image_exists(self):
- rbd_volumes = libvirt_utils.list_rbd_volumes(self.pool)
- for vol in rbd_volumes:
- if vol.startswith(self.rbd_name):
- return True
+ return self.driver.exists(self.rbd_name)
- return False
+ def get_disk_size(self, name):
+ """Returns the size of the virtual disk in bytes.
- def _resize(self, volume_name, size):
- with RBDVolumeProxy(self, volume_name) as vol:
- vol.resize(int(size))
+ The name argument is ignored since this backend already knows
+ its name, and callers may pass a non-existent local file path.
+ """
+ return self.driver.size(self.rbd_name)
def create_image(self, prepare_template, base, size, *args, **kwargs):
- if self.rbd is None:
- raise RuntimeError(_('rbd python libraries not found'))
- if not os.path.exists(base):
+ if not self.check_image_exists():
prepare_template(target=base, max_size=size, *args, **kwargs)
else:
self.verify_base_size(base, size)
- # keep using the command line import instead of librbd since it
- # detects zeroes to preserve sparseness in the image
- args = ['--pool', self.pool, base, self.rbd_name]
- if self._supports_layering():
- args += ['--new-format']
- args += self._ceph_args()
- libvirt_utils.import_rbd_image(*args)
-
- base_size = disk.get_disk_size(base)
+ # prepare_template() may have cloned the image into a new rbd
+ # image already instead of downloading it locally
+ if not self.check_image_exists():
+ self.driver.import_image(base, self.rbd_name)
- if size and size > base_size:
- self._resize(self.rbd_name, size)
+ if size and size > self.get_disk_size(self.rbd_name):
+ self.driver.resize(self.rbd_name, size)
def snapshot_extract(self, target, out_format):
images.convert_image(self.path, target, out_format)
@@ -684,6 +607,31 @@ def snapshot_extract(self, target, out_format):
def is_shared_block_storage():
return True
+ def clone(self, context, image_id_or_uri):
+ if not self.driver.supports_layering():
+ reason = _('installed version of librbd does not support cloning')
+ raise exception.ImageUnacceptable(image_id=image_id_or_uri,
+ reason=reason)
+
+ image_meta = IMAGE_API.get(context, image_id_or_uri,
+ include_locations=True)
+ locations = image_meta['locations']
+
+ LOG.debug('Image locations are: %(locs)s' % {'locs': locations})
+
+ if image_meta.get('disk_format') not in ['raw', 'iso']:
+ reason = _('Image is not raw or iso format')
+ raise exception.ImageUnacceptable(image_id=image_id_or_uri,
+ reason=reason)
+
+ for location in locations:
+ if self.driver.is_cloneable(location, image_meta):
+ return self.driver.clone(location, self.rbd_name)
+
+ reason = _('No image locations are accessible')
+ raise exception.ImageUnacceptable(image_id=image_id_or_uri,
+ reason=reason)
+
class Backend(object):
def __init__(self, use_cow):
@@ -709,7 +657,8 @@ def image(self, instance, disk_name, image_type=None):
:instance: Instance name.
:name: Image name.
:image_type: Image type.
- Optional, is CONF.libvirt.images_type by default.
+ Optional, defaults to CONF.libvirt.images_type.
+
"""
backend = self.backend(image_type)
return backend(instance=instance, disk_name=disk_name)
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 0542b431a4..fa8e460ded 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -21,23 +21,22 @@
"""
import hashlib
-import json
import os
import re
import time
from oslo.config import cfg
+from nova.i18n import _LE
+from nova.i18n import _LI
+from nova.i18n import _LW
from nova.openstack.common import fileutils
-from nova.openstack.common.gettextutils import _LE
-from nova.openstack.common.gettextutils import _LI
-from nova.openstack.common.gettextutils import _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt import imagecache
-from nova.virt.libvirt import utils as virtutils
+from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
@@ -206,7 +205,7 @@ def write_file(info_file, field, value):
d['%s-timestamp' % field] = time.time()
with open(info_file, 'w') as f:
- f.write(json.dumps(d))
+ f.write(jsonutils.dumps(d))
write_file(info_file, field, value)
@@ -291,7 +290,7 @@ def _list_backing_images(self):
if os.path.exists(disk_path):
LOG.debug('%s has a disk file', ent)
try:
- backing_file = virtutils.get_disk_backing_file(
+ backing_file = libvirt_utils.get_disk_backing_file(
disk_path)
except processutils.ProcessExecutionError:
# (for bug 1261442)
@@ -516,7 +515,7 @@ def _handle_base_image(self, img_id, base_file):
{'id': img_id,
'base_file': base_file})
if os.path.exists(base_file):
- virtutils.chown(base_file, os.getuid())
+ libvirt_utils.chown(base_file, os.getuid())
os.utime(base_file, None)
def _age_and_verify_cached_images(self, context, all_instances, base_dir):
diff --git a/nova/virt/libvirt/lvm.py b/nova/virt/libvirt/lvm.py
index 0671a5086b..1ef455eb83 100644
--- a/nova/virt/libvirt/lvm.py
+++ b/nova/virt/libvirt/lvm.py
@@ -22,9 +22,9 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LE
-from nova.openstack.common.gettextutils import _LW
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import units
diff --git a/nova/virt/libvirt/rbd_utils.py b/nova/virt/libvirt/rbd_utils.py
new file mode 100644
index 0000000000..e638cf97c8
--- /dev/null
+++ b/nova/virt/libvirt/rbd_utils.py
@@ -0,0 +1,277 @@
+# Copyright 2012 Grid Dynamics
+# Copyright 2013 Inktank Storage, Inc.
+# Copyright 2014 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import urllib
+
+try:
+ import rados
+ import rbd
+except ImportError:
+ rados = None
+ rbd = None
+
+from nova import exception
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LW
+from nova.openstack.common import excutils
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+from nova.openstack.common import units
+from nova import utils
+
+LOG = logging.getLogger(__name__)
+
+
+class RBDVolumeProxy(object):
+ """Context manager for dealing with an existing rbd volume.
+
+ This handles connecting to rados and opening an ioctx automatically, and
+ otherwise acts like a librbd Image object.
+
+ The underlying librados client and ioctx can be accessed as the attributes
+ 'client' and 'ioctx'.
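+
+ Typical use (a sketch, where `driver` is an RBDDriver instance):
+
+ with RBDVolumeProxy(driver, 'volume-name') as vol:
+ vol.size()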
+ """
+ def __init__(self, driver, name, pool=None, snapshot=None,
+ read_only=False):
+ client, ioctx = driver._connect_to_rados(pool)
+ try:
+ snap_name = snapshot.encode('utf8') if snapshot else None
+ self.volume = rbd.Image(ioctx, name.encode('utf8'),
+ snapshot=snap_name,
+ read_only=read_only)
+ except rbd.ImageNotFound:
+ with excutils.save_and_reraise_exception():
+ LOG.debug("rbd image %s does not exist", name)
+ driver._disconnect_from_rados(client, ioctx)
+ except rbd.Error:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_LE("error opening rbd image %s"), name)
+ driver._disconnect_from_rados(client, ioctx)
+
+ self.driver = driver
+ self.client = client
+ self.ioctx = ioctx
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type_, value, traceback):
+ try:
+ self.volume.close()
+ finally:
+ self.driver._disconnect_from_rados(self.client, self.ioctx)
+
+ def __getattr__(self, attrib):
+ return getattr(self.volume, attrib)
+
+
+class RADOSClient(object):
+ """Context manager to simplify error handling for connecting to ceph."""
+ def __init__(self, driver, pool=None):
+ self.driver = driver
+ self.cluster, self.ioctx = driver._connect_to_rados(pool)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type_, value, traceback):
+ self.driver._disconnect_from_rados(self.cluster, self.ioctx)
+
+
+class RBDDriver(object):
+
+ def __init__(self, pool, ceph_conf, rbd_user):
+ self.pool = pool.encode('utf8')
+ # NOTE(angdraug): rados.Rados fails to connect if ceph_conf is None:
+ # https://github.com/ceph/ceph/pull/1787
+ self.ceph_conf = ceph_conf.encode('utf8') if ceph_conf else ''
+ self.rbd_user = rbd_user.encode('utf8') if rbd_user else None
+ if rbd is None:
+ raise RuntimeError(_('rbd python libraries not found'))
+
+ def _connect_to_rados(self, pool=None):
+ client = rados.Rados(rados_id=self.rbd_user,
+ conffile=self.ceph_conf)
+ try:
+ client.connect()
+ pool_to_open = pool or self.pool
+ ioctx = client.open_ioctx(pool_to_open.encode('utf-8'))
+ return client, ioctx
+ except rados.Error:
+ # shutdown cannot raise an exception
+ client.shutdown()
+ raise
+
+ def _disconnect_from_rados(self, client, ioctx):
+ # closing an ioctx cannot raise an exception
+ ioctx.close()
+ client.shutdown()
+
+ def supports_layering(self):
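+ # The presence of RBD_FEATURE_LAYERING in the python bindings is
+ # used as a proxy for copy-on-write clone support in librbd.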
+ return hasattr(rbd, 'RBD_FEATURE_LAYERING')
+
+ def ceph_args(self):
+ """List of command line parameters to be passed to ceph commands to
+ reflect RBDDriver configuration such as RBD user name and location
+ of ceph.conf.
+ """
+ args = []
+ if self.rbd_user:
+ args.extend(['--id', self.rbd_user])
+ if self.ceph_conf:
+ args.extend(['--conf', self.ceph_conf])
+ return args
+
+ def get_mon_addrs(self):
+ args = ['ceph', 'mon', 'dump', '--format=json'] + self.ceph_args()
+ out, _ = utils.execute(*args)
+ lines = out.split('\n')
+ if lines[0].startswith('dumped monmap epoch'):
+ lines = lines[1:]
+ monmap = jsonutils.loads('\n'.join(lines))
+ addrs = [mon['addr'] for mon in monmap['mons']]
+ hosts = []
+ ports = []
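+ # Each monitor addr is expected to look like '1.2.3.4:6789/0' (IPv6
+ # hosts appear bracketed), so strip the trailing '/<nonce>' and
+ # split the host from the port.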
+ for addr in addrs:
+ host_port = addr[:addr.rindex('/')]
+ host, port = host_port.rsplit(':', 1)
+ hosts.append(host.strip('[]'))
+ ports.append(port)
+ return hosts, ports
+
+ def parse_url(self, url):
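+ # A cloneable image location URL is assumed to have the form:
+ # rbd://<fsid>/<pool>/<image>/<snapshot>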
+ prefix = 'rbd://'
+ if not url.startswith(prefix):
+ reason = _('Not stored in rbd')
+ raise exception.ImageUnacceptable(image_id=url, reason=reason)
+ pieces = map(urllib.unquote, url[len(prefix):].split('/'))
+ if '' in pieces:
+ reason = _('Blank components')
+ raise exception.ImageUnacceptable(image_id=url, reason=reason)
+ if len(pieces) != 4:
+ reason = _('Not an rbd snapshot')
+ raise exception.ImageUnacceptable(image_id=url, reason=reason)
+ return pieces
+
+ def _get_fsid(self):
+ with RADOSClient(self) as client:
+ return client.cluster.get_fsid()
+
+ def is_cloneable(self, image_location, image_meta):
+ url = image_location['url']
+ try:
+ fsid, pool, image, snapshot = self.parse_url(url)
+ except exception.ImageUnacceptable as e:
+ LOG.debug('not cloneable: %s', e)
+ return False
+
+ if self._get_fsid() != fsid:
+ reason = '%s is in a different ceph cluster' % url
+ LOG.debug(reason)
+ return False
+
+ if image_meta['disk_format'] != 'raw':
+ reason = ("rbd image clone requires image format to be "
+ "'raw' but image {0} is '{1}'").format(
+ url, image_meta['disk_format'])
+ LOG.debug(reason)
+ return False
+
+ # check that we can read the image
+ try:
+ return self.exists(image, pool=pool, snapshot=snapshot)
+ except rbd.Error as e:
+ LOG.debug('Unable to open image %(loc)s: %(err)s' %
+ dict(loc=url, err=e))
+ return False
+
+ def clone(self, image_location, dest_name):
+ _fsid, pool, image, snapshot = self.parse_url(
+ image_location['url'])
+ LOG.debug('cloning %(pool)s/%(img)s@%(snap)s' %
+ dict(pool=pool, img=image, snap=snapshot))
+ with RADOSClient(self, str(pool)) as src_client:
+ with RADOSClient(self) as dest_client:
+ # pylint: disable=E1101
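+ # NOTE: librbd requires the source snapshot to be protected
+ # before it can be cloned.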
+ rbd.RBD().clone(src_client.ioctx,
+ image.encode('utf-8'),
+ snapshot.encode('utf-8'),
+ dest_client.ioctx,
+ dest_name,
+ features=rbd.RBD_FEATURE_LAYERING)
+
+ def size(self, name):
+ with RBDVolumeProxy(self, name) as vol:
+ return vol.size()
+
+ def resize(self, name, size):
+ """Resize RBD volume.
+
+ :name: Name of RBD object
+ :size: New size in bytes
+ """
+ LOG.debug('resizing rbd image %s to %d', name, size)
+ with RBDVolumeProxy(self, name) as vol:
+ vol.resize(size)
+
+ def exists(self, name, pool=None, snapshot=None):
+ try:
+ with RBDVolumeProxy(self, name,
+ pool=pool,
+ snapshot=snapshot,
+ read_only=True):
+ return True
+ except rbd.ImageNotFound:
+ return False
+
+ def import_image(self, base, name):
+ """Import RBD volume from image file.
+
+ Uses the command line import instead of librbd since the rbd import
+ command detects zeroes to preserve sparseness in the image.
+
+ :base: Path to image file
+ :name: Name of RBD volume
+ """
+ args = ['--pool', self.pool, base, name]
+ if self.supports_layering():
+ args += ['--new-format']
+ args += self.ceph_args()
+ utils.execute('rbd', 'import', *args)
+
+ def cleanup_volumes(self, instance):
+ with RADOSClient(self, self.pool) as client:
+
+ def belongs_to_instance(disk):
+ return disk.startswith(instance['uuid'])
+
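+ # Volume names created by this driver are assumed to be prefixed
+ # with the instance uuid, e.g. '<uuid>_disk' or '<uuid>_disk.local'.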
+ # pylint: disable=E1101
+ volumes = rbd.RBD().list(client.ioctx)
+ for volume in filter(belongs_to_instance, volumes):
+ try:
+ rbd.RBD().remove(client.ioctx, volume)
+ except (rbd.ImageNotFound, rbd.ImageHasSnapshots):
+ LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s '
+ 'failed'),
+ {'volume': volume, 'pool': self.pool})
+
+ def get_pool_info(self):
+ with RADOSClient(self) as client:
+ stats = client.cluster.get_cluster_stats()
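+ # get_cluster_stats() reports sizes in kB, hence the conversion
+ # to bytes via units.Ki.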
+ return {'total': stats['kb'] * units.Ki,
+ 'free': stats['kb_avail'] * units.Ki,
+ 'used': stats['kb_used'] * units.Ki}
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 54ac1d3283..825a81a04e 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -25,9 +25,9 @@
from lxml import etree
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LI
-from nova.openstack.common.gettextutils import _LW
+from nova.i18n import _
+from nova.i18n import _LI
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
@@ -203,52 +203,6 @@ def create_cow_image(backing_file, path, size=None):
execute(*cmd)
-def import_rbd_image(*args):
- execute('rbd', 'import', *args)
-
-
-def _run_rbd(*args, **kwargs):
- total = list(args)
-
- if CONF.libvirt.rbd_user:
- total.extend(['--id', str(CONF.libvirt.rbd_user)])
- if CONF.libvirt.images_rbd_ceph_conf:
- total.extend(['--conf', str(CONF.libvirt.images_rbd_ceph_conf)])
-
- return utils.execute(*total, **kwargs)
-
-
-def list_rbd_volumes(pool):
- """List volumes names for given ceph pool.
-
- :param pool: ceph pool name
- """
- try:
- out, err = _run_rbd('rbd', '-p', pool, 'ls')
- except processutils.ProcessExecutionError:
- # No problem when no volume in rbd pool
- return []
-
- return [line.strip() for line in out.splitlines()]
-
-
-def remove_rbd_volumes(pool, *names):
- """Remove one or more rbd volume."""
- for name in names:
- # NOTE(nic): the rbd command supports two methods for
- # specifying a pool name: the "-p" flag, and using the volume
- # name notation "pool_name/volume_name"
- # The latter method supercedes the former, so to guard
- # against slashes in the volume name confusing things, always
- # use the path notation
- rbd_remove = ('rbd', 'rm', os.path.join(pool, name))
- try:
- _run_rbd(*rbd_remove, attempts=3, run_as_root=True)
- except processutils.ProcessExecutionError:
- LOG.warn(_LW("rbd remove %(name)s in pool %(pool)s failed"),
- {'name': name, 'pool': pool})
-
-
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
"""Pick the libvirt primary backend driver name
@@ -531,7 +485,7 @@ def is_mounted(mount_path, source=None):
except processutils.ProcessExecutionError as exc:
return False
except OSError as exc:
- #info since it's not required to have this tool.
+ # info since it's not required to have this tool.
if exc.errno == errno.ENOENT:
LOG.info(_LI("findmnt tool is not installed"))
return False
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 2532e786ca..45e52e924b 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -22,11 +22,10 @@
from oslo.config import cfg
from nova import exception
+from nova.i18n import _
+from nova.i18n import _LE
from nova.network import linux_net
from nova.network import model as network_model
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LE
-from nova.openstack.common.gettextutils import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
@@ -43,63 +42,52 @@
CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts, 'libvirt')
-CONF.import_opt('virt_type', 'nova.virt.libvirt.driver', group='libvirt')
CONF.import_opt('use_ipv6', 'nova.netconf')
-# Since libvirt 0.9.11,
-# supports OpenVSwitch natively.
-LIBVIRT_OVS_VPORT_VERSION = 9011
DEV_PREFIX_ETH = 'eth'
def is_vif_model_valid_for_virt(virt_type, vif_model):
- valid_models = {
- 'qemu': [network_model.VIF_MODEL_VIRTIO,
- network_model.VIF_MODEL_NE2K_PCI,
- network_model.VIF_MODEL_PCNET,
- network_model.VIF_MODEL_RTL8139,
- network_model.VIF_MODEL_E1000],
- 'kvm': [network_model.VIF_MODEL_VIRTIO,
- network_model.VIF_MODEL_NE2K_PCI,
- network_model.VIF_MODEL_PCNET,
- network_model.VIF_MODEL_RTL8139,
- network_model.VIF_MODEL_E1000],
- 'xen': [network_model.VIF_MODEL_NETFRONT,
- network_model.VIF_MODEL_NE2K_PCI,
- network_model.VIF_MODEL_PCNET,
- network_model.VIF_MODEL_RTL8139,
- network_model.VIF_MODEL_E1000],
- 'lxc': [],
- 'uml': [],
- }
-
- if vif_model is None:
- return True
-
- if virt_type not in valid_models:
- raise exception.UnsupportedVirtType(virt=virt_type)
-
- return vif_model in valid_models[virt_type]
-
-
-class LibvirtBaseVIFDriver(object):
+ valid_models = {
+ 'qemu': [network_model.VIF_MODEL_VIRTIO,
+ network_model.VIF_MODEL_NE2K_PCI,
+ network_model.VIF_MODEL_PCNET,
+ network_model.VIF_MODEL_RTL8139,
+ network_model.VIF_MODEL_E1000,
+ network_model.VIF_MODEL_SPAPR_VLAN],
+ 'kvm': [network_model.VIF_MODEL_VIRTIO,
+ network_model.VIF_MODEL_NE2K_PCI,
+ network_model.VIF_MODEL_PCNET,
+ network_model.VIF_MODEL_RTL8139,
+ network_model.VIF_MODEL_E1000,
+ network_model.VIF_MODEL_SPAPR_VLAN],
+ 'xen': [network_model.VIF_MODEL_NETFRONT,
+ network_model.VIF_MODEL_NE2K_PCI,
+ network_model.VIF_MODEL_PCNET,
+ network_model.VIF_MODEL_RTL8139,
+ network_model.VIF_MODEL_E1000],
+ 'lxc': [],
+ 'uml': [],
+ }
+
+ if vif_model is None:
+ return True
+
+ if virt_type not in valid_models:
+ raise exception.UnsupportedVirtType(virt=virt_type)
+
+ return vif_model in valid_models[virt_type]
+
+
+class LibvirtGenericVIFDriver(object):
+ """Generic VIF driver for libvirt networking."""
def __init__(self, get_connection):
self.get_connection = get_connection
- self.libvirt_version = None
def _normalize_vif_type(self, vif_type):
return vif_type.replace('2.1q', '2q')
- def has_libvirt_version(self, want):
- if self.libvirt_version is None:
- conn = self.get_connection()
- self.libvirt_version = conn.getLibVersion()
-
- if self.libvirt_version >= want:
- return True
- return False
-
def get_vif_devname(self, vif):
if 'devname' in vif:
return vif['devname']
@@ -109,7 +97,8 @@ def get_vif_devname_with_prefix(self, vif, prefix):
devname = self.get_vif_devname(vif)
return prefix + devname[3:]
- def get_config(self, instance, vif, image_meta, inst_type):
+ def get_base_config(self, instance, vif, image_meta,
+ inst_type, virt_type):
conf = vconfig.LibvirtConfigGuestInterface()
# Default to letting libvirt / the hypervisor choose the model
model = None
@@ -126,36 +115,26 @@ def get_config(self, instance, vif, image_meta, inst_type):
# Else if the virt type is KVM/QEMU, use virtio according
# to the global config parameter
if (model is None and
- CONF.libvirt.virt_type in ('kvm', 'qemu') and
+ virt_type in ('kvm', 'qemu') and
CONF.libvirt.use_virtio_for_bridges):
model = network_model.VIF_MODEL_VIRTIO
# Workaround libvirt bug, where it mistakenly
# enables vhost mode, even for non-KVM guests
if (model == network_model.VIF_MODEL_VIRTIO and
- CONF.libvirt.virt_type == "qemu"):
+ virt_type == "qemu"):
driver = "qemu"
- if not is_vif_model_valid_for_virt(CONF.libvirt.virt_type,
+ if not is_vif_model_valid_for_virt(virt_type,
model):
raise exception.UnsupportedHardware(model=model,
- virt=CONF.libvirt.virt_type)
+ virt=virt_type)
designer.set_vif_guest_frontend_config(
conf, vif['address'], model, driver)
return conf
- def plug(self, instance, vif):
- pass
-
- def unplug(self, instance, vif):
- pass
-
-
-class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver):
- """Generic VIF driver for libvirt networking."""
-
def get_bridge_name(self, vif):
return vif['network']['bridge']
@@ -176,11 +155,11 @@ def get_firewall_required(self, vif):
return True
return False
- def get_config_bridge(self, instance, vif, image_meta, inst_type):
+ def get_config_bridge(self, instance, vif, image_meta,
+ inst_type, virt_type):
"""Get VIF configurations for bridge type."""
- conf = super(LibvirtGenericVIFDriver,
- self).get_config(instance, vif,
- image_meta, inst_type)
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
designer.set_vif_host_backend_bridge_config(
conf, self.get_bridge_name(vif),
@@ -194,22 +173,10 @@ def get_config_bridge(self, instance, vif, image_meta, inst_type):
return conf
- def get_config_ovs_ethernet(self, instance, vif,
- image_meta, inst_type):
- conf = super(LibvirtGenericVIFDriver,
- self).get_config(instance, vif,
- image_meta, inst_type)
-
- dev = self.get_vif_devname(vif)
- designer.set_vif_host_backend_ethernet_config(conf, dev)
-
- return conf
-
def get_config_ovs_bridge(self, instance, vif, image_meta,
- inst_type):
- conf = super(LibvirtGenericVIFDriver,
- self).get_config(instance, vif,
- image_meta, inst_type)
+ inst_type, virt_type):
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
designer.set_vif_host_backend_ovs_config(
conf, self.get_bridge_name(vif),
@@ -221,63 +188,65 @@ def get_config_ovs_bridge(self, instance, vif, image_meta,
return conf
def get_config_ovs_hybrid(self, instance, vif, image_meta,
- inst_type):
+ inst_type, virt_type):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
- return self.get_config_bridge(instance, newvif,
- image_meta, inst_type)
+ return self.get_config_bridge(instance, newvif, image_meta,
+ inst_type, virt_type)
- def get_config_ovs(self, instance, vif, image_meta, inst_type):
+ def get_config_ovs(self, instance, vif, image_meta,
+ inst_type, virt_type):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ovs_hybrid(instance, vif,
image_meta,
- inst_type)
- elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
+ inst_type,
+ virt_type)
+ else:
return self.get_config_ovs_bridge(instance, vif,
image_meta,
- inst_type)
- else:
- return self.get_config_ovs_ethernet(instance, vif,
- image_meta,
- inst_type)
+ inst_type,
+ virt_type)
def get_config_ivs_hybrid(self, instance, vif, image_meta,
- inst_type):
+ inst_type, virt_type):
newvif = copy.deepcopy(vif)
newvif['network']['bridge'] = self.get_br_name(vif['id'])
return self.get_config_bridge(instance,
newvif,
image_meta,
- inst_type)
+ inst_type,
+ virt_type)
def get_config_ivs_ethernet(self, instance, vif, image_meta,
- inst_type):
- conf = super(LibvirtGenericVIFDriver,
- self).get_config(instance,
- vif,
- image_meta,
- inst_type)
+ inst_type, virt_type):
+ conf = self.get_base_config(instance,
+ vif,
+ image_meta,
+ inst_type,
+ virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
return conf
- def get_config_ivs(self, instance, vif, image_meta, inst_type):
+ def get_config_ivs(self, instance, vif, image_meta,
+ inst_type, virt_type):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
return self.get_config_ivs_hybrid(instance, vif,
image_meta,
- inst_type)
+ inst_type,
+ virt_type)
else:
return self.get_config_ivs_ethernet(instance, vif,
image_meta,
- inst_type)
+ inst_type,
+ virt_type)
def get_config_802qbg(self, instance, vif, image_meta,
- inst_type):
- conf = super(LibvirtGenericVIFDriver,
- self).get_config(instance, vif,
- image_meta, inst_type)
+ inst_type, virt_type):
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
params = vif["qbg_params"]
designer.set_vif_host_backend_802qbg_config(
@@ -292,10 +261,9 @@ def get_config_802qbg(self, instance, vif, image_meta,
return conf
def get_config_802qbh(self, instance, vif, image_meta,
- inst_type):
- conf = super(LibvirtGenericVIFDriver,
- self).get_config(instance, vif,
- image_meta, inst_type)
+ inst_type, virt_type):
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
params = vif["qbh_params"]
designer.set_vif_host_backend_802qbh_config(
@@ -307,10 +275,9 @@ def get_config_802qbh(self, instance, vif, image_meta,
return conf
def get_config_iovisor(self, instance, vif, image_meta,
- inst_type):
- conf = super(LibvirtGenericVIFDriver,
- self).get_config(instance, vif,
- image_meta, inst_type)
+ inst_type, virt_type):
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
@@ -320,10 +287,9 @@ def get_config_iovisor(self, instance, vif, image_meta,
return conf
def get_config_midonet(self, instance, vif, image_meta,
- inst_type):
- conf = super(LibvirtGenericVIFDriver,
- self).get_config(instance, vif,
- image_meta, inst_type)
+ inst_type, virt_type):
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
dev = self.get_vif_devname(vif)
designer.set_vif_host_backend_ethernet_config(conf, dev)
@@ -331,10 +297,9 @@ def get_config_midonet(self, instance, vif, image_meta,
return conf
def get_config_mlnx_direct(self, instance, vif, image_meta,
- inst_type):
- conf = super(LibvirtGenericVIFDriver,
- self).get_config(instance, vif,
- image_meta, inst_type)
+ inst_type, virt_type):
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
designer.set_vif_host_backend_direct_config(conf, devname)
@@ -343,13 +308,22 @@ def get_config_mlnx_direct(self, instance, vif, image_meta,
return conf
- def get_config(self, instance, vif, image_meta, inst_type):
+ def get_config_vhostuser(self, instance, vif, image_meta,
+ inst_type, virt_type):
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
+ designer.set_vif_host_backend_vhostuser_config(conf,
+ vif['vhostuser_mode'], vif['vhostuser_path'])
+ return conf
+
+ def get_config(self, instance, vif, image_meta,
+ inst_type, virt_type):
vif_type = vif['type']
LOG.debug('vif_type=%(vif_type)s instance=%(instance)s '
- 'vif=%(vif)s',
+ 'vif=%(vif)s virt_type=%(virt_type)s',
{'vif_type': vif_type, 'instance': instance,
- 'vif': vif})
+ 'vif': vif, 'virt_type': virt_type})
if vif_type is None:
raise exception.NovaException(
@@ -360,12 +334,11 @@ def get_config(self, instance, vif, image_meta, inst_type):
if not func:
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
- return func(instance, vif, image_meta, inst_type)
+ return func(instance, vif, image_meta,
+ inst_type, virt_type)
def plug_bridge(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
network = vif['network']
if (not network.get_meta('multi_host', False) and
network.get_meta('should_create_bridge', False)):
@@ -389,21 +362,9 @@ def plug_bridge(self, instance, vif):
self.get_bridge_name(vif),
iface)
- def plug_ovs_ethernet(self, instance, vif):
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
-
- iface_id = self.get_ovs_interfaceid(vif)
- dev = self.get_vif_devname(vif)
- linux_net.create_tap_dev(dev)
- linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
- dev, iface_id, vif['address'],
- instance['uuid'])
-
def plug_ovs_bridge(self, instance, vif):
"""No manual plugging required."""
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
+ pass
def plug_ovs_hybrid(self, instance, vif):
"""Plug using hybrid strategy
@@ -413,9 +374,6 @@ def plug_ovs_hybrid(self, instance, vif):
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
-
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
@@ -442,15 +400,10 @@ def plug_ovs_hybrid(self, instance, vif):
def plug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.plug_ovs_hybrid(instance, vif)
- elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
- self.plug_ovs_bridge(instance, vif)
else:
- self.plug_ovs_ethernet(instance, vif)
+ self.plug_ovs_bridge(instance, vif)
def plug_ivs_ethernet(self, instance, vif):
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
-
iface_id = self.get_ovs_interfaceid(vif)
dev = self.get_vif_devname(vif)
linux_net.create_tap_dev(dev)
@@ -465,9 +418,6 @@ def plug_ivs_hybrid(self, instance, vif):
of the veth device just like a normal IVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms.
"""
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
-
iface_id = self.get_ovs_interfaceid(vif)
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
@@ -497,14 +447,12 @@ def plug_ivs(self, instance, vif):
self.plug_ivs_ethernet(instance, vif)
def plug_mlnx_direct(self, instance, vif):
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
-
- network = vif['network']
vnic_mac = vif['address']
device_id = instance['uuid']
- fabric = network['meta']['physical_network']
-
+ fabric = vif.get_physical_network()
+ if not fabric:
+ raise exception.NetworkMissingPhysicalNetwork(
+ network_uuid=vif['network']['id'])
dev_name = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH)
try:
utils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric,
@@ -514,20 +462,16 @@ def plug_mlnx_direct(self, instance, vif):
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
def plug_802qbg(self, instance, vif):
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
+ pass
def plug_802qbh(self, instance, vif):
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
+ pass
def plug_midonet(self, instance, vif):
"""Plug into MidoNet's network port
Bind the vif to a MidoNet virtual port.
"""
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
@@ -543,8 +487,6 @@ def plug_iovisor(self, instance, vif):
Connect a network device to their respective
Virtual Domain in PLUMgrid Platform.
"""
- super(LibvirtGenericVIFDriver,
- self).plug(instance, vif)
dev = self.get_vif_devname(vif)
iface_id = vif['id']
linux_net.create_tap_dev(dev)
@@ -561,6 +503,9 @@ def plug_iovisor(self, instance, vif):
except processutils.ProcessExecutionError:
LOG.exception(_LE("Failed while plugging vif"), instance=instance)
+ def plug_vhostuser(self, instance, vif):
+ pass
+
def plug(self, instance, vif):
vif_type = vif['type']
@@ -582,25 +527,11 @@ def plug(self, instance, vif):
def unplug_bridge(self, instance, vif):
"""No manual unplugging required."""
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
-
- def unplug_ovs_ethernet(self, instance, vif):
- """Unplug the VIF by deleting the port from the bridge."""
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
-
- try:
- linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
- self.get_vif_devname(vif))
- except processutils.ProcessExecutionError:
- LOG.exception(_LE("Failed while unplugging vif"),
- instance=instance)
+ pass
def unplug_ovs_bridge(self, instance, vif):
"""No manual unplugging required."""
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
+ pass
def unplug_ovs_hybrid(self, instance, vif):
"""UnPlug using hybrid strategy
@@ -608,9 +539,6 @@ def unplug_ovs_hybrid(self, instance, vif):
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
-
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
@@ -632,16 +560,11 @@ def unplug_ovs_hybrid(self, instance, vif):
def unplug_ovs(self, instance, vif):
if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled():
self.unplug_ovs_hybrid(instance, vif)
- elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION):
- self.unplug_ovs_bridge(instance, vif)
else:
- self.unplug_ovs_ethernet(instance, vif)
+ self.unplug_ovs_bridge(instance, vif)
def unplug_ivs_ethernet(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
-
try:
linux_net.delete_ivs_vif_port(self.get_vif_devname(vif))
except processutils.ProcessExecutionError:
@@ -654,9 +577,6 @@ def unplug_ivs_hybrid(self, instance, vif):
Unhook port from IVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
-
try:
br_name = self.get_br_name(vif['id'])
v1_name, v2_name = self.get_veth_pair_names(vif['id'])
@@ -677,12 +597,11 @@ def unplug_ivs(self, instance, vif):
self.unplug_ivs_ethernet(instance, vif)
def unplug_mlnx_direct(self, instance, vif):
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
-
- network = vif['network']
vnic_mac = vif['address']
- fabric = network['meta']['physical_network']
+ fabric = vif.get_physical_network()
+ if not fabric:
+ raise exception.NetworkMissingPhysicalNetwork(
+ network_uuid=vif['network']['id'])
try:
utils.execute('ebrctl', 'del-port', fabric,
vnic_mac, run_as_root=True)
@@ -691,20 +610,16 @@ def unplug_mlnx_direct(self, instance, vif):
instance=instance)
def unplug_802qbg(self, instance, vif):
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
+ pass
def unplug_802qbh(self, instance, vif):
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
+ pass
def unplug_midonet(self, instance, vif):
"""Unplug from MidoNet network port
Unbind the vif from a MidoNet virtual port.
"""
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
dev = self.get_vif_devname(vif)
port_id = vif['id']
try:
@@ -721,8 +636,6 @@ def unplug_iovisor(self, instance, vif):
Delete network device and to their respective
connection to the Virtual Domain in PLUMgrid Platform.
"""
- super(LibvirtGenericVIFDriver,
- self).unplug(instance, vif)
iface_id = vif['id']
dev = self.get_vif_devname(vif)
try:
@@ -737,6 +650,9 @@ def unplug_iovisor(self, instance, vif):
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
+ def unplug_vhostuser(self, instance, vif):
+ pass
+
def unplug(self, instance, vif):
vif_type = vif['type']
@@ -755,36 +671,3 @@ def unplug(self, instance, vif):
raise exception.NovaException(
_("Unexpected vif_type=%s") % vif_type)
func(instance, vif)
-
-# The following classes were removed in the transition from Havana to
-# Icehouse, but may still be referenced in configuration files. The
-# following stubs allow those configurations to work while logging a
-# deprecation warning.
-
-
-class _LibvirtDeprecatedDriver(LibvirtGenericVIFDriver):
- def __init__(self, *args, **kwargs):
- LOG.warn(_LW('VIF driver \"%s\" is marked as deprecated and will be '
- 'removed in the Juno release.'),
- self.__class__.__name__)
- super(_LibvirtDeprecatedDriver, self).__init__(*args, **kwargs)
-
-
-class LibvirtBridgeDriver(_LibvirtDeprecatedDriver):
- pass
-
-
-class LibvirtOpenVswitchDriver(_LibvirtDeprecatedDriver):
- pass
-
-
-class LibvirtHybridOVSBridgeDriver(_LibvirtDeprecatedDriver):
- pass
-
-
-class LibvirtOpenVswitchVirtualPortDriver(_LibvirtDeprecatedDriver):
- pass
-
-
-class NeutronLinuxBridgeVIFDriver(_LibvirtDeprecatedDriver):
- pass
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index 1f4f85cb03..3daf3554d7 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -26,9 +26,9 @@
import six.moves.urllib.parse as urlparse
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LE
-from nova.openstack.common.gettextutils import _LW
+from nova.i18n import _
+from nova.i18n import _LE
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import processutils
@@ -36,7 +36,7 @@
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
-from nova.virt.libvirt import utils as virtutils
+from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
@@ -97,7 +97,7 @@ def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
- conf.driver_name = virtutils.pick_disk_driver_name(
+ conf.driver_name = libvirt_utils.pick_disk_driver_name(
self.connection._get_hypervisor_version(),
self.is_block_dev
)
@@ -231,8 +231,11 @@ def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
'-p', iscsi_properties['target_portal'],
*iscsi_command, run_as_root=True,
check_exit_code=check_exit_code)
- LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
- {'command': iscsi_command, 'out': out, 'err': err})
+ msg = ('iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s' %
+ {'command': iscsi_command, 'out': out, 'err': err})
+ # NOTE(bpokorny): iscsi_command can contain passwords so we need to
+ # sanitize the password in the message.
+ LOG.debug(logging.mask_password(msg))
return (out, err)
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
@@ -255,9 +258,9 @@ def connect_volume(self, connection_info, disk_info):
iscsi_properties = connection_info['data']
if self.use_multipath:
- #multipath installed, discovering other targets if available
- #multipath should be configured on the nova-compute node,
- #in order to fit storage vendor
+ # multipath installed, discovering other targets if available
+ # multipath should be configured on the nova-compute node,
+ # in order to fit storage vendor
out = self._run_iscsiadm_bare(['-m',
'discovery',
'-t',
@@ -309,7 +312,7 @@ def connect_volume(self, connection_info, disk_info):
'tries': tries})
if self.use_multipath:
- #we use the multipath device instead of the single path device
+ # we use the multipath device instead of the single path device
self._rescan_multipath()
multipath_device = self._get_multipath_device_name(host_device)
@@ -462,8 +465,8 @@ def _connect_to_iscsi_portal(self, iscsi_properties):
"node.session.auth.password",
iscsi_properties['auth_password'])
- #duplicate logins crash iscsiadm after load,
- #so we scan active sessions to see if the node is logged in.
+ # duplicate logins crash iscsiadm after load,
+ # so we scan active sessions to see if the node is logged in.
out = self._run_iscsiadm_bare(["-m", "session"],
run_as_root=True,
check_exit_code=[0, 1, 21])[0] or ""
@@ -484,8 +487,8 @@ def _connect_to_iscsi_portal(self, iscsi_properties):
("--login",),
check_exit_code=[0, 255])
except processutils.ProcessExecutionError as err:
- #as this might be one of many paths,
- #only set successful logins to startup automatically
+ # as this might be one of many paths,
+ # only set successful logins to startup automatically
if err.exit_code in [15]:
self._iscsiadm_update(iscsi_properties,
"node.startup",
@@ -671,7 +674,7 @@ def _ensure_mounted(self, nfs_export, options=None):
"""
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(nfs_export))
- if not virtutils.is_mounted(mount_path, nfs_export):
+ if not libvirt_utils.is_mounted(mount_path, nfs_export):
self._mount_nfs(mount_path, nfs_export, options, ensure=True)
return mount_path
@@ -727,7 +730,7 @@ def connect_volume(self, connection_info, mount_device):
# NOTE(jbr_): If aoedevpath does not exist, do a discover.
self._aoe_discover()
- #NOTE(jbr_): Device path is not always present immediately
+ # NOTE(jbr_): Device path is not always present immediately
def _wait_for_device_discovery(aoedevpath, mount_device):
tries = self.tries
if os.path.exists(aoedevpath):
@@ -821,7 +824,7 @@ def _ensure_mounted(self, glusterfs_export, options=None):
"""
mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
utils.get_hash_str(glusterfs_export))
- if not virtutils.is_mounted(mount_path, glusterfs_export):
+ if not libvirt_utils.is_mounted(mount_path, glusterfs_export):
self._mount_glusterfs(mount_path, glusterfs_export,
options, ensure=True)
return mount_path
@@ -891,7 +894,7 @@ def connect_volume(self, connection_info, disk_info):
# We need to look for wwns on every hba
# because we don't know ahead of time
# where they will show up.
- hbas = virtutils.get_fc_hbas_info()
+ hbas = libvirt_utils.get_fc_hbas_info()
host_devices = []
for hba in hbas:
pci_num = self._get_pci_num(hba)
@@ -977,7 +980,6 @@ def disconnect_volume(self, connection_info, mount_device):
"""Detach the volume from instance_name."""
super(LibvirtFibreChannelVolumeDriver,
self).disconnect_volume(connection_info, mount_device)
- devices = connection_info['data']['devices']
# If this is a multipath device, we need to search again
# and make sure we remove all the devices. Some of them
@@ -987,6 +989,11 @@ def disconnect_volume(self, connection_info, mount_device):
mdev_info = linuxscsi.find_multipath_device(multipath_id)
devices = mdev_info['devices']
LOG.debug("devices to remove = %s", devices)
+ else:
+ # This fallback is only needed when multipath-tools is misbehaving.
+ devices = connection_info['data'].get('devices', [])
+ LOG.warn(_LW("multipath-tools is probably misbehaving; "
+ "devices to remove = %s.") % devices)
# There may have been more than 1 device mounted
# by the kernel for this volume. We have to remove
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 6842c61b41..c3319d3ced 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -66,8 +66,8 @@ def _get_first_network(network, version):
pass
-def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6,
- template=CONF.injected_network_template):
+def get_injected_network_template(network_info, use_ipv6=None, template=None,
+ libvirt_virt_type=None):
"""Returns a rendered network template for the given network_info.
:param network_info:
@@ -75,7 +75,15 @@ def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6,
:param use_ipv6: If False, do not return IPv6 template information
even if an IPv6 subnet is present in network_info.
:param template: Path to the interfaces template file.
+ :param libvirt_virt_type: The libvirt `virt_type`; will be `None` for
+ other hypervisors.
"""
+ if use_ipv6 is None:
+ use_ipv6 = CONF.use_ipv6
+
+ if not template:
+ template = CONF.injected_network_template
+
if not (network_info and template):
return
@@ -153,4 +161,5 @@ def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6,
trim_blocks=True)
template = env.get_template(tmpl_file)
return template.render({'interfaces': nets,
- 'use_ipv6': ipv6_is_available})
+ 'use_ipv6': ipv6_is_available,
+ 'libvirt_virt_type': libvirt_virt_type})
diff --git a/nova/virt/storage_users.py b/nova/virt/storage_users.py
index 752f07efc0..edb0215f8d 100644
--- a/nova/virt/storage_users.py
+++ b/nova/virt/storage_users.py
@@ -13,13 +13,13 @@
# under the License.
-import json
import os
import time
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
@@ -59,7 +59,7 @@ def do_register_storage_use(storage_path, hostname):
if os.path.exists(id_path):
with open(id_path) as f:
try:
- d = json.loads(f.read())
+ d = jsonutils.loads(f.read())
except ValueError:
LOG.warning(_("Cannot decode JSON from %(id_path)s"),
{"id_path": id_path})
@@ -67,7 +67,7 @@ def do_register_storage_use(storage_path, hostname):
d[hostname] = time.time()
with open(id_path, 'w') as f:
- f.write(json.dumps(d))
+ f.write(jsonutils.dumps(d))
return do_register_storage_use(storage_path, hostname)
@@ -97,7 +97,7 @@ def do_get_storage_users(storage_path):
if os.path.exists(id_path):
with open(id_path) as f:
try:
- d = json.loads(f.read())
+ d = jsonutils.loads(f.read())
except ValueError:
LOG.warning(_("Cannot decode JSON from %(id_path)s"),
{"id_path": id_path})
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
index 9fdf6a7b8d..022e525284 100644
--- a/nova/virt/vmwareapi/__init__.py
+++ b/nova/virt/vmwareapi/__init__.py
@@ -13,10 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-:mod:`vmwareapi` -- Nova support for VMware ESX/vCenter through VMware API.
+:mod:`vmwareapi` -- Nova support for VMware vCenter through VMware API.
"""
# NOTE(sdague) for nicer compute_driver specification
from nova.virt.vmwareapi import driver
+# VMwareESXDriver is deprecated in Juno. This module-level alias
+# allows configurations that reference it to keep working while
+# logging a deprecation warning.
VMwareESXDriver = driver.VMwareESXDriver
VMwareVCDriver = driver.VMwareVCDriver
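
A sketch of the compatibility pattern used here: the old class name stays
importable, but constructing it only emits a warning rather than a functional
driver. warnings.warn stands in for the LOG.warn plumbing in driver.py:

    import warnings

    class VMwareVCDriver(object):
        """The supported vCenter driver."""

    class VMwareESXDriver(object):
        """Stub kept so old compute_driver settings still import."""
        def __init__(self, *args, **kwargs):
            warnings.warn('VMwareESXDriver is deprecated; use '
                          'vmwareapi.VMwareVCDriver instead.',
                          DeprecationWarning)
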
diff --git a/nova/virt/vmwareapi/constants.py b/nova/virt/vmwareapi/constants.py
index 6493c556e7..449618247c 100644
--- a/nova/virt/vmwareapi/constants.py
+++ b/nova/virt/vmwareapi/constants.py
@@ -18,8 +18,15 @@
from nova.network import model as network_model
+DISK_FORMAT_ISO = 'iso'
+DISK_FORMAT_VMDK = 'vmdk'
+DISK_FORMATS_ALL = [DISK_FORMAT_ISO, DISK_FORMAT_VMDK]
+
+DISK_TYPE_SPARSE = 'sparse'
+DISK_TYPE_PREALLOCATED = 'preallocated'
DEFAULT_VIF_MODEL = network_model.VIF_MODEL_E1000
DEFAULT_OS_TYPE = "otherGuest"
DEFAULT_ADAPTER_TYPE = "lsiLogic"
-DEFAULT_DISK_TYPE = "preallocated"
+DEFAULT_DISK_TYPE = DISK_TYPE_PREALLOCATED
+DEFAULT_DISK_FORMAT = DISK_FORMAT_VMDK
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 313ef5f87c..0244b29bff 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -16,7 +16,7 @@
# under the License.
"""
-A connection to the VMware ESX/vCenter platform.
+A connection to the VMware vCenter platform.
"""
import re
@@ -28,8 +28,7 @@
import suds
from nova import exception
-from nova.openstack.common.gettextutils import _
-from nova.openstack.common.gettextutils import _LC
+from nova.i18n import _, _LC, _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
@@ -48,19 +47,18 @@
vmwareapi_opts = [
cfg.StrOpt('host_ip',
- help='Hostname or IP address for connection to VMware ESX/VC '
+ help='Hostname or IP address for connection to VMware VC '
'host.'),
cfg.IntOpt('host_port',
default=443,
- help='Port for connection to VMware ESX/VC host.'),
+ help='Port for connection to VMware VC host.'),
cfg.StrOpt('host_username',
- help='Username for connection to VMware ESX/VC host.'),
+ help='Username for connection to VMware VC host.'),
cfg.StrOpt('host_password',
- help='Password for connection to VMware ESX/VC host.',
+ help='Password for connection to VMware VC host.',
secret=True),
cfg.MultiStrOpt('cluster_name',
- help='Name of a VMware Cluster ComputeResource. Used only if '
- 'compute_driver is vmwareapi.VMwareVCDriver.'),
+ help='Name of a VMware Cluster ComputeResource.'),
cfg.StrOpt('datastore_regex',
help='Regex to match the name of a datastore.'),
cfg.FloatOpt('task_poll_interval',
@@ -87,38 +85,45 @@
TIME_BETWEEN_API_CALL_RETRIES = 1.0
+# The ESX driver was removed in the transition from Icehouse to
+# Juno, but may still be referenced in configuration files. The
+# following stub allows those configurations to work while logging a
+# deprecation warning.
class VMwareESXDriver(driver.ComputeDriver):
"""The ESX host connection object."""
+ def _do_deprecation_warning(self):
+ LOG.warn(_LW('The VMware ESX driver is now deprecated and has been '
+ 'removed in the Juno release. The VC driver will remain '
+ 'and continue to be supported.'))
+
+ def __init__(self, virtapi, read_only=False, scheme="https"):
+ self._do_deprecation_warning()
+
+
+class VMwareVCDriver(driver.ComputeDriver):
+ """The VC host connection object."""
+
capabilities = {
"has_imagecache": True,
"supports_recreate": False,
}
- # VMwareAPI has both ESXi and vCenter API sets.
- # The ESXi API are a proper sub-set of the vCenter API.
- # That is to say, nearly all valid ESXi calls are
- # valid vCenter calls. There are some small edge-case
- # exceptions regarding VNC, CIM, User management & SSO.
-
- def _do_deprecation_warning(self):
- LOG.warning(_('The VMware ESX driver is now deprecated and will be '
- 'removed in the Juno release. The VC driver will remain '
- 'and continue to be supported.'))
-
- def __init__(self, virtapi, read_only=False, scheme="https"):
- super(VMwareESXDriver, self).__init__(virtapi)
+ # The vCenter driver includes APIs that act on ESX hosts or groups
+ # of ESX hosts in clusters or non-cluster logical groupings.
+ #
+ # vCenter is not a hypervisor itself, it works with multiple
+ # hypervisor host machines and their guests. This fact can
+ # subtly alter how vSphere and OpenStack interoperate.
- self._do_deprecation_warning()
+ def __init__(self, virtapi, scheme="https"):
+ super(VMwareVCDriver, self).__init__(virtapi)
- self._host_ip = CONF.vmware.host_ip
- if not (self._host_ip or CONF.vmware.host_username is None or
- CONF.vmware.host_password is None):
- raise Exception(_("Must specify host_ip, "
- "host_username "
- "and host_password to use "
- "compute_driver=vmwareapi.VMwareESXDriver or "
- "vmwareapi.VMwareVCDriver"))
+ if (CONF.vmware.host_ip is None or
+ CONF.vmware.host_username is None or
+ CONF.vmware.host_password is None):
+ raise Exception(_("Must specify host_ip, host_username and "
+ "host_password to use vmwareapi.VMwareVCDriver"))
self._datastore_regex = None
if CONF.vmware.datastore_regex:
@@ -130,23 +135,41 @@ def __init__(self, virtapi, read_only=False, scheme="https"):
% CONF.vmware.datastore_regex)
self._session = VMwareAPISession(scheme=scheme)
- self._volumeops = volumeops.VMwareVolumeOps(self._session)
- self._vmops = vmops.VMwareVMOps(self._session, self.virtapi,
- self._volumeops,
- datastore_regex=self._datastore_regex)
- self._host = host.Host(self._session)
- self._host_state = None
-
- #TODO(hartsocks): back-off into a configuration test module.
+
+ # TODO(hartsocks): back-off into a configuration test module.
if CONF.vmware.use_linked_clone is None:
raise error_util.UseLinkedCloneConfigurationFault()
- @property
- def host_state(self):
- if not self._host_state:
- self._host_state = host.HostState(self._session,
- self._host_ip)
- return self._host_state
+ # Get the list of clusters to be used
+ self._cluster_names = CONF.vmware.cluster_name
+ self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session,
+ self._cluster_names)
+ if not self.dict_mors:
+ raise exception.NotFound(_("None of the clusters specified in %s"
+ " were found in the vCenter")
+ % self._cluster_names)
+
+ # Check whether any clusters specified in nova.conf are missing
+ # from the vCenter, and log a warning for each missing cluster.
+ clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
+ missing_clusters = set(self._cluster_names) - set(clusters_found)
+ if missing_clusters:
+ LOG.warn(_LW("The following clusters could not be found in the "
+ "vCenter %s") % list(missing_clusters))
+
+ # The _resources is used to maintain the vmops, volumeops and vcstate
+ # objects per cluster
+ self._resources = {}
+ self._resource_keys = set()
+ self._virtapi = virtapi
+ self._update_resources()
+
+ # The following initialization is necessary since the base class does
+ # not use VC state.
+ first_cluster = self._resources.keys()[0]
+ self._vmops = self._resources.get(first_cluster).get('vmops')
+ self._volumeops = self._resources.get(first_cluster).get('volumeops')
+ self._vc_state = self._resources.get(first_cluster).get('vcstate')
def init_host(self, host):
vim = self._session.vim
@@ -164,76 +187,11 @@ def cleanup_host(self, host):
except suds.WebFault:
LOG.debug("No vSphere session was open during cleanup_host.")
- def list_instances(self):
- """List VM instances."""
- return self._vmops.list_instances()
-
- def spawn(self, context, instance, image_meta, injected_files,
- admin_password, network_info=None, block_device_info=None):
- """Create VM instance."""
- self._vmops.spawn(context, instance, image_meta, injected_files,
- admin_password, network_info, block_device_info)
-
- def snapshot(self, context, instance, image_id, update_task_state):
- """Create snapshot from a running VM instance."""
- self._vmops.snapshot(context, instance, image_id, update_task_state)
-
- def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None, bad_volumes_callback=None):
- """Reboot VM instance."""
- self._vmops.reboot(instance, network_info)
-
- def destroy(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None):
- """Destroy VM instance."""
-
- # Destroy gets triggered when Resource Claim in resource_tracker
- # is not successful. When resource claim is not successful,
- # node is not set in instance. Perform destroy only if node is set
- if not instance['node']:
- return
-
- self._vmops.destroy(instance, network_info, destroy_disks)
-
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
- def pause(self, instance):
- """Pause VM instance."""
- self._vmops.pause(instance)
-
- def unpause(self, instance):
- """Unpause paused VM instance."""
- self._vmops.unpause(instance)
-
- def suspend(self, instance):
- """Suspend the specified instance."""
- self._vmops.suspend(instance)
-
- def resume(self, context, instance, network_info, block_device_info=None):
- """Resume the suspended VM instance."""
- self._vmops.resume(instance)
-
- def rescue(self, context, instance, network_info, image_meta,
- rescue_password):
- """Rescue the specified instance."""
- self._vmops.rescue(context, instance, network_info, image_meta)
-
- def unrescue(self, instance, network_info):
- """Unrescue the specified instance."""
- self._vmops.unrescue(instance)
-
- def power_off(self, instance):
- """Power off the specified instance."""
- self._vmops.power_off(instance)
-
- def power_on(self, context, instance, network_info,
- block_device_info=None):
- """Power on the specified instance."""
- self._vmops.power_on(instance)
-
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
@@ -241,11 +199,11 @@ def resume_state_on_host_boot(self, context, instance, network_info,
# anything if it is.
instances = self.list_instances()
if instance['uuid'] not in instances:
- LOG.warn(_('Instance cannot be found in host, or in an unknown'
- 'state.'), instance=instance)
+ LOG.warn(_LW('Instance cannot be found in host, or in an unknown '
+ 'state.'), instance=instance)
else:
state = vm_util.get_vm_state_from_name(self._session,
- instance['uuid'])
+ instance['uuid'])
ignored_states = ['poweredon', 'suspended']
if state.lower() in ignored_states:
@@ -254,173 +212,13 @@ def resume_state_on_host_boot(self, context, instance, network_info,
# Be as absolute as possible about getting it back into
# a known and running state.
self.reboot(context, instance, network_info, 'hard',
- block_device_info)
-
- def poll_rebooting_instances(self, timeout, instances):
- """Poll for rebooting instances."""
- self._vmops.poll_rebooting_instances(timeout, instances)
-
- def get_info(self, instance):
- """Return info about the VM instance."""
- return self._vmops.get_info(instance)
-
- def get_diagnostics(self, instance):
- """Return data about VM diagnostics."""
- return self._vmops.get_diagnostics(instance)
-
- def get_vnc_console(self, context, instance):
- """Return link to instance's VNC console."""
- return self._vmops.get_vnc_console(instance)
-
- def get_volume_connector(self, instance):
- """Return volume connector information."""
- return self._volumeops.get_volume_connector(instance)
-
- def get_host_ip_addr(self):
- """Retrieves the IP address of the ESX host."""
- return self._host_ip
-
- def attach_volume(self, context, connection_info, instance, mountpoint,
- disk_bus=None, device_type=None, encryption=None):
- """Attach volume storage to VM instance."""
- return self._volumeops.attach_volume(connection_info,
- instance,
- mountpoint)
-
- def detach_volume(self, connection_info, instance, mountpoint,
- encryption=None):
- """Detach volume storage to VM instance."""
- return self._volumeops.detach_volume(connection_info,
- instance,
- mountpoint)
-
- def get_console_pool_info(self, console_type):
- """Get info about the host on which the VM resides."""
- return {'address': CONF.vmware.host_ip,
- 'username': CONF.vmware.host_username,
- 'password': CONF.vmware.host_password}
-
- def _get_available_resources(self, host_stats):
- return {'vcpus': host_stats['vcpus'],
- 'memory_mb': host_stats['host_memory_total'],
- 'local_gb': host_stats['disk_total'],
- 'vcpus_used': 0,
- 'memory_mb_used': host_stats['host_memory_total'] -
- host_stats['host_memory_free'],
- 'local_gb_used': host_stats['disk_used'],
- 'hypervisor_type': host_stats['hypervisor_type'],
- 'hypervisor_version': host_stats['hypervisor_version'],
- 'hypervisor_hostname': host_stats['hypervisor_hostname'],
- 'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
- 'supported_instances': jsonutils.dumps(
- host_stats['supported_instances']),
- }
-
- def get_available_resource(self, nodename):
- """Retrieve resource information.
-
- This method is called when nova-compute launches, and
- as part of a periodic task that records the results in the DB.
-
- :returns: dictionary describing resources
-
- """
- host_stats = self.get_host_stats(refresh=True)
-
- # Updating host information
- return self._get_available_resources(host_stats)
-
- def get_host_stats(self, refresh=False):
- """Return the current state of the host.
-
- If 'refresh' is True, run the update first.
- """
- return self.host_state.get_host_stats(refresh=refresh)
-
- def host_power_action(self, host, action):
- """Reboots, shuts down or powers up the host."""
- return self._host.host_power_action(host, action)
-
- def host_maintenance_mode(self, host, mode):
- """Start/Stop host maintenance window. On start, it triggers
- guest VMs evacuation.
- """
- return self._host.host_maintenance_mode(host, mode)
-
- def set_host_enabled(self, host, enabled):
- """Sets the specified host's ability to accept new instances."""
- return self._host.set_host_enabled(host, enabled)
-
- def get_host_uptime(self, host):
- return 'Please refer to %s for the uptime' % CONF.vmware.host_ip
-
- def inject_network_info(self, instance, nw_info):
- """inject network info for specified instance."""
- self._vmops.inject_network_info(instance, nw_info)
+ block_device_info)
def list_instance_uuids(self):
"""List VM instance UUIDs."""
uuids = self._vmops.list_instances()
return [uuid for uuid in uuids if uuidutils.is_uuid_like(uuid)]
- def manage_image_cache(self, context, all_instances):
- """Manage the local cache of images."""
- self._vmops.manage_image_cache(context, all_instances)
-
- def instance_exists(self, instance):
- """Efficient override of base instance_exists method."""
- return self._vmops.instance_exists(instance)
-
-
-class VMwareVCDriver(VMwareESXDriver):
- """The VC host connection object."""
-
- # The vCenter driver includes several additional VMware vSphere
- # capabilities that include API that act on hosts or groups of
- # hosts in clusters or non-cluster logical-groupings.
- #
- # vCenter is not a hypervisor itself, it works with multiple
- # hypervisor host machines and their guests. This fact can
- # subtly alter how vSphere and OpenStack interoperate.
-
- def _do_deprecation_warning(self):
- # Driver validated by VMware's Minesweeper CI
- pass
-
- def __init__(self, virtapi, read_only=False, scheme="https"):
- super(VMwareVCDriver, self).__init__(virtapi)
-
- # Get the list of clusters to be used
- self._cluster_names = CONF.vmware.cluster_name
- self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session,
- self._cluster_names)
- if not self.dict_mors:
- raise exception.NotFound(_("All clusters specified %s were not"
- " found in the vCenter")
- % self._cluster_names)
-
- # Check if there are any clusters that were specified in the nova.conf
- # but are not in the vCenter, for missing clusters log a warning.
- clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()]
- missing_clusters = set(self._cluster_names) - set(clusters_found)
- if missing_clusters:
- LOG.warn(_("The following clusters could not be found in the"
- " vCenter %s") % list(missing_clusters))
-
- # The _resources is used to maintain the vmops, volumeops and vcstate
- # objects per cluster
- self._resources = {}
- self._resource_keys = set()
- self._virtapi = virtapi
- self._update_resources()
-
- # The following initialization is necessary since the base class does
- # not use VC state.
- first_cluster = self._resources.keys()[0]
- self._vmops = self._resources.get(first_cluster).get('vmops')
- self._volumeops = self._resources.get(first_cluster).get('volumeops')
- self._vc_state = self._resources.get(first_cluster).get('vcstate')
-
def list_instances(self):
"""List VM instances from all nodes."""
instances = []
@@ -432,10 +230,12 @@ def list_instances(self):
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
- block_device_info=None):
+ block_device_info=None,
+ timeout=0, retry_interval=0):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
+ # TODO(PhilDay): Add support for timeout (clean shutdown)
_vmops = self._get_vmops_for_compute_node(instance['node'])
return _vmops.migrate_disk_and_power_off(context, instance,
dest, flavor)
@@ -477,11 +277,12 @@ def rollback_live_migration_at_destination(self, context, instance,
"""Clean up destination node after a failed live migration."""
self.destroy(context, instance, network_info, block_device_info)
+ def get_instance_disk_info(self, instance_name, block_device_info=None):
+ pass
+
def get_vnc_console(self, context, instance):
"""Return link to instance's VNC console using vCenter logic."""
- # In this situation, ESXi and vCenter require different
- # API logic to create a valid VNC console connection object.
- # In specific, vCenter does not actually run the VNC service
+ # vCenter does not actually run the VNC service
# itself. You must talk to the VNC host underneath vCenter.
_vmops = self._get_vmops_for_compute_node(instance['node'])
return _vmops.get_vnc_console(instance)
@@ -505,8 +306,7 @@ def _update_resources(self):
added_nodes = set(self.dict_mors.keys()) - set(self._resource_keys)
for node in added_nodes:
_volumeops = volumeops.VMwareVolumeOps(self._session,
- self.dict_mors[node]['cluster_mor'],
- vc_support=True)
+ self.dict_mors[node]['cluster_mor'])
_vmops = vmops.VMwareVCVMOps(self._session, self._virtapi,
_volumeops,
self.dict_mors[node]['cluster_mor'],
@@ -571,6 +371,22 @@ def _get_vc_state_for_compute_node(self, nodename):
resource = self._get_resource_for_node(nodename)
return resource['vcstate']
+ def _get_available_resources(self, host_stats):
+ return {'vcpus': host_stats['vcpus'],
+ 'memory_mb': host_stats['host_memory_total'],
+ 'local_gb': host_stats['disk_total'],
+ 'vcpus_used': 0,
+ 'memory_mb_used': host_stats['host_memory_total'] -
+ host_stats['host_memory_free'],
+ 'local_gb_used': host_stats['disk_used'],
+ 'hypervisor_type': host_stats['hypervisor_type'],
+ 'hypervisor_version': host_stats['hypervisor_version'],
+ 'hypervisor_hostname': host_stats['hypervisor_hostname'],
+ 'cpu_info': jsonutils.dumps(host_stats['cpu_info']),
+ 'supported_instances': jsonutils.dumps(
+ host_stats['supported_instances']),
+ }
+
def get_available_resource(self, nodename):
"""Retrieve resource info.
@@ -650,6 +466,10 @@ def get_volume_connector(self, instance):
_volumeops = self._get_volumeops_for_compute_node(instance['node'])
return _volumeops.get_volume_connector(instance)
+ def get_host_ip_addr(self):
+ """Returns the IP address of the vCenter host."""
+ return CONF.vmware.host_ip
+
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
@@ -672,7 +492,7 @@ def destroy(self, context, instance, network_info, block_device_info=None,
return
_vmops = self._get_vmops_for_compute_node(instance['node'])
- _vmops.destroy(instance, network_info, destroy_disks)
+ _vmops.destroy(instance, destroy_disks)
def pause(self, instance):
"""Pause VM instance."""
@@ -705,8 +525,9 @@ def unrescue(self, instance, network_info):
_vmops = self._get_vmops_for_compute_node(instance.node)
_vmops.unrescue(instance)
- def power_off(self, instance):
+ def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
+ # TODO(PhilDay): Add support for timeout (clean shutdown)
_vmops = self._get_vmops_for_compute_node(instance['node'])
_vmops.power_off(instance)
@@ -730,7 +551,14 @@ def get_info(self, instance):
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
_vmops = self._get_vmops_for_compute_node(instance['node'])
- return _vmops.get_diagnostics(instance)
+ data = _vmops.get_diagnostics(instance)
+ return data
+
+ def get_instance_diagnostics(self, instance):
+ """Return data about VM diagnostics."""
+ _vmops = self._get_vmops_for_compute_node(instance['node'])
+ data = _vmops.get_instance_diagnostics(instance)
+ return data
def host_power_action(self, host, action):
"""Host operations not supported by VC driver.
@@ -790,9 +618,19 @@ def instance_exists(self, instance):
_vmops = self._get_vmops_for_compute_node(instance['node'])
return _vmops.instance_exists(instance)
+ def attach_interface(self, instance, image_meta, vif):
+ """Attach an interface to the instance."""
+ _vmops = self._get_vmops_for_compute_node(instance.node)
+ _vmops.attach_interface(instance, image_meta, vif)
+
+ def detach_interface(self, instance, vif):
+ """Detach an interface from the instance."""
+ _vmops = self._get_vmops_for_compute_node(instance.node)
+ _vmops.detach_interface(instance, vif)
+
class VMwareAPISession(object):
- """Sets up a session with the VC/ESX host and handles all
+ """Sets up a session with the VC host and handles all
the calls made to the host.
"""
@@ -818,7 +656,7 @@ def _get_vim_object(self):
port=self._host_port)
def _create_session(self):
- """Creates a session with the VC/ESX host."""
+ """Creates a session with the VC host."""
delay = 1
diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py
index ad3fa1eda7..004f47a562 100644
--- a/nova/virt/vmwareapi/ds_util.py
+++ b/nova/virt/vmwareapi/ds_util.py
@@ -17,11 +17,15 @@
"""
import posixpath
-from nova.openstack.common.gettextutils import _
+from nova import exception
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
+ALLOWED_DATASTORE_TYPES = ['VMFS', 'NFS']
class Datastore(object):
@@ -91,12 +95,14 @@ class DatastorePath(object):
file path to a virtual disk.
Note:
- - Datastore path representations always uses forward slash as separator
+
+ * Datastore path representations always use forward slash as separator
(hence the use of the posixpath module).
- - Datastore names are enclosed in square brackets.
- - Path part of datastore path is relative to the root directory
+ * Datastore names are enclosed in square brackets.
+ * Path part of datastore path is relative to the root directory
of the datastore, and is always separated from the [ds_name] part with
a single space.
+
"""
VMDK_EXTENSION = "vmdk"
@@ -137,6 +143,19 @@ def dirname(self):
def rel_path(self):
return self._rel_path
+ def join(self, *paths):
+ if paths:
+ if None in paths:
+ raise ValueError(_("path component cannot be None"))
+ return DatastorePath(self.datastore,
+ posixpath.join(self._rel_path, *paths))
+ return self
+
+ def __eq__(self, other):
+ return (isinstance(other, DatastorePath) and
+ self._datastore_name == other._datastore_name and
+ self._rel_path == other._rel_path)
+
@classmethod
def parse(cls, datastore_path):
"""Constructs a DatastorePath object given a datastore path string."""
@@ -152,20 +171,175 @@ def parse(cls, datastore_path):
return cls(datastore_name, path.strip())
-# TODO(vui): remove after converting all callers to use Datastore.build_path()
-def build_datastore_path(datastore_name, path):
- """Build the datastore compliant path."""
- return str(DatastorePath(datastore_name, path))
+# NOTE(mdbooth): this convenience function is temporarily duplicated in
+# vm_util. The correct fix is to handle paginated results as they are returned
+# from the relevant vim_util function. However, vim_util is currently
+# effectively deprecated as we migrate to oslo.vmware. This duplication will be
+# removed when we fix it properly in oslo.vmware.
+def _get_token(results):
+ """Get the token from the property results."""
+ return getattr(results, 'token', None)
+
+def _select_datastore(data_stores, best_match, datastore_regex=None):
+ """Find the most preferable datastore in a given RetrieveResult object.
-def file_delete(session, datastore_path, dc_ref):
- LOG.debug("Deleting the datastore file %s", datastore_path)
+ :param data_stores: a RetrieveResult object from vSphere API call
+ :param best_match: the current best match for datastore
+ :param datastore_regex: an optional regular expression to match names
+ :return: datastore_ref, datastore_name, capacity, freespace
+ """
+
+ # data_stores is actually a RetrieveResult object from vSphere API call
+ for obj_content in data_stores.objects:
+ # the propSet attribute "need not be set" by the returning API
+ if not hasattr(obj_content, 'propSet'):
+ continue
+
+ propdict = vm_util.propset_dict(obj_content.propSet)
+ if _is_datastore_valid(propdict, datastore_regex):
+ new_ds = Datastore(
+ ref=obj_content.obj,
+ name=propdict['summary.name'],
+ capacity=propdict['summary.capacity'],
+ freespace=propdict['summary.freeSpace'])
+ # favor datastores with more free space
+ if (best_match is None or
+ new_ds.freespace > best_match.freespace):
+ best_match = new_ds
+
+ return best_match
+
+
+def _is_datastore_valid(propdict, datastore_regex):
+ """Checks if a datastore is valid based on the following criteria.
+
+ Criteria:
+ - Datastore is accessible
+ - Datastore is not in maintenance mode (optional)
+ - Datastore is of a supported disk type
+ - Datastore matches the supplied regex (optional)
+
+ :param propdict: datastore summary dict
+ :param datastore_regex: Regex to match the name of a datastore.
+ """
+
+ # vSphere doesn't support CIFS or vfat for datastores, so those
+ # types are filtered out here
+ return (propdict.get('summary.accessible') and
+ (propdict.get('summary.maintenanceMode') is None or
+ propdict.get('summary.maintenanceMode') == 'normal') and
+ propdict['summary.type'] in ALLOWED_DATASTORE_TYPES and
+ (datastore_regex is None or
+ datastore_regex.match(propdict['summary.name'])))
+
+
+def get_datastore(session, cluster=None, host=None, datastore_regex=None):
+ """Get the datastore list and choose the most preferable one."""
+ if cluster is None and host is None:
+ data_stores = session._call_method(vim_util, "get_objects",
+ "Datastore", ["summary.type", "summary.name",
+ "summary.capacity", "summary.freeSpace",
+ "summary.accessible",
+ "summary.maintenanceMode"])
+ else:
+ if cluster is not None:
+ datastore_ret = session._call_method(
+ vim_util,
+ "get_dynamic_property", cluster,
+ "ClusterComputeResource", "datastore")
+ else:
+ datastore_ret = session._call_method(
+ vim_util,
+ "get_dynamic_property", host,
+ "HostSystem", "datastore")
+
+ if not datastore_ret:
+ raise exception.DatastoreNotFound()
+ data_store_mors = datastore_ret.ManagedObjectReference
+ data_stores = session._call_method(vim_util,
+ "get_properties_for_a_collection_of_objects",
+ "Datastore", data_store_mors,
+ ["summary.type", "summary.name",
+ "summary.capacity", "summary.freeSpace",
+ "summary.accessible",
+ "summary.maintenanceMode"])
+ best_match = None
+ while data_stores:
+ best_match = _select_datastore(data_stores, best_match,
+ datastore_regex)
+ token = _get_token(data_stores)
+ if not token:
+ break
+ data_stores = session._call_method(vim_util,
+ "continue_to_get_objects",
+ token)
+ if best_match:
+ return best_match
+ if datastore_regex:
+ raise exception.DatastoreNotFound(
+ _("Datastore regex %s did not match any datastores")
+ % datastore_regex.pattern)
+ else:
+ raise exception.DatastoreNotFound()
+
+
+def _get_allowed_datastores(data_stores, datastore_regex):
+ allowed = []
+ for obj_content in data_stores.objects:
+ # the propSet attribute "need not be set" by the returning API
+ if not hasattr(obj_content, 'propSet'):
+ continue
+
+ propdict = vm_util.propset_dict(obj_content.propSet)
+ if _is_datastore_valid(propdict, datastore_regex):
+ allowed.append(Datastore(ref=obj_content.obj,
+ name=propdict['summary.name']))
+
+ return allowed
+
+
+def get_available_datastores(session, cluster=None, datastore_regex=None):
+ """Get the datastore list and choose the first local storage."""
+ if cluster:
+ mobj = cluster
+ resource_type = "ClusterComputeResource"
+ else:
+ mobj = vm_util.get_host_ref(session)
+ resource_type = "HostSystem"
+ ds = session._call_method(vim_util, "get_dynamic_property", mobj,
+ resource_type, "datastore")
+ if not ds:
+ return []
+ data_store_mors = ds.ManagedObjectReference
+ # NOTE(garyk): use utility method to retrieve remote objects
+ data_stores = session._call_method(vim_util,
+ "get_properties_for_a_collection_of_objects",
+ "Datastore", data_store_mors,
+ ["summary.type", "summary.name", "summary.accessible",
+ "summary.maintenanceMode"])
+
+ allowed = []
+ while data_stores:
+ allowed.extend(_get_allowed_datastores(data_stores, datastore_regex))
+ token = _get_token(data_stores)
+ if not token:
+ break
+
+ data_stores = session._call_method(vim_util,
+ "continue_to_get_objects",
+ token)
+ return allowed
+
+
+def file_delete(session, ds_path, dc_ref):
+ LOG.debug("Deleting the datastore file %s", ds_path)
vim = session._get_vim()
file_delete_task = session._call_method(
session._get_vim(),
"DeleteDatastoreFile_Task",
vim.get_service_content().fileManager,
- name=datastore_path,
+ name=str(ds_path),
datacenter=dc_ref)
session._wait_for_task(file_delete_task)
LOG.debug("Deleted the datastore file")
@@ -176,22 +350,24 @@ def file_move(session, dc_ref, src_file, dst_file):
The list of possible faults that the server can return on error
include:
- - CannotAccessFile: Thrown if the source file or folder cannot be
- moved because of insufficient permissions.
- - FileAlreadyExists: Thrown if a file with the given name already
- exists at the destination.
- - FileFault: Thrown if there is a generic file error
- - FileLocked: Thrown if the source file or folder is currently
- locked or in use.
- - FileNotFound: Thrown if the file or folder specified by sourceName
- is not found.
- - InvalidDatastore: Thrown if the operation cannot be performed on
- the source or destination datastores.
- - NoDiskSpace: Thrown if there is not enough space available on the
- destination datastore.
- - RuntimeFault: Thrown if any type of runtime fault is thrown that
- is not covered by the other faults; for example,
- a communication error.
+
+ * CannotAccessFile: Thrown if the source file or folder cannot be
+ moved because of insufficient permissions.
+ * FileAlreadyExists: Thrown if a file with the given name already
+ exists at the destination.
+ * FileFault: Thrown if there is a generic file error
+ * FileLocked: Thrown if the source file or folder is currently
+ locked or in use.
+ * FileNotFound: Thrown if the file or folder specified by sourceName
+ is not found.
+ * InvalidDatastore: Thrown if the operation cannot be performed on
+ the source or destination datastores.
+ * NoDiskSpace: Thrown if there is not enough space available on the
+ destination datastore.
+ * RuntimeFault: Thrown if any type of runtime fault is thrown that
+ is not covered by the other faults; for example,
+ a communication error.
+
"""
LOG.debug("Moving file from %(src)s to %(dst)s.",
{'src': src_file, 'dst': dst_file})
@@ -200,9 +376,9 @@ def file_move(session, dc_ref, src_file, dst_file):
session._get_vim(),
"MoveDatastoreFile_Task",
vim.get_service_content().fileManager,
- sourceName=src_file,
+ sourceName=str(src_file),
sourceDatacenter=dc_ref,
- destinationName=dst_file,
+ destinationName=str(dst_file),
destinationDatacenter=dc_ref)
session._wait_for_task(move_task)
LOG.debug("File moved")
@@ -222,7 +398,7 @@ def file_exists(session, ds_browser, ds_path, file_name):
search_task = session._call_method(session._get_vim(),
"SearchDatastore_Task",
ds_browser,
- datastorePath=ds_path,
+ datastorePath=str(ds_path),
searchSpec=search_spec)
try:
task_info = session._wait_for_task(search_task)
@@ -242,7 +418,7 @@ def mkdir(session, ds_path, dc_ref):
LOG.debug("Creating directory with path %s", ds_path)
session._call_method(session._get_vim(), "MakeDirectory",
session._get_vim().get_service_content().fileManager,
- name=ds_path, datacenter=dc_ref,
+ name=str(ds_path), datacenter=dc_ref,
createParentDirectories=True)
LOG.debug("Created directory with path %s", ds_path)
@@ -256,7 +432,7 @@ def get_sub_folders(session, ds_browser, ds_path):
session._get_vim(),
"SearchDatastore_Task",
ds_browser,
- datastorePath=ds_path)
+ datastorePath=str(ds_path))
try:
task_info = session._wait_for_task(search_task)
except error_util.FileNotFoundException:
diff --git a/nova/virt/vmwareapi/error_util.py b/nova/virt/vmwareapi/error_util.py
index ba01c931d3..a61a85a251 100644
--- a/nova/virt/vmwareapi/error_util.py
+++ b/nova/virt/vmwareapi/error_util.py
@@ -17,7 +17,7 @@
Exception classes and SOAP response error checking module.
"""
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@@ -32,6 +32,7 @@
INVALID_PROPERTY = 'InvalidProperty'
NO_PERMISSION = 'NoPermission'
NOT_AUTHENTICATED = 'NotAuthenticated'
+TASK_IN_PROGRESS = 'TaskInProgress'
class VimException(Exception):
@@ -203,6 +204,10 @@ class InvalidPowerStateException(VMwareDriverException):
code = 409
+class TaskInProgress(VMwareDriverException):
+ msg_fmt = _("Virtual machine is busy.")
+
+
# Populate the fault registry with the exceptions that have
# special treatment.
_fault_classes_registry = {
@@ -215,7 +220,8 @@ class InvalidPowerStateException(VMwareDriverException):
INVALID_POWER_STATE: InvalidPowerStateException,
INVALID_PROPERTY: InvalidPropertyException,
NO_PERMISSION: NoPermissionException,
- NOT_AUTHENTICATED: NotAuthenticatedException
+ NOT_AUTHENTICATED: NotAuthenticatedException,
+ TASK_IN_PROGRESS: TaskInProgress,
}
diff --git a/nova/virt/vmwareapi/host.py b/nova/virt/vmwareapi/host.py
index b1be80803c..2213126ed1 100644
--- a/nova/virt/vmwareapi/host.py
+++ b/nova/virt/vmwareapi/host.py
@@ -21,66 +21,16 @@
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import utils
+from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
-class Host(object):
- """Implements host related operations."""
- def __init__(self, session):
- self._session = session
-
- def host_power_action(self, host, action):
- """Reboots or shuts down the host."""
- host_mor = vm_util.get_host_ref(self._session)
- LOG.debug("%(action)s %(host)s", {'action': action, 'host': host})
- if action == "reboot":
- host_task = self._session._call_method(
- self._session._get_vim(),
- "RebootHost_Task", host_mor,
- force=False)
- elif action == "shutdown":
- host_task = self._session._call_method(
- self._session._get_vim(),
- "ShutdownHost_Task", host_mor,
- force=False)
- elif action == "startup":
- host_task = self._session._call_method(
- self._session._get_vim(),
- "PowerUpHostFromStandBy_Task", host_mor,
- timeoutSec=60)
- self._session._wait_for_task(host_task)
-
- def host_maintenance_mode(self, host, mode):
- """Start/Stop host maintenance window. On start, it triggers
- guest VMs evacuation.
- """
- host_mor = vm_util.get_host_ref(self._session)
- LOG.debug("Set maintenance mod on %(host)s to %(mode)s",
- {'host': host, 'mode': mode})
- if mode:
- host_task = self._session._call_method(
- self._session._get_vim(),
- "EnterMaintenanceMode_Task",
- host_mor, timeout=0,
- evacuatePoweredOffVms=True)
- else:
- host_task = self._session._call_method(
- self._session._get_vim(),
- "ExitMaintenanceMode_Task",
- host_mor, timeout=0)
- self._session._wait_for_task(host_task)
-
- def set_host_enabled(self, _host, enabled):
- """Sets the specified host's ability to accept new instances."""
- pass
-
-
def _get_ds_capacity_and_freespace(session, cluster=None):
try:
- ds = vm_util.get_datastore(session, cluster)
+ ds = ds_util.get_datastore(session, cluster)
return ds.capacity, ds.freespace
except exception.DatastoreNotFound:
return 0, 0
diff --git a/nova/virt/vmwareapi/imagecache.py b/nova/virt/vmwareapi/imagecache.py
index 617c78833b..5051c21085 100644
--- a/nova/virt/vmwareapi/imagecache.py
+++ b/nova/virt/vmwareapi/imagecache.py
@@ -37,7 +37,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -63,27 +63,27 @@ def __init__(self, session, base_folder):
self._base_folder = base_folder
self._ds_browser = {}
- def _folder_delete(self, path, dc_ref):
+ def _folder_delete(self, ds_path, dc_ref):
try:
- ds_util.file_delete(self._session, path, dc_ref)
+ ds_util.file_delete(self._session, ds_path, dc_ref)
except (error_util.CannotDeleteFileException,
error_util.FileFaultException,
error_util.FileLockedException) as e:
# There may be more than one process or thread that tries
# to delete the file.
LOG.warning(_("Unable to delete %(file)s. Exception: %(ex)s"),
- {'file': path, 'ex': e})
+ {'file': ds_path, 'ex': e})
except error_util.FileNotFoundException:
- LOG.debug("File not found: %s", path)
+ LOG.debug("File not found: %s", ds_path)
def timestamp_folder_get(self, ds_path, image_id):
"""Returns the timestamp folder."""
- return '%s/%s' % (ds_path, image_id)
+ return ds_path.join(image_id)
def timestamp_cleanup(self, dc_ref, ds_browser, ds_path):
ts = self._get_timestamp(ds_browser, ds_path)
if ts:
- ts_path = '%s/%s' % (ds_path, ts)
+ ts_path = ds_path.join(ts)
LOG.debug("Timestamp path %s exists. Deleting!", ts_path)
# Image is used - no longer need timestamp folder
self._folder_delete(ts_path, dc_ref)
@@ -119,7 +119,7 @@ def _list_datastore_images(self, ds_path, datastore):
- unexplained_images
- originals
"""
- ds_browser = self._get_ds_browser(datastore['ref'])
+ ds_browser = self._get_ds_browser(datastore.ref)
originals = ds_util.get_sub_folders(self._session, ds_browser,
ds_path)
return {'unexplained_images': [],
@@ -130,17 +130,16 @@ def _age_cached_images(self, context, datastore, dc_info,
"""Ages cached images."""
age_seconds = CONF.remove_unused_original_minimum_age_seconds
unused_images = self.originals - self.used_images
- ds_browser = self._get_ds_browser(datastore['ref'])
+ ds_browser = self._get_ds_browser(datastore.ref)
for image in unused_images:
path = self.timestamp_folder_get(ds_path, image)
# Lock to ensure that the spawn will not try and access a image
# that is currently being deleted on the datastore.
- with lockutils.lock(path, lock_file_prefix='nova-vmware-ts',
+ with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts',
external=True):
ts = self._get_timestamp(ds_browser, path)
if not ts:
- ts_path = '%s/%s' % (path,
- self._get_timestamp_filename())
+ ts_path = path.join(self._get_timestamp_filename())
try:
ds_util.mkdir(self._session, ts_path, dc_info.ref)
except error_util.FileAlreadyExistsException:
@@ -148,7 +147,7 @@ def _age_cached_images(self, context, datastore, dc_info,
LOG.info(_("Image %s is no longer used by this node. "
"Pending deletion!"), image)
else:
- dt = self._get_datetime_from_filename(ts)
+ dt = self._get_datetime_from_filename(str(ts))
if timeutils.is_older_than(dt, age_seconds):
LOG.info(_("Image %s is no longer used. "
"Deleting!"), path)
@@ -159,7 +158,7 @@ def _age_cached_images(self, context, datastore, dc_info,
# the timestamp.
for image in self.used_images:
path = self.timestamp_folder_get(ds_path, image)
- with lockutils.lock(path, lock_file_prefix='nova-vmware-ts',
+ with lockutils.lock(str(path), lock_file_prefix='nova-vmware-ts',
external=True):
self.timestamp_cleanup(dc_info.ref, ds_browser,
path)
@@ -176,8 +175,7 @@ def update(self, context, instances, datastores_info):
self.used_images = set(running['used_images'].keys())
# perform the aging and image verification per datastore
for (datastore, dc_info) in datastores_info:
- ds_path = ds_util.build_datastore_path(datastore['name'],
- self._base_folder)
+ ds_path = datastore.build_path(self._base_folder)
images = self._list_datastore_images(ds_path, datastore)
self.originals = images['originals']
self._age_cached_images(context, datastore, dc_info, ds_path)
diff --git a/nova/virt/vmwareapi/io_util.py b/nova/virt/vmwareapi/io_util.py
index 5c79df8772..10132dfee0 100644
--- a/nova/virt/vmwareapi/io_util.py
+++ b/nova/virt/vmwareapi/io_util.py
@@ -24,8 +24,8 @@
from eventlet import queue
from nova import exception
+from nova.i18n import _
from nova import image
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
diff --git a/nova/virt/vmwareapi/network_util.py b/nova/virt/vmwareapi/network_util.py
index 565c45db4c..20d9596ae6 100644
--- a/nova/virt/vmwareapi/network_util.py
+++ b/nova/virt/vmwareapi/network_util.py
@@ -19,7 +19,7 @@
"""
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util
diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py
index f611ccf20a..c6393e5e28 100644
--- a/nova/virt/vmwareapi/vif.py
+++ b/nova/virt/vmwareapi/vif.py
@@ -18,7 +18,7 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import network_util
@@ -154,18 +154,33 @@ def get_network_ref(session, cluster, vif, is_neutron):
return network_ref
+def get_vif_dict(session, cluster, vif_model, is_neutron, vif):
+ mac = vif['address']
+ name = vif['network']['bridge'] or CONF.vmware.integration_bridge
+ ref = get_network_ref(session, cluster, vif, is_neutron)
+ return {'network_name': name,
+ 'mac_address': mac,
+ 'network_ref': ref,
+ 'iface_id': vif['id'],
+ 'vif_model': vif_model}
+
+
def get_vif_info(session, cluster, is_neutron, vif_model, network_info):
vif_infos = []
- if not network_info:
+ if network_info is None:
return vif_infos
for vif in network_info:
- mac_address = vif['address']
- net_name = vif['network']['bridge'] or CONF.vmware.integration_bridge
- network_ref = get_network_ref(session, cluster, vif, is_neutron)
- vif_infos.append({'network_name': net_name,
- 'mac_address': mac_address,
- 'network_ref': network_ref,
- 'iface_id': vif['id'],
- 'vif_model': vif_model
- })
+ vif_infos.append(get_vif_dict(session, cluster, vif_model,
+ is_neutron, vif))
return vif_infos
+
+
+def get_network_device(hardware_devices, mac_address):
+ """Return the network device with MAC 'mac_address'."""
+ if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
+ hardware_devices = hardware_devices.VirtualDevice
+ for device in hardware_devices:
+ if device.__class__.__name__ in vm_util.ALL_SUPPORTED_NETWORK_DEVICES:
+ if hasattr(device, 'macAddress'):
+ if device.macAddress == mac_address:
+ return device
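
A usage sketch for the new helper: fetch the VM's hardware device list, then
look up the NIC by MAC so a later reconfigure spec can act on it. The session
plumbing shown here is illustrative, not lifted from this patch:

    # Illustrative plumbing; vm_ref and vif come from the caller.
    hardware_devices = session._call_method(
        vim_util, 'get_dynamic_property', vm_ref,
        'VirtualMachine', 'config.hardware.device')
    device = get_network_device(hardware_devices, vif['address'])
    if device is None:
        raise exception.NotFound(_('No device with MAC %s found on VM')
                                 % vif['address'])
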
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index f8eb76f203..119c0fef1e 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -19,12 +19,14 @@
"""
import httplib
+import time
import urllib2
+import decorator
from oslo.config import cfg
import suds
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova import utils
from nova.virt.vmwareapi import error_util
@@ -41,6 +43,21 @@
CONF.register_opt(vmwareapi_wsdl_loc_opt, 'vmware')
+@decorator.decorator
+def retry_if_task_in_progress(f, *args, **kwargs):
+ retries = max(CONF.vmware.api_retry_count, 1)
+ delay = 1
+ for attempt in range(1, retries + 1):
+ if attempt != 1:
+ time.sleep(delay)
+ delay = min(2 * delay, 60)
+ try:
+ f(*args, **kwargs)
+ return
+ except error_util.TaskInProgress:
+ pass
+
+
def get_moref(value, type):
"""Get managed object reference."""
moref = suds.sudsobject.Property(value)
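
retry_if_task_in_progress retries a TaskInProgress fault with exponential
backoff (1s, 2s, 4s, ... capped at 60s) for up to api_retry_count attempts;
note that, as written, the final fault is swallowed and the call returns None
once retries are exhausted. Typical usage would look like this sketch
(detach_interface and reconfigure_vm are hypothetical targets):

    @retry_if_task_in_progress
    def detach_interface(session, vm_ref, spec):
        # Raises error_util.TaskInProgress while another reconfigure
        # task holds the VM; the decorator sleeps and retries.
        reconfigure_vm(session, vm_ref, spec)
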
diff --git a/nova/virt/vmwareapi/vim_util.py b/nova/virt/vmwareapi/vim_util.py
index 313d35ffc7..e37c6cddf7 100644
--- a/nova/virt/vmwareapi/vim_util.py
+++ b/nova/virt/vmwareapi/vim_util.py
@@ -19,7 +19,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
vmware_opts = cfg.IntOpt('maximum_objects', default=100,
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index cbeea889e5..868a8bf6ba 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -24,13 +24,12 @@
from oslo.config import cfg
from nova import exception
+from nova.i18n import _
from nova.network import model as network_model
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import utils
from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util
@@ -93,6 +92,13 @@ def wrapper(session, name):
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
+def _iface_id_option_value(client_factory, iface_id, port_index):
+ opt = client_factory.create('ns0:OptionValue')
+ opt.key = "nvp.iface-id.%d" % port_index
+ opt.value = iface_id
+ return opt
+
+
def get_vm_create_spec(client_factory, instance, name, data_store_name,
vif_infos, os_type=constants.DEFAULT_OS_TYPE):
"""Builds the VM Create spec."""
@@ -125,7 +131,7 @@ def get_vm_create_spec(client_factory, instance, name, data_store_name,
vif_spec_list = []
for vif_info in vif_infos:
- vif_spec = create_network_spec(client_factory, vif_info)
+ vif_spec = _create_vif_spec(client_factory, vif_info)
vif_spec_list.append(vif_spec)
device_config_spec = vif_spec_list
@@ -139,14 +145,13 @@ def get_vm_create_spec(client_factory, instance, name, data_store_name,
opt.value = instance['uuid']
extra_config.append(opt)
- i = 0
+ port_index = 0
for vif_info in vif_infos:
if vif_info['iface_id']:
- opt = client_factory.create('ns0:OptionValue')
- opt.key = "nvp.iface-id.%d" % i
- opt.value = vif_info['iface_id']
- extra_config.append(opt)
- i += 1
+ extra_config.append(_iface_id_option_value(client_factory,
+ vif_info['iface_id'],
+ port_index))
+ port_index += 1
config_spec.extraConfig = extra_config
@@ -187,7 +192,7 @@ def create_controller_spec(client_factory, key,
return virtual_device_config
-def _convert_vif_model(name):
+def convert_vif_model(name):
"""Converts standard VIF_MODEL types to the internal VMware ones."""
if name == network_model.VIF_MODEL_E1000:
return 'VirtualE1000'
@@ -199,7 +204,7 @@ def _convert_vif_model(name):
return name
-def create_network_spec(client_factory, vif_info):
+def _create_vif_spec(client_factory, vif_info):
"""Builds a config spec for the addition of a new network
adapter to the VM.
"""
@@ -207,7 +212,7 @@ def create_network_spec(client_factory, vif_info):
network_spec.operation = "add"
    # Keep compatible with the vif model parameters of other hypervisors.
- vif_info['vif_model'] = _convert_vif_model(vif_info['vif_model'])
+ vif_info['vif_model'] = convert_vif_model(vif_info['vif_model'])
vif = 'ns0:' + vif_info['vif_model']
net_device = client_factory.create(vif)
@@ -260,6 +265,38 @@ def create_network_spec(client_factory, vif_info):
return network_spec
+def get_network_attach_config_spec(client_factory, vif_info, index):
+ """Builds the vif attach config spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+ vif_spec = _create_vif_spec(client_factory, vif_info)
+ config_spec.deviceChange = [vif_spec]
+ if vif_info['iface_id'] is not None:
+ config_spec.extraConfig = [_iface_id_option_value(client_factory,
+ vif_info['iface_id'],
+ index)]
+ return config_spec
+
+
+def get_network_detach_config_spec(client_factory, device, port_index):
+ """Builds the vif detach config spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+ virtual_device_config = client_factory.create(
+ 'ns0:VirtualDeviceConfigSpec')
+ virtual_device_config.operation = "remove"
+ virtual_device_config.device = device
+ config_spec.deviceChange = [virtual_device_config]
+ # If a key is already present then it cannot be deleted, only updated.
+ # This enables us to reuse this key if there is an additional
+ # attachment. The keys need to be preserved. This is due to the fact
+ # that there is logic on the ESX host that does the network wiring
+ # according to these values. If they are changed then this will
+ # break networking to and from the interface.
+ config_spec.extraConfig = [_iface_id_option_value(client_factory,
+ 'free',
+ port_index)]
+ return config_spec
+
+
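
The comment block above describes a key-reuse protocol: nvp.iface-id.<n> keys
are never deleted, only flipped between a real interface id (attached) and
the sentinel 'free' (detached), because wiring logic on the ESX side keys off
them. A sketch of that lifecycle with extraConfig modelled as a plain dict:

    extra_config = {}

    def attach(port_index, iface_id):
        extra_config['nvp.iface-id.%d' % port_index] = iface_id

    def detach(port_index):
        # Never delete the key; mark it reusable instead.
        extra_config['nvp.iface-id.%d' % port_index] = 'free'

    attach(0, 'port-uuid-a')
    detach(0)
    attach(0, 'port-uuid-b')   # slot 0 is reused, key preserved
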
def get_vmdk_attach_config_spec(client_factory,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
@@ -334,6 +371,16 @@ def get_vm_extra_config_spec(client_factory, extra_opts):
return config_spec
+def get_vmdk_path(session, vm_ref, instance):
+ """Gets the vmdk file path for specified instance."""
+ hardware_devices = session._call_method(vim_util,
+ "get_dynamic_property", vm_ref, "VirtualMachine",
+ "config.hardware.device")
+ (vmdk_path, adapter_type, disk_type) = get_vmdk_path_and_adapter_type(
+ hardware_devices, uuid=instance['uuid'])
+ return vmdk_path
+
+
def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None):
"""Gets the vmdk file path and the storage adapter type."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
@@ -689,6 +736,11 @@ def _get_allocated_vnc_ports(session):
return vnc_ports
+# NOTE(mdbooth): this convenience function is temporarily duplicated in
+# ds_util. The correct fix is to handle paginated results as they are returned
+# from the relevant vim_util function. However, vim_util is currently
+# effectively deprecated as we migrate to oslo.vmware. This duplication will be
+# removed when we fix it properly in oslo.vmware.
def _get_token(results):
"""Get the token from the property results."""
return getattr(results, 'token', None)
@@ -947,7 +999,7 @@ def get_stats_from_cluster(session, cluster):
for obj in result.objects:
hardware_summary = obj.propSet[0].val
runtime_summary = obj.propSet[1].val
- if (runtime_summary.inMaintenanceMode == False and
+ if (runtime_summary.inMaintenanceMode is False and
runtime_summary.connectionState == "connected"):
# Total vcpus is the sum of all pCPUs of individual hosts
# The overcommitment ratio is factored in by the scheduler
@@ -996,9 +1048,9 @@ def propset_dict(propset):
that are returned by the VMware API.
You can read more about these at:
- http://pubs.vmware.com/vsphere-51/index.jsp
- #com.vmware.wssdk.apiref.doc/
- vmodl.query.PropertyCollector.ObjectContent.html
+ | http://pubs.vmware.com/vsphere-51/index.jsp
+ | #com.vmware.wssdk.apiref.doc/
+ | vmodl.query.PropertyCollector.ObjectContent.html
:param propset: a property "set" from ObjectContent
:return: dictionary representing property set
@@ -1006,147 +1058,11 @@ def propset_dict(propset):
if propset is None:
return {}
- #TODO(hartsocks): once support for Python 2.6 is dropped
+ # TODO(hartsocks): once support for Python 2.6 is dropped
    # change to {prop.name: prop.val for prop in propset}
return dict([(prop.name, prop.val) for prop in propset])
-def _select_datastore(data_stores, best_match, datastore_regex=None):
- """Find the most preferable datastore in a given RetrieveResult object.
-
- :param data_stores: a RetrieveResult object from vSphere API call
- :param best_match: the current best match for datastore
- :param datastore_regex: an optional regular expression to match names
- :return: datastore_ref, datastore_name, capacity, freespace
- """
-
- # data_stores is actually a RetrieveResult object from vSphere API call
- for obj_content in data_stores.objects:
- # the propset attribute "need not be set" by returning API
- if not hasattr(obj_content, 'propSet'):
- continue
-
- propdict = propset_dict(obj_content.propSet)
- # Local storage identifier vSphere doesn't support CIFS or
- # vfat for datastores, therefore filtered
- ds_type = propdict['summary.type']
- ds_name = propdict['summary.name']
- if ((ds_type == 'VMFS' or ds_type == 'NFS') and
- propdict.get('summary.accessible')):
- if datastore_regex is None or datastore_regex.match(ds_name):
- new_ds = ds_util.Datastore(
- ref=obj_content.obj,
- name=ds_name,
- capacity=propdict['summary.capacity'],
- freespace=propdict['summary.freeSpace'])
- # favor datastores with more free space
- if (best_match is None or
- new_ds.freespace > best_match.freespace):
- best_match = new_ds
-
- return best_match
-
-
-def get_datastore(session, cluster=None, host=None, datastore_regex=None):
- """Get the datastore list and choose the most preferable one."""
- if cluster is None and host is None:
- data_stores = session._call_method(vim_util, "get_objects",
- "Datastore", ["summary.type", "summary.name",
- "summary.capacity", "summary.freeSpace",
- "summary.accessible"])
- else:
- if cluster is not None:
- datastore_ret = session._call_method(
- vim_util,
- "get_dynamic_property", cluster,
- "ClusterComputeResource", "datastore")
- else:
- datastore_ret = session._call_method(
- vim_util,
- "get_dynamic_property", host,
- "HostSystem", "datastore")
-
- if not datastore_ret:
- raise exception.DatastoreNotFound()
- data_store_mors = datastore_ret.ManagedObjectReference
- data_stores = session._call_method(vim_util,
- "get_properties_for_a_collection_of_objects",
- "Datastore", data_store_mors,
- ["summary.type", "summary.name",
- "summary.capacity", "summary.freeSpace",
- "summary.accessible"])
- best_match = None
- while data_stores:
- best_match = _select_datastore(data_stores, best_match,
- datastore_regex)
- token = _get_token(data_stores)
- if not token:
- break
- data_stores = session._call_method(vim_util,
- "continue_to_get_objects",
- token)
- if best_match:
- return best_match
- if datastore_regex:
- raise exception.DatastoreNotFound(
- _("Datastore regex %s did not match any datastores")
- % datastore_regex.pattern)
- else:
- raise exception.DatastoreNotFound()
-
-
-def _get_allowed_datastores(data_stores, datastore_regex, allowed_types):
- allowed = []
- for obj_content in data_stores.objects:
- # the propset attribute "need not be set" by returning API
- if not hasattr(obj_content, 'propSet'):
- continue
-
- propdict = propset_dict(obj_content.propSet)
- # Local storage identifier vSphere doesn't support CIFS or
- # vfat for datastores, therefore filtered
- ds_type = propdict['summary.type']
- ds_name = propdict['summary.name']
- if (propdict['summary.accessible'] and ds_type in allowed_types):
- if datastore_regex is None or datastore_regex.match(ds_name):
- allowed.append({'ref': obj_content.obj, 'name': ds_name})
-
- return allowed
-
-
-def get_available_datastores(session, cluster=None, datastore_regex=None):
- """Get the datastore list and choose the first local storage."""
- if cluster:
- mobj = cluster
- resource_type = "ClusterComputeResource"
- else:
- mobj = get_host_ref(session)
- resource_type = "HostSystem"
- ds = session._call_method(vim_util, "get_dynamic_property", mobj,
- resource_type, "datastore")
- if not ds:
- return []
- data_store_mors = ds.ManagedObjectReference
- # NOTE(garyk): use utility method to retrieve remote objects
- data_stores = session._call_method(vim_util,
- "get_properties_for_a_collection_of_objects",
- "Datastore", data_store_mors,
- ["summary.type", "summary.name", "summary.accessible"])
-
- allowed = []
- while data_stores:
- allowed.extend(_get_allowed_datastores(data_stores, datastore_regex,
- ['VMFS', 'NFS']))
- token = _get_token(data_stores)
- if not token:
- break
-
- data_stores = session._call_method(vim_util,
- "continue_to_get_objects",
- token)
- return allowed
-
-
def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
@@ -1270,14 +1186,15 @@ def get_dict_mor(session, list_obj):
{ value = "domain-1002", _type = "ClusterComputeResource" }
Output data format:
- dict_mors = {
- 'respool-1001': { 'cluster_mor': clusterMor,
- 'res_pool_mor': resourcePoolMor,
- 'name': display_name },
- 'domain-1002': { 'cluster_mor': clusterMor,
- 'res_pool_mor': resourcePoolMor,
- 'name': display_name },
- }
+ | dict_mors = {
+ | 'respool-1001': { 'cluster_mor': clusterMor,
+ | 'res_pool_mor': resourcePoolMor,
+ | 'name': display_name },
+ | 'domain-1002': { 'cluster_mor': clusterMor,
+ | 'res_pool_mor': resourcePoolMor,
+ | 'name': display_name },
+ | }
+
"""
dict_mors = {}
for obj_ref, path in list_obj:
@@ -1419,7 +1336,8 @@ def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref,
raise error_util.MissingParameter(param="vm_ref")
# Get the clone vm spec
client_factory = session._get_vim().client.factory
- rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref)
+ rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref,
+ disk_move_type='moveAllDiskBackingsAndDisallowSharing')
extra_opts = {'nvp.vm-uuid': instance['uuid']}
config_spec = get_vm_extra_config_spec(client_factory, extra_opts)
config_spec.instanceUuid = instance['uuid']
@@ -1512,11 +1430,10 @@ def power_on_instance(session, instance, vm_ref=None):
LOG.debug("VM already powered on", instance=instance)
-def get_values_from_object_properties(session, props, properties):
+def get_values_from_object_properties(session, props):
"""Get the specific values from a object list.
- The object values will be returned as a dictionary. The keys for the
- dictionary will be the 'properties'.
+ The object values will be returned as a dictionary.
"""
dictionary = {}
while props:
@@ -1531,3 +1448,62 @@ def get_values_from_object_properties(session, props, properties):
"continue_to_get_objects",
token)
return dictionary
+
+
+def _get_vm_port_indices(session, vm_ref):
+ extra_config = session._call_method(vim_util,
+ 'get_dynamic_property',
+ vm_ref, 'VirtualMachine',
+ 'config.extraConfig')
+ ports = []
+ if extra_config is not None:
+ options = extra_config.OptionValue
+ for option in options:
+ if (option.key.startswith('nvp.iface-id.') and
+ option.value != 'free'):
+ ports.append(int(option.key.split('.')[2]))
+ return ports
+
+
+def get_attach_port_index(session, vm_ref):
+ """Get the first free port index."""
+ ports = _get_vm_port_indices(session, vm_ref)
+ # No ports are configured on the VM
+ if not ports:
+ return 0
+ ports.sort()
+ configured_ports_len = len(ports)
+ # Find the first free port index
+ for port_index in range(configured_ports_len):
+ if port_index != ports[port_index]:
+ return port_index
+ return configured_ports_len
+
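+# For example (hypothetical state): a VM whose extraConfig holds the options
+# 'nvp.iface-id.0', 'nvp.iface-id.1' and 'nvp.iface-id.3' (none set to
+# 'free') yields ports == [0, 1, 3], so get_attach_port_index returns 2,
+# the first unused index; when there are no gaps it returns len(ports).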
+
+def get_vm_detach_port_index(session, vm_ref, iface_id):
+ extra_config = session._call_method(vim_util,
+ 'get_dynamic_property',
+ vm_ref, 'VirtualMachine',
+ 'config.extraConfig')
+ if extra_config is not None:
+ options = extra_config.OptionValue
+ for option in options:
+ if (option.key.startswith('nvp.iface-id.') and
+ option.value == iface_id):
+ return int(option.key.split('.')[2])
+
+
+def power_off_instance(session, instance, vm_ref=None):
+ """Power off the specified instance."""
+
+ if vm_ref is None:
+ vm_ref = get_vm_ref(session, instance)
+
+ LOG.debug("Powering off the VM", instance=instance)
+ try:
+ poweroff_task = session._call_method(session._get_vim(),
+ "PowerOffVM_Task", vm_ref)
+ session._wait_for_task(poweroff_task)
+ LOG.debug("Powered off the VM", instance=instance)
+ except error_util.InvalidPowerStateException:
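+        # The power-off task fails with InvalidPowerStateException when the
+        # VM is already powered off; treat that as success and just log it.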
+ LOG.debug("VM already powered off", instance=instance)
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 50057315dc..4bab4427b7 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -32,15 +32,15 @@
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
+from nova.i18n import _, _LE
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _, _LE
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
-from nova.openstack.common import strutils
from nova.openstack.common import units
from nova.openstack.common import uuidutils
from nova import utils
from nova.virt import configdrive
+from nova.virt import diagnostics
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
@@ -66,8 +66,6 @@
'poweredOn': power_state.RUNNING,
'suspended': power_state.SUSPENDED}
-VMWARE_LINKED_CLONE = 'vmware_linked_clone'
-
RESIZE_TOTAL_STEPS = 4
DcInfo = collections.namedtuple('DcInfo',
@@ -133,7 +131,8 @@ def _extend_virtual_disk(self, instance, requested_size, name, dc_ref):
# Clean up files created during the extend operation
files = [name.replace(".vmdk", "-flat.vmdk"), name]
for file in files:
- self._delete_datastore_file(instance, file, dc_ref)
+ ds_path = ds_util.DatastorePath.parse(file)
+ self._delete_datastore_file(instance, ds_path, dc_ref)
LOG.debug("Extended root virtual disk")
@@ -150,14 +149,57 @@ def _delete_datastore_file(self, instance, datastore_path, dc_ref):
exc_info=True)
def _get_vmdk_path(self, ds_name, folder, name):
- path = "%s/%s.vmdk" % (folder, name)
- return ds_util.build_datastore_path(ds_name, path)
+ return str(ds_util.DatastorePath(ds_name, folder, '%s.vmdk' % name))
+
+ def _extend_if_required(self, dc_info, image_info, instance,
+ root_vmdk_path):
+ """Increase the size of the root vmdk if necessary."""
+ if instance.root_gb > image_info.file_size_in_gb:
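+            # root_gb is in GiB while the virtual disk extend operation
+            # takes a size in KiB, so multiply by units.Mi (1024 * 1024).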
+ size_in_kb = instance.root_gb * units.Mi
+ self._extend_virtual_disk(instance, size_in_kb,
+ root_vmdk_path, dc_info.ref)
+
+ def _configure_config_drive(self, instance, vm_ref, dc_info, datastore,
+ injected_files, admin_password):
+ session_vim = self._session._get_vim()
+ cookies = session_vim.client.options.transport.cookiejar
+
+ uploaded_iso_path = self._create_config_drive(instance,
+ injected_files,
+ admin_password,
+ datastore.name,
+ dc_info.name,
+ instance['uuid'],
+ cookies)
+ uploaded_iso_path = datastore.build_path(uploaded_iso_path)
+ self._attach_cdrom_to_vm(
+ vm_ref, instance,
+ datastore.ref,
+ str(uploaded_iso_path))
+
+ def build_virtual_machine(self, instance, instance_name, image_info,
+ dc_info, datastore, network_info):
+ node_mo_id = vm_util.get_mo_id_from_instance(instance)
+ res_pool_ref = vm_util.get_res_pool_ref(self._session,
+ self._cluster, node_mo_id)
+ vif_infos = vmwarevif.get_vif_info(self._session,
+ self._cluster,
+ utils.is_neutron(),
+ image_info.vif_model,
+ network_info)
- def _get_disk_format(self, image_meta):
- disk_format = image_meta.get('disk_format')
- if disk_format not in ['iso', 'vmdk', None]:
- raise exception.InvalidDiskFormat(disk_format=disk_format)
- return (disk_format, disk_format == 'iso')
+ # Get the create vm config spec
+ client_factory = self._session._get_vim().client.factory
+ config_spec = vm_util.get_vm_create_spec(client_factory,
+ instance,
+ instance_name,
+ datastore.name,
+ vif_infos,
+ image_info.os_type)
+ # Create the VM
+ vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder,
+ config_spec, res_pool_ref)
+ return vm_ref
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None,
@@ -166,108 +208,54 @@ def spawn(self, context, instance, image_meta, injected_files,
Steps followed are:
- 1. Create a VM with no disk and the specifics in the instance object
+ #. Create a VM with no disk and the specifics in the instance object
like RAM size.
- 2. For flat disk
- 2.1. Create a dummy vmdk of the size of the disk file that is to be
- uploaded. This is required just to create the metadata file.
- 2.2. Delete the -flat.vmdk file created in the above step and retain
- the metadata .vmdk file.
- 2.3. Upload the disk file.
- 3. For sparse disk
- 3.1. Upload the disk file to a -sparse.vmdk file.
- 3.2. Copy/Clone the -sparse.vmdk file to a thin vmdk.
- 3.3. Delete the -sparse.vmdk file.
- 4. Attach the disk to the VM by reconfiguring the same.
- 5. Power on the VM.
+ #. For flat disk
+
+ #. Create a dummy vmdk of the size of the disk file that is to be
+ uploaded. This is required just to create the metadata file.
+ #. Delete the -flat.vmdk file created in the above step and retain
+ the metadata .vmdk file.
+ #. Upload the disk file.
+
+ #. For sparse disk
+
+ #. Upload the disk file to a -sparse.vmdk file.
+ #. Copy/Clone the -sparse.vmdk file to a thin vmdk.
+ #. Delete the -sparse.vmdk file.
+
+ #. Attach the disk to the VM by reconfiguring the same.
+ #. Power on the VM.
+
"""
- ebs_root = False
- if block_device_info:
- msg = "Block device information present: %s" % block_device_info
- # NOTE(mriedem): block_device_info can contain an auth_password
- # so we have to scrub the message before logging it.
- LOG.debug(logging.mask_password(msg), instance=instance)
- block_device_mapping = driver.block_device_info_get_mapping(
- block_device_info)
- if block_device_mapping:
- ebs_root = True
- (file_type, is_iso) = self._get_disk_format(image_meta)
+ # NOTE(hartsocks): some of the logic below relies on instance_name
+ # even when it is not set by the caller.
+ if instance_name is None:
+ instance_name = instance.uuid
client_factory = self._session._get_vim().client.factory
- datastore = vm_util.get_datastore(
+ datastore = ds_util.get_datastore(
self._session, self._cluster,
datastore_regex=self._datastore_regex)
dc_info = self.get_datacenter_ref_and_name(datastore.ref)
- #TODO(hartsocks): this pattern is confusing, reimplement as methods
- # The use of nested functions in this file makes for a confusing and
- # hard to maintain file. At some future date, refactor this method to
- # be a full-fledged method. This will also make unit testing easier.
- def _get_image_properties(root_size):
- """Get the Size of the flat vmdk file that is there on the storage
- repository.
- """
- image_ref = instance.get('image_ref')
- if image_ref:
- _image_info = vmware_images.get_vmdk_size_and_properties(
- context, image_ref, instance)
- else:
- # The case that the image may be booted from a volume
- _image_info = (root_size, {})
-
- image_size, image_properties = _image_info
- vmdk_file_size_in_kb = int(image_size) / 1024
- os_type = image_properties.get("vmware_ostype",
- constants.DEFAULT_OS_TYPE)
- adapter_type = image_properties.get("vmware_adaptertype",
- constants.DEFAULT_ADAPTER_TYPE)
- disk_type = image_properties.get("vmware_disktype",
- constants.DEFAULT_DISK_TYPE)
- # Get the network card type from the image properties.
- vif_model = image_properties.get("hw_vif_model",
- constants.DEFAULT_VIF_MODEL)
-
- # Fetch the image_linked_clone data here. It is retrieved
- # with the above network based API call. To retrieve it
- # later will necessitate additional network calls using the
- # identical method. Consider this a cache.
- image_linked_clone = image_properties.get(VMWARE_LINKED_CLONE)
-
- return (vmdk_file_size_in_kb, os_type, adapter_type, disk_type,
- vif_model, image_linked_clone)
-
- root_gb = instance['root_gb']
- root_gb_in_kb = root_gb * units.Mi
-
- (vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model,
- image_linked_clone) = _get_image_properties(root_gb_in_kb)
-
- if root_gb_in_kb and vmdk_file_size_in_kb > root_gb_in_kb:
+ image_info = vmware_images.VMwareImage.from_image(instance.image_ref,
+ image_meta)
+ if (instance.root_gb != 0 and
+ image_info.file_size_in_gb > instance.root_gb):
reason = _("Image disk size greater than requested disk size")
- raise exception.InstanceUnacceptable(instance_id=instance['uuid'],
+ raise exception.InstanceUnacceptable(instance_id=instance.uuid,
reason=reason)
- node_mo_id = vm_util.get_mo_id_from_instance(instance)
- res_pool_ref = vm_util.get_res_pool_ref(self._session,
- self._cluster, node_mo_id)
-
- vif_infos = vmwarevif.get_vif_info(self._session, self._cluster,
- utils.is_neutron(), vif_model,
- network_info)
-
- # Get the instance name. In some cases this may differ from the 'uuid',
- # for example when the spawn of a rescue instance takes place.
- if not instance_name:
- instance_name = instance['uuid']
-
- # Create the VM
- config_spec = vm_util.get_vm_create_spec(
- client_factory, instance, instance_name,
- datastore.name, vif_infos, os_type)
-
- vm_ref = vm_util.create_vm(self._session, instance, dc_info.vmFolder,
- config_spec, res_pool_ref)
+ # Creates the virtual machine. The virtual machine reference returned
+ # is unique within Virtual Center.
+ vm_ref = self.build_virtual_machine(instance,
+ instance_name,
+ image_info,
+ dc_info,
+ datastore,
+ network_info)
# Cache the vm_ref. This saves a remote call to the VC. This uses the
# instance_name. This covers all use cases including rescue and resize.
@@ -283,37 +271,60 @@ def _get_image_properties(root_size):
vnc_port = vm_util.get_vnc_port(self._session)
self._set_vnc_config(client_factory, instance, vnc_port)
- if not ebs_root:
- # this logic allows for instances or images to decide
- # for themselves which strategy is best for them.
+ block_device_mapping = []
+ if block_device_info is not None:
+ block_device_mapping = driver.block_device_info_get_mapping(
+ block_device_info)
+
+ # NOTE(mdbooth): the logic here is that we ignore the image if there
+    # are block device mappings. This behaviour is incorrect, and is a bug
+    # in the driver. We should be able to accept an image and block device
+ # mappings.
+ if len(block_device_mapping) > 0:
+ msg = "Block device information present: %s" % block_device_info
+ # NOTE(mriedem): block_device_info can contain an auth_password
+ # so we have to scrub the message before logging it.
+ LOG.debug(logging.mask_password(msg), instance=instance)
+
+ for root_disk in block_device_mapping:
+ connection_info = root_disk['connection_info']
+            # TODO(hartsocks): instance is unnecessary, remove it.
+            # We still use instance in many locations for no other purpose
+            # than logging; can we simplify this?
+ self._volumeops.attach_root_volume(connection_info, instance,
+ self._default_root_device,
+ datastore.ref)
+ else:
+            # TODO(hartsocks): Refactor this image handling section.
+            # The next section handles manipulating various image types
+            # as well as preparing those images' virtual disks for mounting
+            # in our virtual machine.
- linked_clone = VMwareVMOps.decide_linked_clone(
- image_linked_clone,
- CONF.vmware.use_linked_clone
- )
- upload_name = instance['image_ref']
+ upload_name = instance.image_ref
upload_folder = '%s/%s' % (self._base_folder, upload_name)
# The vmdk meta-data file
uploaded_file_path = str(datastore.build_path(
- upload_folder, "%s.%s" % (upload_name, file_type)))
+ upload_folder,
+ "%s.%s" % (upload_name, image_info.file_type)))
session_vim = self._session._get_vim()
cookies = session_vim.client.options.transport.cookiejar
ds_browser = self._get_ds_browser(datastore.ref)
- upload_file_name = upload_name + ".%s" % file_type
+ upload_file_name = upload_name + ".%s" % image_info.file_type
# Check if the timestamp file exists - if so then delete it. This
# will ensure that the aging will not delete a cache image if it
# is going to be used now.
if CONF.remove_unused_base_images:
- ds_path = str(datastore.build_path(self._base_folder))
+ ds_path = datastore.build_path(self._base_folder)
path = self._imagecache.timestamp_folder_get(ds_path,
upload_name)
            # Lock to ensure that the spawn will not try to access an image
# that is currently being deleted on the datastore.
- with lockutils.lock(path, lock_file_prefix='nova-vmware-ts',
+ with lockutils.lock(str(path),
+ lock_file_prefix='nova-vmware-ts',
external=True):
self._imagecache.timestamp_cleanup(dc_info.ref, ds_browser,
path)
@@ -345,27 +356,27 @@ def _get_image_properties(root_size):
upload_path_loc = datastore.build_path(
upload_folder, upload_file_name)
upload_rel_path = upload_path_loc.rel_path
- if not is_iso:
- if disk_type != "sparse":
+ if not image_info.is_iso:
+ if not image_info.is_sparse:
# Create a flat virtual disk and retain the metadata
# file. This will be done in the unique temporary
# directory.
ds_util.mkdir(
self._session,
- str(datastore.build_path(upload_folder)),
+ datastore.build_path(upload_folder),
dc_info.ref)
LOG.debug("Create virtual disk on %s",
datastore.name, instance=instance)
vm_util.create_virtual_disk(self._session,
dc_info.ref,
- adapter_type,
- disk_type,
+ image_info.adapter_type,
+ image_info.disk_type,
str(upload_path_loc),
- vmdk_file_size_in_kb)
+ image_info.file_size_in_kb)
LOG.debug("Virtual disk created on %s.",
datastore.name, instance=instance)
self._delete_datastore_file(instance,
- str(flat_ds_loc),
+ flat_ds_loc,
dc_info.ref)
upload_rel_path = flat_ds_loc.rel_path
else:
@@ -379,22 +390,23 @@ def _get_image_properties(root_size):
upload_rel_path,
cookies=cookies)
- if not is_iso and disk_type == "sparse":
+ if not image_info.is_iso and image_info.is_sparse:
# Copy the sparse virtual disk to a thin virtual disk.
disk_type = "thin"
- copy_spec = self.get_copy_virtual_disk_spec(client_factory,
- adapter_type,
- disk_type)
+ copy_spec = self.get_copy_virtual_disk_spec(
+ client_factory,
+ image_info.adapter_type,
+ disk_type)
vm_util.copy_virtual_disk(self._session, dc_info.ref,
str(sparse_ds_loc),
str(upload_path_loc),
copy_spec)
self._delete_datastore_file(instance,
- str(sparse_ds_loc),
+ sparse_ds_loc,
dc_info.ref)
base_folder = '%s/%s' % (self._base_folder, upload_name)
- dest_folder = str(datastore.build_path(base_folder))
- src_folder = str(datastore.build_path(upload_folder))
+ dest_folder = datastore.build_path(base_folder)
+ src_folder = datastore.build_path(upload_folder)
try:
ds_util.file_move(self._session, dc_info.ref,
src_folder, dest_folder)
@@ -407,27 +419,24 @@ def _get_image_properties(root_size):
# Delete the temp upload folder
self._delete_datastore_file(instance,
- str(datastore.build_path(
- tmp_upload_folder)),
+ datastore.build_path(
+ tmp_upload_folder),
dc_info.ref)
- else:
- # linked clone base disk exists
- if disk_type == "sparse":
- disk_type = "thin"
- if is_iso:
- if root_gb_in_kb:
+ if image_info.is_iso:
+ if instance.root_gb != 0:
dest_vmdk_path = self._get_vmdk_path(datastore.name,
- instance['uuid'], instance_name)
+ instance.uuid,
+ instance_name)
# Create the blank virtual disk for the VM
LOG.debug("Create blank virtual disk on %s",
datastore.name, instance=instance)
vm_util.create_virtual_disk(self._session,
dc_info.ref,
- adapter_type,
- disk_type,
+ image_info.adapter_type,
+ image_info.disk_type,
dest_vmdk_path,
- root_gb_in_kb)
+ image_info.file_size_in_kb)
LOG.debug("Blank virtual disk created on %s.",
datastore.name, instance=instance)
root_vmdk_path = dest_vmdk_path
@@ -435,27 +444,31 @@ def _get_image_properties(root_size):
root_vmdk_path = None
else:
# Extend the disk size if necessary
- if not linked_clone:
+ if not image_info.linked_clone:
# If we are not using linked_clone, copy the image from
# the cache into the instance directory. If we are using
                # linked clone it is referenced from the cache directory
dest_vmdk_path = self._get_vmdk_path(datastore.name,
instance_name, instance_name)
- copy_spec = self.get_copy_virtual_disk_spec(client_factory,
- adapter_type,
- disk_type)
+ copy_spec = self.get_copy_virtual_disk_spec(
+ client_factory,
+ image_info.adapter_type,
+ image_info.disk_type)
vm_util.copy_virtual_disk(self._session,
dc_info.ref,
uploaded_file_path,
dest_vmdk_path, copy_spec)
root_vmdk_path = dest_vmdk_path
- if root_gb_in_kb > vmdk_file_size_in_kb:
- self._extend_virtual_disk(instance, root_gb_in_kb,
- root_vmdk_path, dc_info.ref)
+ self._extend_if_required(dc_info, image_info, instance,
+ root_vmdk_path)
else:
upload_folder = '%s/%s' % (self._base_folder, upload_name)
- root_vmdk_name = "%s.%s.vmdk" % (upload_name, root_gb)
+ if instance.root_gb:
+ root_vmdk_name = "%s.%s.vmdk" % (upload_name,
+ instance.root_gb)
+ else:
+ root_vmdk_name = "%s.vmdk" % upload_name
root_vmdk_path = str(datastore.build_path(
upload_folder, root_vmdk_name))
@@ -479,10 +492,12 @@ def _get_image_properties(root_size):
upload_folder,
root_vmdk_name):
LOG.debug("Copying root disk of size %sGb",
- root_gb)
+ instance.root_gb)
copy_spec = self.get_copy_virtual_disk_spec(
- client_factory, adapter_type, disk_type)
+ client_factory,
+ image_info.adapter_type,
+ image_info.disk_type)
# Create a copy of the base image, ensuring we
# clean up on failure
@@ -513,48 +528,31 @@ def _get_image_properties(root_size):
# Resize the copy to the appropriate size. No need
                        # for cleanup here, as _extend_virtual_disk
# already does it
- if root_gb_in_kb > vmdk_file_size_in_kb:
- self._extend_virtual_disk(instance,
- root_gb_in_kb,
- root_vmdk_path,
- dc_info.ref)
+ self._extend_if_required(dc_info, image_info,
+ instance, root_vmdk_path)
# Attach the root disk to the VM.
- if root_vmdk_path:
+ if root_vmdk_path is not None:
self._volumeops.attach_disk_to_vm(
- vm_ref, instance,
- adapter_type, disk_type, root_vmdk_path,
- root_gb_in_kb, linked_clone)
-
- if is_iso:
+ vm_ref,
+ instance,
+ image_info.adapter_type,
+ image_info.disk_type,
+ root_vmdk_path,
+ instance.root_gb * units.Mi,
+ image_info.linked_clone)
+
+ if image_info.is_iso:
self._attach_cdrom_to_vm(
vm_ref, instance,
datastore.ref,
uploaded_file_path)
if configdrive.required_by(instance):
- uploaded_iso_path = self._create_config_drive(instance,
- injected_files,
- admin_password,
- datastore.name,
- dc_info.name,
- instance['uuid'],
- cookies)
- uploaded_iso_path = ds_util.build_datastore_path(
- datastore.name,
- uploaded_iso_path)
- self._attach_cdrom_to_vm(
- vm_ref, instance,
- datastore.ref,
- uploaded_iso_path)
+ self._configure_config_drive(
+ instance, vm_ref, dc_info, datastore, injected_files,
+ admin_password)
- else:
- # Attach the root disk to the VM.
- for root_disk in block_device_mapping:
- connection_info = root_disk['connection_info']
- self._volumeops.attach_root_volume(connection_info, instance,
- self._default_root_device,
- datastore.ref)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
@@ -594,9 +592,8 @@ def _create_config_drive(self, instance, injected_files, admin_password,
e, instance=instance)
def _attach_cdrom_to_vm(self, vm_ref, instance,
- datastore, file_path):
+ datastore, file_path):
"""Attach cdrom to VM by reconfiguration."""
- instance_name = instance['name']
client_factory = self._session._get_vim().client.factory
devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -612,59 +609,11 @@ def _attach_cdrom_to_vm(self, vm_ref, instance,
if controller_spec:
cdrom_attach_config_spec.deviceChange.append(controller_spec)
- LOG.debug("Reconfiguring VM instance %(instance_name)s to attach "
- "cdrom %(file_path)s",
- {'instance_name': instance_name, 'file_path': file_path})
+ LOG.debug("Reconfiguring VM instance to attach cdrom %s",
+ file_path, instance=instance)
vm_util.reconfigure_vm(self._session, vm_ref, cdrom_attach_config_spec)
- LOG.debug("Reconfigured VM instance %(instance_name)s to attach "
- "cdrom %(file_path)s",
- {'instance_name': instance_name, 'file_path': file_path})
-
- @staticmethod
- def decide_linked_clone(image_linked_clone, global_linked_clone):
- """Explicit decision logic: whether to use linked clone on a vmdk.
-
- This is *override* logic not boolean logic.
-
- 1. let the image over-ride if set at all
- 2. default to the global setting
-
- In math terms, I need to allow:
- glance image to override global config.
-
- That is g vs c. "g" for glance. "c" for Config.
-
- So, I need g=True vs c=False to be True.
- And, I need g=False vs c=True to be False.
- And, I need g=None vs c=True to be True.
-
- Some images maybe independently best tuned for use_linked_clone=True
- saving datastorage space. Alternatively a whole OpenStack install may
- be tuned to performance use_linked_clone=False but a single image
- in this environment may be best configured to save storage space and
- set use_linked_clone=True only for itself.
-
- The point is: let each layer of control override the layer beneath it.
-
- rationale:
- For technical discussion on the clone strategies and their trade-offs
- see: https://www.vmware.com/support/ws5/doc/ws_clone_typeofclone.html
-
- :param image_linked_clone: boolean or string or None
- :param global_linked_clone: boolean or string or None
- :return: Boolean
- """
-
- value = None
-
- # Consider the values in order of override.
- if image_linked_clone is not None:
- value = image_linked_clone
- else:
- # this will never be not-set by this point.
- value = global_linked_clone
-
- return strutils.bool_from_string(value)
+ LOG.debug("Reconfigured VM instance to attach cdrom %s",
+ file_path, instance=instance)
def get_copy_virtual_disk_spec(self, client_factory, adapter_type,
disk_type):
@@ -677,7 +626,7 @@ def _create_vm_snapshot(self, instance, vm_ref):
snapshot_task = self._session._call_method(
self._session._get_vim(),
"CreateSnapshot_Task", vm_ref,
- name="%s-snapshot" % instance['uuid'],
+ name="%s-snapshot" % instance.uuid,
description="Taking Snapshot of the VM",
memory=False,
quiesce=True)
@@ -689,6 +638,7 @@ def _create_vm_snapshot(self, instance, vm_ref):
snapshot = task_info.result
return snapshot
+ @vim.retry_if_task_in_progress
def _delete_vm_snapshot(self, instance, vm_ref, snapshot):
LOG.debug("Deleting Snapshot of the VM instance", instance=instance)
delete_snapshot_task = self._session._call_method(
@@ -724,7 +674,7 @@ def _get_vm_and_vmdk_attribs():
"VirtualMachine", "config.hardware.device")
(vmdk_file_path_before_snapshot, adapter_type,
disk_type) = vm_util.get_vmdk_path_and_adapter_type(
- hw_devices, uuid=instance['uuid'])
+ hw_devices, uuid=instance.uuid)
if not vmdk_file_path_before_snapshot:
LOG.debug("No root disk defined. Unable to snapshot.")
raise error_util.NoRootDiskDefined()
@@ -761,10 +711,10 @@ def _check_if_tmp_folder_exists():
# will be copied to. A random name is chosen so that we don't have
# name clashes.
random_name = uuidutils.generate_uuid()
- dest_vmdk_file_path = ds_util.build_datastore_path(datastore_name,
- "%s/%s.vmdk" % (self._tmp_folder, random_name))
- dest_vmdk_data_file_path = ds_util.build_datastore_path(datastore_name,
- "%s/%s-flat.vmdk" % (self._tmp_folder, random_name))
+ dest_vmdk_file_path = ds_util.DatastorePath(
+ datastore_name, self._tmp_folder, "%s.vmdk" % random_name)
+ dest_vmdk_data_file_path = ds_util.DatastorePath(
+ datastore_name, self._tmp_folder, "%s-flat.vmdk" % random_name)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
def _copy_vmdk_content():
@@ -781,7 +731,7 @@ def _copy_vmdk_content():
service_content.virtualDiskManager,
sourceName=vmdk_file_path_before_snapshot,
sourceDatacenter=dc_info.ref,
- destName=dest_vmdk_file_path,
+ destName=str(dest_vmdk_file_path),
destDatacenter=dc_info.ref,
destSpec=copy_spec,
force=False)
@@ -791,7 +741,6 @@ def _copy_vmdk_content():
instance=instance)
_copy_vmdk_content()
- # Note(vui): handle snapshot cleanup on exceptions.
self._delete_vm_snapshot(instance, vm_ref, snapshot)
cookies = self._session._get_vim().client.options.transport.cookiejar
@@ -842,8 +791,7 @@ def reboot(self, instance, network_info):
props = self._session._call_method(vim_util, "get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
- query = vm_util.get_values_from_object_properties(self._session, props,
- lst_properties)
+ query = vm_util.get_values_from_object_properties(self._session, props)
pwr_state = query['runtime.powerState']
tools_status = query['summary.guest.toolsStatus']
tools_running_status = query['summary.guest.toolsRunningStatus']
@@ -868,12 +816,12 @@ def reboot(self, instance, network_info):
self._session._wait_for_task(reset_task)
LOG.debug("Did hard reboot of VM", instance=instance)
- def _destroy_instance(self, instance, network_info, destroy_disks=True,
+ def _destroy_instance(self, instance, destroy_disks=True,
instance_name=None):
# Destroy a VM instance
# Get the instance name. In some cases this may differ from the 'uuid',
# for example when the spawn of a rescue instance takes place.
- if not instance_name:
+ if instance_name is None:
instance_name = instance['uuid']
try:
vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)
@@ -887,7 +835,7 @@ def _destroy_instance(self, instance, network_info, destroy_disks=True,
"get_object_properties",
None, vm_ref, "VirtualMachine", lst_properties)
query = vm_util.get_values_from_object_properties(
- self._session, props, lst_properties)
+ self._session, props)
pwr_state = query['runtime.powerState']
vm_config_pathname = query['config.files.vmPathName']
vm_ds_path = None
@@ -896,12 +844,7 @@ def _destroy_instance(self, instance, network_info, destroy_disks=True,
# Power off the VM if it is in PoweredOn state.
if pwr_state == "poweredOn":
- LOG.debug("Powering off the VM", instance=instance)
- poweroff_task = self._session._call_method(
- self._session._get_vim(),
- "PowerOffVM_Task", vm_ref)
- self._session._wait_for_task(poweroff_task)
- LOG.debug("Powered off the VM", instance=instance)
+ vm_util.power_off_instance(self._session, instance, vm_ref)
# Un-register the VM
try:
@@ -917,7 +860,7 @@ def _destroy_instance(self, instance, network_info, destroy_disks=True,
# the datastore.
if destroy_disks and vm_ds_path:
try:
- dir_ds_compliant_path = str(vm_ds_path.parent)
+ dir_ds_compliant_path = vm_ds_path.parent
LOG.debug("Deleting contents of the VM from "
"datastore %(datastore_name)s",
{'datastore_name': vm_ds_path.datastore},
@@ -941,7 +884,7 @@ def _destroy_instance(self, instance, network_info, destroy_disks=True,
finally:
vm_util.vm_ref_cache_delete(instance_name)
- def destroy(self, instance, network_info, destroy_disks=True):
+ def destroy(self, instance, destroy_disks=True):
"""Destroy a VM instance.
Steps followed for each VM are:
@@ -958,11 +901,10 @@ def destroy(self, instance, network_info, destroy_disks=True):
LOG.debug("Rescue VM destroyed", instance=instance)
except Exception:
rescue_name = instance['uuid'] + self._rescue_suffix
- self._destroy_instance(instance, network_info,
+ self._destroy_instance(instance,
destroy_disks=destroy_disks,
instance_name=rescue_name)
- self._destroy_instance(instance, network_info,
- destroy_disks=destroy_disks)
+ self._destroy_instance(instance, destroy_disks=destroy_disks)
LOG.debug("Instance destroyed", instance=instance)
def pause(self, instance):
@@ -1063,50 +1005,24 @@ def unrescue(self, instance, power_on=True):
"get_dynamic_property", vm_rescue_ref,
"VirtualMachine", "config.hardware.device")
device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk_path)
- self._power_off_vm_ref(vm_rescue_ref)
+ vm_util.power_off_instance(self._session, r_instance, vm_rescue_ref)
self._volumeops.detach_disk_from_vm(vm_rescue_ref, r_instance, device)
- self._destroy_instance(r_instance, None, instance_name=instance_name)
+ self._destroy_instance(r_instance, instance_name=instance_name)
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
- def _power_off_vm_ref(self, vm_ref):
- """Power off the specifed vm.
-
- :param vm_ref: a reference object to the VM.
- """
- poweroff_task = self._session._call_method(
- self._session._get_vim(),
- "PowerOffVM_Task", vm_ref)
- self._session._wait_for_task(poweroff_task)
-
def power_off(self, instance):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
"""
- vm_ref = vm_util.get_vm_ref(self._session, instance)
-
- pwr_state = self._session._call_method(vim_util,
- "get_dynamic_property", vm_ref,
- "VirtualMachine", "runtime.powerState")
- # Only PoweredOn VMs can be powered off.
- if pwr_state == "poweredOn":
- LOG.debug("Powering off the VM", instance=instance)
- self._power_off_vm_ref(vm_ref)
- LOG.debug("Powered off the VM", instance=instance)
- # Raise Exception if VM is suspended
- elif pwr_state == "suspended":
- reason = _("instance is suspended and cannot be powered off.")
- raise exception.InstancePowerOffFailure(reason=reason)
- else:
- LOG.debug("VM was already in powered off state. So returning "
- "without doing anything", instance=instance)
+ vm_util.power_off_instance(self._session, instance)
def power_on(self, instance):
vm_util.power_on_instance(self._session, instance)
def _get_orig_vm_name_label(self, instance):
- return instance['uuid'] + '-orig'
+ return instance.uuid + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
@@ -1132,6 +1048,12 @@ def migrate_disk_and_power_off(self, context, instance, dest,
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
+ # Checks if the migration needs a disk resize down.
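+        # e.g. (illustrative) a flavor with root_gb=10 for an instance with
+        # root_gb=20 is rejected here, before any disk is copied.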
+ if flavor['root_gb'] < instance['root_gb']:
+ reason = _("Unable to shrink disk.")
+ raise exception.InstanceFaultRollback(
+ exception.ResizeError(reason=reason))
+
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
@@ -1156,7 +1078,7 @@ def migrate_disk_and_power_off(self, context, instance, dest,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
- ds_ref = vm_util.get_datastore(
+ ds_ref = ds_util.get_datastore(
self._session, self._cluster, host_ref,
datastore_regex=self._datastore_regex).ref
dc_info = self.get_datacenter_ref_and_name(ds_ref)
@@ -1170,11 +1092,11 @@ def migrate_disk_and_power_off(self, context, instance, dest,
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# Destroy the original VM. The vm_ref needs to be searched using the
- # instance['uuid'] + self._migrate_suffix as the identifier. We will
+ # instance.uuid + self._migrate_suffix as the identifier. We will
        # not find the vm when searching by the instanceUuid; rather it will
# be found using the uuid buried in the extraConfig
vm_ref = vm_util.search_vm_ref_by_identifier(self._session,
- instance['uuid'] + self._migrate_suffix)
+ instance.uuid + self._migrate_suffix)
if vm_ref is None:
LOG.debug("instance not present", instance=instance)
return
@@ -1210,6 +1132,20 @@ def finish_migration(self, context, migration, instance, disk_info,
instance)
vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)
+ # Resize the disk (if larger)
+ old_root_gb = instance.system_metadata['old_instance_type_root_gb']
+ if instance['root_gb'] > int(old_root_gb):
+ root_disk_in_kb = instance['root_gb'] * units.Mi
+ vmdk_path = vm_util.get_vmdk_path(self._session, vm_ref,
+ instance)
+ data_store_ref = ds_util.get_datastore(self._session,
+ self._cluster, datastore_regex=self._datastore_regex).ref
+ dc_info = self.get_datacenter_ref_and_name(data_store_ref)
+ self._extend_virtual_disk(instance, root_disk_in_kb, vmdk_path,
+ dc_info.ref)
+
+ # TODO(ericwb): add extend for ephemeral disk
+
# 4. Start VM
if power_on:
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
@@ -1267,7 +1203,7 @@ def get_info(self, instance):
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(
- self._session, vm_props, lst_properties)
+ self._session, vm_props)
max_mem = int(query['summary.config.memorySizeMB']) * 1024
return {'state': VMWARE_POWER_STATES[query['runtime.powerState']],
'max_mem': max_mem,
@@ -1275,7 +1211,7 @@ def get_info(self, instance):
'num_cpu': int(query['summary.config.numCpu']),
'cpu_time': 0}
- def get_diagnostics(self, instance):
+ def _get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
lst_properties = ["summary.config",
@@ -1285,16 +1221,38 @@ def get_diagnostics(self, instance):
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
query = vm_util.get_values_from_object_properties(self._session,
- vm_props,
- lst_properties)
+ vm_props)
data = {}
# All of values received are objects. Convert them to dictionaries
for value in query.values():
prop_dict = vim.object_to_dict(value, list_depth=1)
data.update(prop_dict)
+ return data
+
+ def get_diagnostics(self, instance):
+ """Return data about VM diagnostics."""
+ data = self._get_diagnostics(instance)
        # Add a namespace to all of the diagnostics
return dict([('vmware:' + k, v) for k, v in data.items()])
+ def get_instance_diagnostics(self, instance):
+ """Return data about VM diagnostics."""
+ data = self._get_diagnostics(instance)
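+        # The keys below are the flattened vSphere property names produced
+        # by _get_diagnostics via vim.object_to_dict, e.g. 'memorySizeMB'
+        # and 'guestMemoryUsage' (assumed to come from the VM summary).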
+ state = data.get('powerState')
+ if state:
+ state = power_state.STATE_MAP[VMWARE_POWER_STATES[state]]
+ uptime = data.get('uptimeSeconds', 0)
+ config_drive = configdrive.required_by(instance)
+ diags = diagnostics.Diagnostics(state=state,
+ driver='vmwareapi',
+ config_drive=config_drive,
+ hypervisor_os='esxi',
+ uptime=uptime)
+ diags.memory_details.maximum = data.get('memorySizeMB', 0)
+ diags.memory_details.used = data.get('guestMemoryUsage', 0)
+ # TODO(garyk): add in cpu, nic and disk stats
+ return diags
+
def _get_vnc_console_connection(self, instance):
"""Return connection info for a vnc console."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
@@ -1430,7 +1388,7 @@ def _create_folder_if_missing(self, ds_name, ds_ref, folder):
        exists. If this throws an exception 'FileAlreadyExistsException'
then the folder already exists on the datastore.
"""
- path = ds_util.build_datastore_path(ds_name, folder)
+ path = ds_util.DatastorePath(ds_name, folder)
dc_info = self.get_datacenter_ref_and_name(ds_ref)
try:
ds_util.mkdir(self._session, path, dc_info.ref)
@@ -1453,9 +1411,9 @@ def _check_if_folder_file_exists(self, ds_browser, ds_ref, ds_name,
# Ensure that the cache folder exists
self.check_cache_folder(ds_name, ds_ref)
# Check if the file exists or not.
- folder_path = ds_util.build_datastore_path(ds_name, folder_name)
+ folder_ds_path = ds_util.DatastorePath(ds_name, folder_name)
file_exists = ds_util.file_exists(self._session, ds_browser,
- folder_path, file_name)
+ folder_ds_path, file_name)
return file_exists
def inject_network_info(self, instance, network_info):
@@ -1470,13 +1428,13 @@ def manage_image_cache(self, context, instances):
LOG.debug("Image aging disabled. Aging will not be done.")
return
- datastores = vm_util.get_available_datastores(self._session,
+ datastores = ds_util.get_available_datastores(self._session,
self._cluster,
self._datastore_regex)
datastores_info = []
for ds in datastores:
- ds_info = self.get_datacenter_ref_and_name(ds['ref'])
- datastores_info.append((ds, ds_info))
+ dc_info = self.get_datacenter_ref_and_name(ds.ref)
+ datastores_info.append((ds, dc_info))
self._imagecache.update(context, instances, datastores_info)
def _get_valid_vms_from_retrieve_result(self, retrieve_result):
@@ -1511,6 +1469,74 @@ def instance_exists(self, instance):
except exception.InstanceNotFound:
return False
+ def attach_interface(self, instance, image_meta, vif):
+ """Attach an interface to the instance."""
+ vif_model = image_meta.get("hw_vif_model",
+ constants.DEFAULT_VIF_MODEL)
+ vif_model = vm_util.convert_vif_model(vif_model)
+ vif_info = vmwarevif.get_vif_dict(self._session, self._cluster,
+ vif_model, utils.is_neutron(), vif)
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
+ # Ensure that there is not a race with the port index management
+ with lockutils.lock(instance.uuid,
+ lock_file_prefix='nova-vmware-hot-plug'):
+ port_index = vm_util.get_attach_port_index(self._session, vm_ref)
+ client_factory = self._session._get_vim().client.factory
+ attach_config_spec = vm_util.get_network_attach_config_spec(
+ client_factory, vif_info, port_index)
+ LOG.debug("Reconfiguring VM to attach interface",
+ instance=instance)
+ try:
+ vm_util.reconfigure_vm(self._session, vm_ref,
+ attach_config_spec)
+ except Exception as e:
+ LOG.error(_LE('Attaching network adapter failed. Exception: '
+                          '%s'),
+ e, instance=instance)
+ raise exception.InterfaceAttachFailed(
+ instance_uuid=instance['uuid'])
+ LOG.debug("Reconfigured VM to attach interface", instance=instance)
+
+ def detach_interface(self, instance, vif):
+ """Detach an interface from the instance."""
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
+ # Ensure that there is not a race with the port index management
+ with lockutils.lock(instance.uuid,
+ lock_file_prefix='nova-vmware-hot-plug'):
+ port_index = vm_util.get_vm_detach_port_index(self._session,
+ vm_ref,
+ vif['id'])
+ if port_index is None:
+ msg = _("No device with interface-id %s exists on "
+ "VM") % vif['id']
+ raise exception.NotFound(msg)
+
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ device = vmwarevif.get_network_device(hardware_devices,
+ vif['address'])
+ if device is None:
+ msg = _("No device with MAC address %s exists on the "
+ "VM") % vif['address']
+ raise exception.NotFound(msg)
+
+ client_factory = self._session._get_vim().client.factory
+ detach_config_spec = vm_util.get_network_detach_config_spec(
+ client_factory, device, port_index)
+ LOG.debug("Reconfiguring VM to detach interface",
+ instance=instance)
+ try:
+ vm_util.reconfigure_vm(self._session, vm_ref,
+ detach_config_spec)
+ except Exception as e:
+ LOG.error(_LE('Detaching network adapter failed. Exception: '
+ '%s'),
+ e, instance=instance)
+ raise exception.InterfaceDetachFailed(
+ instance_uuid=instance['uuid'])
+ LOG.debug("Reconfigured VM to detach interface", instance=instance)
+
class VMwareVCVMOps(VMwareVMOps):
"""Management class for VM-related tasks.
@@ -1604,6 +1630,6 @@ def get_vnc_console(self, instance):
# NOTE: VM can move hosts in some situations. Debug for admins.
LOG.debug("VM %(uuid)s is currently on host %(host_name)s",
- {'uuid': instance['name'], 'host_name': host_name},
+ {'uuid': instance.name, 'host_name': host_name},
instance=instance)
return vnc_console
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 65f6b7fed0..38ed03d039 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -14,22 +14,136 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
-Utility functions for Image transfer.
+Utility functions for Image transfer and manipulation.
"""
import os
+from oslo.config import cfg
+
from nova import exception
from nova import image
from nova.openstack.common import log as logging
+from nova.openstack.common import strutils
+from nova.openstack.common import units
+from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
+# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it
+# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
+# possible to import it here, as nova.virt.vmwareapi.driver calls
+# CONF.register_opts() after the import chain which imports this module. This
+# is not a problem as long as the import order doesn't change.
+CONF = cfg.CONF
+
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
+LINKED_CLONE_PROPERTY = 'vmware_linked_clone'
+
+
+class VMwareImage(object):
+ def __init__(self, image_id,
+ file_size=0,
+ os_type=constants.DEFAULT_OS_TYPE,
+ adapter_type=constants.DEFAULT_ADAPTER_TYPE,
+ disk_type=constants.DEFAULT_DISK_TYPE,
+ file_type=constants.DEFAULT_DISK_FORMAT,
+ linked_clone=None,
+ vif_model=constants.DEFAULT_VIF_MODEL):
+ """VMwareImage holds values for use in building VMs.
+
+ image_id (str): uuid of the image
+ file_size (int): size of file in bytes
+ os_type (str): name of guest os (use vSphere names only)
+ adapter_type (str): name of the adapter's type
+        disk_type (str): type of disk, e.g. thin, thick, sparse
+        file_type (str): vmdk or iso
+        linked_clone (bool): whether to use linked clone
+        vif_model (str): name of the virtual network adapter model
+ """
+ self.image_id = image_id
+ self.file_size = file_size
+ self.os_type = os_type
+ self.adapter_type = adapter_type
+ self.disk_type = disk_type
+ self.file_type = file_type
+
+ # NOTE(vui): This should be removed when we restore the
+ # descriptor-based validation.
+ if (self.file_type is not None and
+ self.file_type not in constants.DISK_FORMATS_ALL):
+ raise exception.InvalidDiskFormat(disk_format=self.file_type)
+
+ if linked_clone is not None:
+ self.linked_clone = linked_clone
+ else:
+ self.linked_clone = CONF.vmware.use_linked_clone
+ self.vif_model = vif_model
+
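+    # NOTE: the size properties below use integer (floor) division, so
+    # partial units truncate; e.g. a 1.5 GiB image reports
+    # file_size_in_gb == 1.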
+ @property
+ def file_size_in_kb(self):
+ return self.file_size / units.Ki
+
+ @property
+ def file_size_in_gb(self):
+ return self.file_size / units.Gi
+
+ @property
+ def is_sparse(self):
+ return self.disk_type == constants.DISK_TYPE_SPARSE
+
+ @property
+ def is_iso(self):
+ return self.file_type == constants.DISK_FORMAT_ISO
+
+ @classmethod
+ def from_image(cls, image_id, image_meta=None):
+ """Returns VMwareImage, the subset of properties the driver uses.
+
+        :param image_id: image id of image
+        :param image_meta: image metadata we are working with
+ :return: vmware image object
+        :rtype: nova.virt.vmwareapi.vmware_images.VMwareImage
+ """
+ if image_meta is None:
+ image_meta = {}
+
+ properties = image_meta.get("properties", {})
+
+ # calculate linked_clone flag, allow image properties to override the
+        # global property set in the configuration.
+ image_linked_clone = properties.get(LINKED_CLONE_PROPERTY,
+ CONF.vmware.use_linked_clone)
+
+ # catch any string values that need to be interpreted as boolean values
+ linked_clone = strutils.bool_from_string(image_linked_clone)
+
+ props = {
+ 'image_id': image_id,
+ 'linked_clone': linked_clone
+ }
+
+ if 'size' in image_meta:
+ props['file_size'] = image_meta['size']
+ if 'disk_format' in image_meta:
+ props['file_type'] = image_meta['disk_format']
+
+ props_map = {
+ 'vmware_ostype': 'os_type',
+ 'vmware_adaptertype': 'adapter_type',
+ 'vmware_disktype': 'disk_type',
+ 'hw_vif_model': 'vif_model'
+ }
+
+ for k, v in props_map.iteritems():
+ if k in properties:
+ props[v] = properties[k]
+
+ return cls(**props)
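+
+# A minimal usage sketch (hypothetical image_meta values, for illustration):
+#
+#     image_meta = {'size': 512 * units.Mi,
+#                   'disk_format': 'vmdk',
+#                   'properties': {'vmware_disktype': 'sparse',
+#                                  'vmware_linked_clone': 'false'}}
+#     image = VMwareImage.from_image(image_id, image_meta)
+#     assert image.is_sparse
+#     assert not image.linked_clone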
+
def start_transfer(context, read_file_handle, data_size,
write_file_handle=None, image_id=None, image_meta=None):
@@ -171,18 +285,3 @@ def upload_image(context, image, instance, **kwargs):
image_id=metadata['id'], image_meta=image_metadata)
LOG.debug("Uploaded image %s to the Glance image server", image,
instance=instance)
-
-
-def get_vmdk_size_and_properties(context, image, instance):
- """Get size of the vmdk file that is to be downloaded for attach in spawn.
- Need this to create the dummy virtual disk for the meta-data file. The
- geometry of the disk created depends on the size.
- """
-
- LOG.debug("Getting image size for the image %s", image,
- instance=instance)
- meta_data = IMAGE_API.get(context, image)
- size, properties = meta_data["size"], meta_data["properties"]
- LOG.debug("Got image size of %(size)s for the image %(image)s",
- {'size': size, 'image': image}, instance=instance)
- return size, properties
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
index c332402d82..1ca01f90da 100644
--- a/nova/virt/vmwareapi/volumeops.py
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -20,7 +20,7 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
@@ -33,10 +33,9 @@
class VMwareVolumeOps(object):
"""Management class for Volume-related tasks."""
- def __init__(self, session, cluster=None, vc_support=False):
+ def __init__(self, session, cluster=None):
self._session = session
self._cluster = cluster
- self._vc_support = vc_support
def attach_disk_to_vm(self, vm_ref, instance,
adapter_type, disk_type, vmdk_path=None,
@@ -439,10 +438,6 @@ def _consolidate_vmdk_volume(self, instance, vm_ref, device, volume_ref):
is on the datastore of the instance.
"""
- # Consolidation only supported with VC driver
- if not self._vc_support:
- return
-
original_device = self._get_vmdk_base_volume_device(volume_ref)
original_device_path = original_device.backing.fileName
@@ -568,7 +563,7 @@ def attach_root_volume(self, connection_info, instance, mountpoint,
driver_type = connection_info['driver_volume_type']
LOG.debug("Root volume attach. Driver type: %s", driver_type,
instance=instance)
- if self._vc_support and driver_type == 'vmdk':
+ if driver_type == 'vmdk':
vm_ref = vm_util.get_vm_ref(self._session, instance)
data = connection_info['data']
# Get the volume ref
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index df47d29810..17480dc1f4 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -28,8 +28,8 @@
from nova import context
from nova import crypto
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
@@ -338,7 +338,7 @@ def inject_file(self, path, contents):
def resetnetwork(self):
LOG.debug('Resetting network', instance=self.instance)
- #NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success
+ # NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success
return self._call_agent('resetnetwork',
timeout=CONF.xenserver.agent_resetnetwork_timeout,
success_codes=['0', '500'])
diff --git a/nova/virt/xenapi/client/objects.py b/nova/virt/xenapi/client/objects.py
index a358d41b8e..5cc91eb4c7 100644
--- a/nova/virt/xenapi/client/objects.py
+++ b/nova/virt/xenapi/client/objects.py
@@ -46,7 +46,7 @@ class XenAPISessionObject(object):
to use get_all(), but this often leads to races as objects
get deleted under your feet. It is preferable to use the undocumented:
* vms = session.VM.get_all_records_where(
- 'field "is_control_domain"="true"')
+ 'field "is_control_domain"="true"')
"""
diff --git a/nova/virt/xenapi/client/session.py b/nova/virt/xenapi/client/session.py
index 139e1c184c..eae76f5ded 100644
--- a/nova/virt/xenapi/client/session.py
+++ b/nova/virt/xenapi/client/session.py
@@ -25,8 +25,8 @@
from nova import context
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import versionutils
from nova import utils
@@ -77,7 +77,7 @@ def __init__(self, url, user, pw):
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
- self.is_slave = False
+ self.is_subordinate = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
url = self._create_first_session(url, user, pw, exception)
@@ -107,13 +107,13 @@ def _create_first_session(self, url, user, pw, exception):
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw)
except self.XenAPI.Failure as e:
- # if user and pw of the master are different, we're doomed!
+        # if user and pw of the main host are different, we're doomed!
if e.details[0] == 'HOST_IS_SLAVE':
- master = e.details[1]
- url = pool.swap_xapi_host(url, master)
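+                # HOST_IS_SLAVE means we logged in to a pool member; the
+                # address of the pool's main host is in e.details[1], so
+                # retry the login against that host instead.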
+ main = e.details[1]
+ url = pool.swap_xapi_host(url, main)
session = self.XenAPI.Session(url)
session.login_with_password(user, pw)
- self.is_slave = True
+ self.is_subordinate = True
else:
raise
self._sessions.put(session)
@@ -127,7 +127,7 @@ def _populate_session_pool(self, url, user, pw, exception):
self._sessions.put(session)
def _get_host_uuid(self):
- if self.is_slave:
+ if self.is_subordinate:
aggr = objects.AggregateList.get_by_host(
context.get_admin_context(),
CONF.host, key=pool_states.POOL_FLAG)[0]
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 77c962ddde..97aa46f208 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -40,7 +40,7 @@
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
@@ -228,6 +228,10 @@ def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance."""
self._vmops.snapshot(context, instance, image_id, update_task_state)
+ def post_interrupted_snapshot_cleanup(self, context, instance):
+ """Cleans up any resources left after a failed snapshot."""
+ self._vmops.post_interrupted_snapshot_cleanup(context, instance)
+
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot VM instance."""
@@ -255,7 +259,7 @@ def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks)
def cleanup(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True, migrate_data=None):
+ destroy_disks=True, migrate_data=None, destroy_vifs=True):
"""Cleanup after instance being destroyed by Hypervisor."""
pass
@@ -269,11 +273,13 @@ def unpause(self, instance):
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
- block_device_info=None):
+ block_device_info=None,
+ timeout=0, retry_interval=0):
"""Transfers the VHD of a running instance to another host, then shuts
        off the instance, and copies over the COW disk
"""
# NOTE(vish): Xen currently does not use network info.
+ # TODO(PhilDay): Add support for timeout (clean shutdown)
return self._vmops.migrate_disk_and_power_off(context, instance,
dest, flavor, block_device_info)
@@ -299,8 +305,9 @@ def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
self._vmops.unrescue(instance)
- def power_off(self, instance):
+ def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
+ # TODO(PhilDay): Add support for timeout (clean shutdown)
self._vmops.power_off(instance)
def power_on(self, context, instance, network_info,
@@ -344,6 +351,10 @@ def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
+ def get_instance_diagnostics(self, instance):
+ """Return data about VM diagnostics."""
+ return self._vmops.get_instance_diagnostics(instance)
+
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
@@ -580,7 +591,8 @@ def post_live_migration_at_destination(self, context, instance,
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
- :param : block_migration: if true, post operation of block_migration.
+        :param block_migration: if true, this is the post operation of
+            block migration.
+
"""
self._vmops.post_live_migration_at_destination(context, instance,
network_info, block_device_info, block_device_info)
@@ -673,7 +685,6 @@ def resume_state_on_host_boot(self, context, instance, network_info,
def get_per_instance_usage(self):
"""Get information about instance resource usage.
- :returns: dict of nova uuid => dict of usage
- info
+ :returns: dict of nova uuid => dict of usage info
"""
return self._vmops.get_per_instance_usage()
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index eabe139aec..780a663386 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-#============================================================================
-#
+
+
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
@@ -57,7 +57,7 @@
import zlib
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
@@ -236,6 +236,14 @@ def after_VBD_create(vbd_ref, vbd_rec):
vdi_rec['VBDs'].append(vbd_ref)
+def after_VIF_create(vif_ref, vif_rec):
+ """Create backref from VM to VIF when VIF is created.
+ """
+ vm_ref = vif_rec['VM']
+ vm_rec = _db_content['VM'][vm_ref]
+ vm_rec['VIFs'].append(vif_ref)
+
+
def after_VM_create(vm_ref, vm_rec):
"""Create read-only fields in the VM record."""
vm_rec.setdefault('domid', -1)
@@ -245,6 +253,7 @@ def after_VM_create(vm_ref, vm_rec):
vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi))
vm_rec.setdefault('VCPUs_max', str(4))
vm_rec.setdefault('VBDs', [])
+ vm_rec.setdefault('VIFs', [])
vm_rec.setdefault('resident_on', '')
@@ -621,7 +630,7 @@ def VDI_clone(self, _1, vdi_to_clone_ref):
return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
- #Always return 12GB available
+ # Always return 12GB available
return 12 * units.Gi
def _plugin_agent_version(self, method, args):
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index afa484a80c..e66c0eda90 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -25,9 +25,8 @@
from nova.compute import vm_states
from nova import context
from nova import exception
+from nova.i18n import _
from nova import objects
-from nova.objects import service as service_obj
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.pci import pci_whitelist
@@ -120,8 +119,8 @@ def set_host_enabled(self, enabled):
# Since capabilities are gone, use service table to disable a node
# in scheduler
cntxt = context.get_admin_context()
- service = service_obj.Service.get_by_args(cntxt, CONF.host,
- 'nova-compute')
+ service = objects.Service.get_by_args(cntxt, CONF.host,
+ 'nova-compute')
service.disabled = not enabled
service.disabled_reason = 'set by xenapi host_state'
service.save()
diff --git a/nova/virt/xenapi/image/bittorrent.py b/nova/virt/xenapi/image/bittorrent.py
index a77775d113..d400feef5b 100644
--- a/nova/virt/xenapi/image/bittorrent.py
+++ b/nova/virt/xenapi/image/bittorrent.py
@@ -17,7 +17,7 @@
import pkg_resources
import six.moves.urllib.parse as urlparse
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
import nova.openstack.common.log as logging
from nova.virt.xenapi import vm_utils
diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py
index cfa983fbcb..d838f69886 100644
--- a/nova/virt/xenapi/network_utils.py
+++ b/nova/virt/xenapi/network_utils.py
@@ -19,7 +19,7 @@
"""
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
def find_network_with_name_label(session, name_label):
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 9867dbad72..4f51e80c8d 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -22,7 +22,7 @@
from nova.compute import rpcapi as compute_rpcapi
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.xenapi import pool_states
@@ -78,43 +78,43 @@ def add_to_aggregate(self, context, aggregate, host, slave_info=None):
raise exception.InvalidAggregateAction(
action='add host',
aggregate_id=aggregate['id'],
- reason=aggregate['metadata'][pool_states.KEY])
+ reason=invalid[aggregate['metadata'][pool_states.KEY]])
if (aggregate['metadata'][pool_states.KEY] == pool_states.CREATED):
aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING})
if len(aggregate['hosts']) == 1:
            # this is the first host of the pool -> make it master
self._init_pool(aggregate['id'], aggregate['name'])
            # save metadata so that we can find the master again
            metadata = {'master_compute': host,
host: self._host_uuid,
pool_states.KEY: pool_states.ACTIVE}
aggregate.update_metadata(metadata)
else:
# the pool is already up and running, we need to figure out
# whether we can serve the request from this host or not.
            master_compute = aggregate['metadata']['master_compute']
            if master_compute == CONF.host and master_compute != host:
                # this is the master -> do a pool-join
                # To this aim, nova compute on the slave has to go down.
# NOTE: it is assumed that ONLY nova compute is running now
                self._join_slave(aggregate['id'], host,
                                 slave_info.get('compute_uuid'),
                                 slave_info.get('url'), slave_info.get('user'),
                                 slave_info.get('passwd'))
                metadata = {host: slave_info.get('xenhost_uuid'), }
aggregate.update_metadata(metadata)
            elif master_compute and master_compute != host:
                # send rpc cast to master, asking to add the following
# host with specified credentials.
                slave_info = self._create_slave_info()
self.compute_rpcapi.add_aggregate_host(
                context, aggregate, host, master_compute, slave_info)
    def remove_from_aggregate(self, context, aggregate, host, slave_info=None):
"""Remove a compute host from an aggregate."""
        slave_info = slave_info or dict()
if not pool_states.is_hv_pool(aggregate['metadata']):
return
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index a763718d3c..74408cb3c3 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -19,7 +19,7 @@
from oslo.config import cfg
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.virt.xenapi import network_utils
from nova.virt.xenapi import vm_utils
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index a655b1dc8c..3f261a4834 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -37,9 +37,9 @@
from nova.compute import task_states
from nova.compute import vm_mode
from nova import exception
+from nova.i18n import _, _LI
from nova.network import model as network_model
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _, _LI
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
@@ -50,9 +50,11 @@
from nova.openstack.common import xmlutils
from nova import utils
from nova.virt import configdrive
+from nova.virt import diagnostics
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt import hardware
+from nova.virt import netutils
from nova.virt.xenapi import agent
from nova.virt.xenapi.image import utils as image_utils
from nova.virt.xenapi import volume_utils
@@ -424,7 +426,7 @@ def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk',
"""Create a VBD record and returns its reference."""
vbd_rec = {}
vbd_rec['VM'] = vm_ref
- if vdi_ref == None:
+ if vdi_ref is None:
vdi_ref = 'OpaqueRef:NULL'
vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = str(userdevice)
@@ -726,6 +728,40 @@ def strip_base_mirror_from_vdis(session, vm_ref):
_try_strip_base_mirror_from_vdi(session, vdi_ref)
+def _get_snapshots_for_vm(session, instance, vm_ref):
+ sr_ref = safe_find_sr(session)
+ vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
+ parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref)
+
+ if not parent_uuid:
+ return []
+
+ return _child_vhds(session, sr_ref, parent_uuid, old_snapshots_only=True)
+
+
+def remove_old_snapshots(session, instance, vm_ref):
+ """See if there is an snapshot present that should be removed."""
+ LOG.debug("Starting remove_old_snapshots for VM", instance=instance)
+
+ snapshot_uuids = _get_snapshots_for_vm(session, instance, vm_ref)
+ number_of_snapshots = len(snapshot_uuids)
+
+ if number_of_snapshots <= 0:
+ LOG.debug("No snapshots to remove.", instance=instance)
+ return
+
+ if number_of_snapshots > 1:
+ LOG.debug("More snapshots than expected, only deleting one.",
+ instance=instance)
+
+ vdi_uuid = snapshot_uuids[0]
+ vdi_ref = session.VDI.get_by_uuid(vdi_uuid)
+ safe_destroy_vdis(session, [vdi_ref])
+ scan_default_sr(session)
+ # TODO(johnthetubaguy): we could look for older snapshots too
+ LOG.debug("Removed one old snapshot.", instance=instance)
+
+
@contextlib.contextmanager
def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0',
post_snapshot_callback=None):
@@ -1122,7 +1158,7 @@ def generate_single_ephemeral(session, instance, vm_ref, userdevice,
instance_name_label = instance["name"]
name_label = "%s ephemeral" % instance_name_label
- #TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here
+ # TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here
label_number = int(userdevice) - 4
if label_number > 0:
name_label = "%s (%d)" % (name_label, label_number)
@@ -1716,8 +1752,8 @@ def lookup_vm_vdis(session, vm_ref):
def lookup(session, name_label, check_rescue=False):
"""Look the instance up and return it if available.
- :param check_rescue: if True will return the 'name'-rescue vm if it
- exists, instead of just 'name'
+    :param check_rescue: if True will return the 'name'-rescue vm if it
+                         exists, instead of just 'name'
"""
if check_rescue:
result = lookup(session, name_label + '-rescue', False)
@@ -1736,12 +1772,13 @@ def lookup(session, name_label, check_rescue=False):
def preconfigure_instance(session, instance, vdi_ref, network_info):
"""Makes alterations to the image before launching as part of spawn.
"""
+ key = str(instance['key_data'])
+ net = netutils.get_injected_network_template(network_info)
+ metadata = instance['metadata']
# As mounting the image VDI is expensive, we only want do it once,
# if at all, so determine whether it's required first, and then do
# everything
- mount_required = False
- key, net, metadata = _prepare_injectables(instance, network_info)
mount_required = key or net or metadata
if not mount_required:
return
@@ -1785,12 +1822,36 @@ def compile_info(session, vm_ref):
'cpu_time': 0}
-def compile_diagnostics(record):
+def compile_instance_diagnostics(instance, vm_rec):
+ vm_power_state_int = XENAPI_POWER_STATE[vm_rec['power_state']]
+ vm_power_state = power_state.STATE_MAP[vm_power_state_int]
+ config_drive = configdrive.required_by(instance)
+
+ diags = diagnostics.Diagnostics(state=vm_power_state,
+ driver='xenapi',
+ config_drive=config_drive)
+
+ for cpu_num in range(0, long(vm_rec['VCPUs_max'])):
+ diags.add_cpu()
+
+ for vif in vm_rec['VIFs']:
+ diags.add_nic()
+
+ for vbd in vm_rec['VBDs']:
+ diags.add_disk()
+
+ max_mem_bytes = long(vm_rec['memory_dynamic_max'])
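+    # memory_dynamic_max is reported by XenAPI in bytes; the common
+    # Diagnostics model expects MiB.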
+ diags.memory_details.maximum = max_mem_bytes / units.Mi
+
+ return diags
+
+
+def compile_diagnostics(vm_rec):
"""Compile VM diagnostics data."""
try:
keys = []
diags = {}
- vm_uuid = record["uuid"]
+ vm_uuid = vm_rec["uuid"]
xml = _get_rrd(_get_rrd_server(), vm_uuid)
if xml:
rrd = xmlutils.safe_minidom_parse_string(xml)
@@ -2021,7 +2082,14 @@ def _walk_vdi_chain(session, vdi_uuid):
vdi_uuid = parent_uuid
-def _child_vhds(session, sr_ref, vdi_uuid):
+def _is_vdi_a_snapshot(vdi_rec):
+ """Ensure VDI is a snapshot, and not cached image."""
+ is_a_snapshot = vdi_rec['is_a_snapshot']
+ image_id = vdi_rec['other_config'].get('image-id')
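+    # Cached glance images are also snapshots, but they are tagged with
+    # 'image-id' in other_config, which is how we tell them apart.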
+ return is_a_snapshot and not image_id
+
+
+def _child_vhds(session, sr_ref, vdi_uuid, old_snapshots_only=False):
"""Return the immediate children of a given VHD.
This is not recursive, only the immediate children are returned.
@@ -2037,9 +2105,12 @@ def _child_vhds(session, sr_ref, vdi_uuid):
if parent_uuid != vdi_uuid:
continue
+ if old_snapshots_only and not _is_vdi_a_snapshot(rec):
+ continue
+
children.add(rec_uuid)
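+    # Return a list rather than a set so callers such as
+    # remove_old_snapshots() can index into the result.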
- return children
+ return list(children)
def _count_parents_children(session, vdi_ref, sr_ref):
@@ -2455,103 +2526,6 @@ def _mounted_processing(device, key, net, metadata):
'non-linux instances): %s') % err)
-def _prepare_injectables(inst, network_info):
- """prepares the ssh key and the network configuration file to be
- injected into the disk image
- """
- #do the import here - Jinja2 will be loaded only if injection is performed
- import jinja2
- tmpl_path, tmpl_file = os.path.split(CONF.injected_network_template)
- env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path),
- trim_blocks=True)
- template = env.get_template(tmpl_file)
-
- metadata = inst['metadata']
- key = str(inst['key_data'])
- net = None
- if network_info:
- ifc_num = -1
- interfaces_info = []
- for vif in network_info:
- ifc_num += 1
- try:
- if not vif['network'].get_meta('injected'):
- # network is not specified injected
- continue
- except KeyError:
- # vif network is None
- continue
-
- # NOTE(tr3buchet): using all subnets in case dns is stored in a
- # subnet that isn't chosen as first v4 or v6
- # subnet in the case where there is more than one
- # dns = list of address of each dns entry from each vif subnet
- dns = [ip['address'] for subnet in vif['network']['subnets']
- for ip in subnet['dns']]
- dns = ' '.join(dns).strip()
-
- interface_info = {'name': 'eth%d' % ifc_num,
- 'address': '',
- 'netmask': '',
- 'gateway': '',
- 'broadcast': '',
- 'dns': dns or '',
- 'address_v6': '',
- 'netmask_v6': '',
- 'gateway_v6': '',
- 'use_ipv6': CONF.use_ipv6}
-
- # NOTE(tr3buchet): the original code used the old network_info
- # which only supported a single ipv4 subnet
- # (and optionally, a single ipv6 subnet).
- # I modified it to use the new network info model,
- # which adds support for multiple v4 or v6
- # subnets. I chose to ignore any additional
- # subnets, just as the original code ignored
- # additional IP information
-
- # populate v4 info if v4 subnet and ip exist
- try:
- # grab the first v4 subnet (or it raises)
- subnet = [s for s in vif['network']['subnets']
- if s['version'] == 4][0]
- # get the subnet's first ip (or it raises)
- ip = subnet['ips'][0]
-
- # populate interface_info
- subnet_netaddr = subnet.as_netaddr()
- interface_info['address'] = ip['address']
- interface_info['netmask'] = subnet_netaddr.netmask
- interface_info['gateway'] = subnet['gateway']['address']
- interface_info['broadcast'] = subnet_netaddr.broadcast
- except IndexError:
- # there isn't a v4 subnet or there are no ips
- pass
-
- # populate v6 info if v6 subnet and ip exist
- try:
- # grab the first v6 subnet (or it raises)
- subnet = [s for s in vif['network']['subnets']
- if s['version'] == 6][0]
- # get the subnet's first ip (or it raises)
- ip = subnet['ips'][0]
-
- # populate interface_info
- interface_info['address_v6'] = ip['address']
- interface_info['netmask_v6'] = subnet.as_netaddr().netmask
- interface_info['gateway_v6'] = subnet['gateway']['address']
- except IndexError:
- # there isn't a v6 subnet or there are no ips
- pass
-
- interfaces_info.append(interface_info)
-
- if interfaces_info:
- net = template.render({'interfaces': interfaces_info,
- 'use_ipv6': CONF.use_ipv6})
- return key, net, metadata
-
-
def ensure_correct_host(session):
"""Ensure we're connected to the host we're running on. This is the
required configuration for anything that uses vdi_attached_here.
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index c7db7ea992..0cba7cda19 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -35,9 +35,9 @@
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
@@ -266,7 +266,7 @@ def null_step_decorator(f):
def create_disks_step(undo_mgr, disk_image_type, image_meta,
name_label):
- #TODO(johngarbutt) clean up if this is not run
+ # TODO(johngarbutt) clean up if this is not run
vdis = vm_utils.import_all_migrated_disks(self._session,
instance)
@@ -341,6 +341,9 @@ def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
if not vdi.get('osvol')]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
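+                # Cinder-backed disks (marked 'osvol') must not be
+                # destroyed with the instance's VDIs; just disconnect
+                # their SRs below.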
+ vol_vdi_refs = [vdi['ref'] for vdi in vdis.values()
+ if vdi.get('osvol')]
+ self._volumeops.safe_cleanup_from_vdis(vol_vdi_refs)
undo_mgr.undo_with(undo_create_disks)
return vdis
@@ -749,6 +752,11 @@ def snapshot(self, context, instance, image_id, update_task_state):
LOG.debug("Finished snapshot and upload for VM",
instance=instance)
+ def post_interrupted_snapshot_cleanup(self, context, instance):
+ """Cleans up any resources left after a failed snapshot."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_utils.remove_old_snapshots(self._session, instance, vm_ref)
+
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
@@ -991,7 +999,7 @@ def power_down_and_transfer_leaf_vhds(root_vdi_uuid,
instance=instance)
try:
self._restore_orig_vm_and_cleanup_orphan(instance)
- #TODO(johngarbutt) should also cleanup VHDs at destination
+ # TODO(johngarbutt) should also cleanup VHDs at destination
except Exception as rollback_error:
LOG.warn(_("_migrate_disk_resizing_up failed to "
"rollback: %s"), rollback_error,
@@ -1400,11 +1408,11 @@ def rescue(self, context, instance, network_info, image_meta,
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
- rescue_name_label = '%s-rescue' % instance['name']
+ rescue_name_label = '%s-rescue' % instance.name
rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
if rescue_vm_ref:
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
- % instance['name'])
+ % instance.name)
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
@@ -1429,10 +1437,10 @@ def unrescue(self, instance):
"""
rescue_vm_ref = vm_utils.lookup(self._session,
- "%s-rescue" % instance['name'])
+ "%s-rescue" % instance.name)
if not rescue_vm_ref:
raise exception.InstanceNotInRescueMode(
- instance_id=instance['uuid'])
+ instance_id=instance.uuid)
original_vm_ref = self._get_vm_opaque_ref(instance)
@@ -1512,6 +1520,12 @@ def get_diagnostics(self, instance):
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_diagnostics(vm_rec)
+ def get_instance_diagnostics(self, instance):
+ """Return data about VM diagnostics using the common API."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.VM.get_record(vm_ref)
+ return vm_utils.compile_instance_diagnostics(instance, vm_rec)
+
def _get_vif_device_map(self, vm_rec):
vif_map = {}
for vif in [self._session.call_xenapi("VIF.get_record", vrec)
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index d1a01d58f2..080a8fba2f 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -25,7 +25,7 @@
from oslo.config import cfg
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
xenapi_volume_utils_opts = [
@@ -301,6 +301,17 @@ def find_sr_from_vbd(session, vbd_ref):
return sr_ref
+def find_sr_from_vdi(session, vdi_ref):
+ """Find the SR reference from the VDI reference."""
+ try:
+ sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
+ except session.XenAPI.Failure as exc:
+ LOG.exception(exc)
+ raise exception.StorageError(
+ reason=_('Unable to find SR from VDI %s') % vdi_ref)
+ return sr_ref
+
+
def find_vbd_by_number(session, vm_ref, dev_number):
"""Get the VBD reference from the device number."""
vbd_refs = session.VM.get_VBDs(vm_ref)
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 5eb28165af..26607eaed9 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -18,8 +18,8 @@
"""
from nova import exception
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
@@ -37,7 +37,7 @@ def __init__(self, session):
def attach_volume(self, connection_info, instance_name, mountpoint,
hotplug=True):
"""Attach volume to VM instance."""
- #TODO(johngarbutt) move this into _attach_volume_to_vm
+ # TODO(johngarbutt) move this into _attach_volume_to_vm
dev_number = volume_utils.get_device_number(mountpoint)
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
@@ -204,3 +204,20 @@ def find_bad_volumes(self, vm_ref):
raise
return bad_devices
+
+ def safe_cleanup_from_vdis(self, vdi_refs):
+ # A helper method to detach volumes that are not associated with an
+ # instance
+
+ for vdi_ref in vdi_refs:
+ try:
+ sr_ref = volume_utils.find_sr_from_vdi(self._session, vdi_ref)
+ except exception.StorageError as exc:
+ LOG.debug(exc.format_message())
+ continue
+ try:
+ # Forget (i.e. disconnect) SR only if not in use
+ volume_utils.purge_sr(self._session, sr_ref)
+ except Exception:
+                LOG.debug('Ignoring error while purging sr: %s', sr_ref,
+ exc_info=True)
diff --git a/nova/vnc/xvp_proxy.py b/nova/vnc/xvp_proxy.py
index 5ab95c63a0..038d2c4675 100644
--- a/nova/vnc/xvp_proxy.py
+++ b/nova/vnc/xvp_proxy.py
@@ -27,7 +27,7 @@
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import log as logging
from nova import version
from nova import wsgi
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 2ba00aa811..3e07efcd9d 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -21,14 +21,16 @@
import copy
import sys
+from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from cinderclient import service_catalog
-from cinderclient.v1 import client as cinder_client
from oslo.config import cfg
+import six.moves.urllib.parse as urlparse
from nova import availability_zones as az
from nova import exception
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
+from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
@@ -65,41 +67,17 @@
LOG = logging.getLogger(__name__)
+CINDER_URL = None
-def cinderclient(context):
-
- # FIXME: the cinderclient ServiceCatalog object is mis-named.
- # It actually contains the entire access blob.
- # Only needed parts of the service catalog are passed in, see
- # nova/context.py.
- compat_catalog = {
- 'access': {'serviceCatalog': context.service_catalog or []}
- }
- sc = service_catalog.ServiceCatalog(compat_catalog)
- if CONF.cinder_endpoint_template:
- url = CONF.cinder_endpoint_template % context.to_dict()
- else:
- info = CONF.cinder_catalog_info
- service_type, service_name, endpoint_type = info.split(':')
- # extract the region if set in configuration
- if CONF.os_region_name:
- attr = 'region'
- filter_value = CONF.os_region_name
- else:
- attr = None
- filter_value = None
- url = sc.url_for(attr=attr,
- filter_value=filter_value,
- service_type=service_type,
- service_name=service_name,
- endpoint_type=endpoint_type)
-
- LOG.debug('Cinderclient connection created using URL: %s', url)
- c = cinder_client.Client(context.user_id,
+def cinderclient(context):
+ global CINDER_URL
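+    # get_cinder_client_version() resolves the endpoint from the service
+    # catalog once and caches it in the module-level CINDER_URL.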
+ version = get_cinder_client_version(context)
+ c = cinder_client.Client(version,
+ context.user_id,
context.auth_token,
project_id=context.project_id,
- auth_url=url,
+ auth_url=CINDER_URL,
insecure=CONF.cinder_api_insecure,
retries=CONF.cinder_http_retries,
timeout=CONF.cinder_http_timeout,
@@ -107,7 +85,7 @@ def cinderclient(context):
# noauth extracts user_id:project_id from auth_token
c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
context.project_id)
- c.client.management_url = url
+ c.client.management_url = CINDER_URL
return c
@@ -134,10 +112,14 @@ def _untranslate_volume_summary_view(context, vol):
d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
-
- d['display_name'] = vol.display_name
- d['display_description'] = vol.display_description
-
+    # NOTE(dzyu) the volume (cinder) v2 API uses 'name' instead of
+    # 'display_name' and 'description' instead of 'display_description'.
+ if hasattr(vol, 'display_name'):
+ d['display_name'] = vol.display_name
+ d['display_description'] = vol.display_description
+ else:
+ d['display_name'] = vol.name
+ d['display_description'] = vol.description
# TODO(jdg): Information may be lost in this translation
d['volume_type_id'] = vol.volume_type
d['snapshot_id'] = vol.snapshot_id
@@ -161,8 +143,16 @@ def _untranslate_snapshot_summary_view(context, snapshot):
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
- d['display_name'] = snapshot.display_name
- d['display_description'] = snapshot.display_description
+
+    # NOTE(dzyu) the volume (cinder) v2 API uses 'name' instead of
+    # 'display_name' and 'description' instead of 'display_description'
+    # for snapshots.
+ if hasattr(snapshot, 'display_name'):
+ d['display_name'] = snapshot.display_name
+ d['display_description'] = snapshot.display_description
+ else:
+ d['display_name'] = snapshot.name
+ d['display_description'] = snapshot.description
+
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
@@ -213,6 +203,61 @@ def wrapper(self, ctx, snapshot_id, *args, **kwargs):
return wrapper
+def get_cinder_client_version(context):
+ """Parse cinder client version by endpoint url.
+
+ :param context: Nova auth context.
+    :return: str value ('1' or '2').
+ """
+ global CINDER_URL
+ # FIXME: the cinderclient ServiceCatalog object is mis-named.
+ # It actually contains the entire access blob.
+ # Only needed parts of the service catalog are passed in, see
+ # nova/context.py.
+ compat_catalog = {
+ 'access': {'serviceCatalog': context.service_catalog or []}
+ }
+ sc = service_catalog.ServiceCatalog(compat_catalog)
+ if CONF.cinder_endpoint_template:
+ url = CONF.cinder_endpoint_template % context.to_dict()
+ else:
+ info = CONF.cinder_catalog_info
+ service_type, service_name, endpoint_type = info.split(':')
+ # extract the region if set in configuration
+ if CONF.os_region_name:
+ attr = 'region'
+ filter_value = CONF.os_region_name
+ else:
+ attr = None
+ filter_value = None
+ url = sc.url_for(attr=attr,
+ filter_value=filter_value,
+ service_type=service_type,
+ service_name=service_name,
+ endpoint_type=endpoint_type)
+ LOG.debug('Cinderclient connection created using URL: %s', url)
+
+ valid_versions = ['v1', 'v2']
+ magic_tuple = urlparse.urlsplit(url)
+ scheme, netloc, path, query, frag = magic_tuple
+ components = path.split("/")
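+    # e.g. an endpoint like http://cinder:8776/v2/<tenant_id> splits into
+    # ['', 'v2', '<tenant_id>'], so components[1] carries the API version.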
+ for version in valid_versions:
+ if version in components[1]:
+ version = version[1:]
+
+ if not CINDER_URL and version == '1':
+ msg = _LW('Cinder V1 API is deprecated as of the Juno '
+ 'release, and Nova is still configured to use it. '
+ 'Enable the V2 API in Cinder and set '
+ 'cinder_catalog_info in nova.conf to use it.')
+ LOG.warn(msg)
+
+ CINDER_URL = url
+ return version
+ msg = _("Invalid client version, must be one of: %s") % valid_versions
+ raise cinder_exception.UnsupportedVersion(msg)
+
+
class API(object):
"""API for interacting with the volume manager."""
@@ -312,8 +357,6 @@ def create(self, context, size, name, description, snapshot=None,
snapshot_id = None
kwargs = dict(snapshot_id=snapshot_id,
- display_name=name,
- display_description=description,
volume_type=volume_type,
user_id=context.user_id,
project_id=context.project_id,
@@ -321,6 +364,14 @@ def create(self, context, size, name, description, snapshot=None,
metadata=metadata,
imageRef=image_id)
+ version = get_cinder_client_version(context)
+ if version == '1':
+ kwargs['display_name'] = name
+ kwargs['display_description'] = description
+ elif version == '2':
+ kwargs['name'] = name
+ kwargs['description'] = description
+
try:
item = cinderclient(context).volumes.create(size, **kwargs)
return _untranslate_volume_summary_view(context, item)
@@ -377,20 +428,28 @@ def get_volume_encryption_metadata(self, context, volume_id):
@translate_volume_exception
def get_volume_metadata(self, context, volume_id):
- raise NotImplementedError()
+ vol = cinderclient(context).volumes.get(volume_id)
+ return vol.metadata
@translate_volume_exception
- def delete_volume_metadata(self, context, volume_id, key):
- raise NotImplementedError()
+ def delete_volume_metadata(self, context, volume_id, keys):
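+        # 'keys' is a list of metadata keys to remove, matching the
+        # cinderclient delete_metadata() signature.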
+ cinderclient(context).volumes.delete_metadata(volume_id, keys)
@translate_volume_exception
def update_volume_metadata(self, context, volume_id,
metadata, delete=False):
- raise NotImplementedError()
+ if delete:
+ # Completely replace volume metadata with one given
+ return cinderclient(context).volumes.update_all_metadata(
+ volume_id, metadata)
+ else:
+ return cinderclient(context).volumes.set_metadata(
+ volume_id, metadata)
@translate_volume_exception
- def get_volume_metadata_value(self, volume_id, key):
- raise NotImplementedError()
+ def get_volume_metadata_value(self, context, volume_id, key):
+ vol = cinderclient(context).volumes.get(volume_id)
+ return vol.metadata.get(key)
@translate_snapshot_exception
def update_snapshot_status(self, context, snapshot_id, status):
diff --git a/nova/volume/encryptors/__init__.py b/nova/volume/encryptors/__init__.py
index 79879d2a23..8c87a9e768 100644
--- a/nova/volume/encryptors/__init__.py
+++ b/nova/volume/encryptors/__init__.py
@@ -14,7 +14,7 @@
# under the License.
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.volume.encryptors import nop
diff --git a/nova/wsgi.py b/nova/wsgi.py
index f538bcd771..4f9a95bd9c 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -34,8 +34,8 @@
import webob.exc
from nova import exception
+from nova.i18n import _
from nova.openstack.common import excutils
-from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
wsgi_opts = [
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 8f3c16ff18..0eafa65029 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -224,7 +224,8 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port,
httplib.REQUEST_ENTITY_TOO_LARGE,
httplib.PRECONDITION_FAILED,
httplib.CONFLICT,
- httplib.FORBIDDEN):
+ httplib.FORBIDDEN,
+ httplib.INTERNAL_SERVER_ERROR):
# No point in retrying for these conditions
raise PluginError("Got Error response [%i] while uploading "
"image [%s] "
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
index cbeea5884f..2fbef0e6c9 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
@@ -31,7 +31,7 @@
_ = translations.ugettext
-##### Logging setup
+# Logging setup
def configure_logging(name):
log = logging.getLogger()
@@ -43,7 +43,7 @@ def configure_logging(name):
log.addHandler(sysh)
-##### Exceptions
+# Exceptions
class PluginError(Exception):
"""Base Exception class for all plugin errors."""
@@ -59,7 +59,7 @@ def __init__(self, *args):
PluginError.__init__(self, *args)
-##### Argument validation
+# Argument validation
def exists(args, key):
"""Validates that a freeform string argument to a RPC method call is given.
diff --git a/requirements.txt b/requirements.txt
index 6949d82614..e94e633eb2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,11 +1,12 @@
pbr>=0.6,!=0.7,<1.0
-SQLAlchemy>=0.8.4,!=0.9.5,<=0.9.99
+SQLAlchemy>=0.9.7,<=0.9.99
anyjson>=0.3.3
argparse
boto>=2.12.0,!=2.13.0
+decorator>=3.4.0
eventlet>=0.13.0
Jinja2
-keystonemiddleware
+keystonemiddleware>=1.0.0
kombu>=2.4.8
lxml>=2.3
Routes>=1.12.3,!=2.0
@@ -23,14 +24,17 @@ Babel>=1.3
iso8601>=0.1.9
jsonschema>=2.0.0,<3.0.0
python-cinderclient>=1.0.7
-python-neutronclient>=2.3.5,<3
+python-neutronclient>=2.3.6,<3
python-glanceclient>=0.13.1
-python-keystoneclient>=0.9.0
+python-keystoneclient>=0.10.0
six>=1.7.0
stevedore>=0.14
-websockify>=0.5.1,<0.6
+websockify>=0.5.1,<0.7
wsgiref>=0.1.2
-oslo.config>=1.2.1
-oslo.rootwrap
+oslo.config>=1.4.0.0a3
+oslo.rootwrap>=1.3.0.0a1
pycadf>=0.5.1
-oslo.messaging>=1.3.0
+oslo.messaging>=1.4.0.0a3
+oslo.i18n>=0.2.0 # Apache-2.0
+lockfile>=0.8
+rfc3986>=0.2.0 # Apache-2.0
diff --git a/run_tests.sh b/run_tests.sh
index 1fecc4c9b5..abc10fab15 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -12,6 +12,7 @@ function usage {
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
+ echo " -8, --pep8-only-changed Just run PEP8 and HACKING compliance check on files changed since HEAD~1"
echo " -P, --no-pep8 Don't run static code checks"
echo " -c, --coverage Generate coverage report"
echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger."
@@ -43,6 +44,7 @@ function process_options {
-f|--force) force=1;;
-u|--update) update=1;;
-p|--pep8) just_pep8=1;;
+ -8|--pep8-only-changed) just_pep8_changed=1;;
-P|--no-pep8) no_pep8=1;;
-c|--coverage) coverage=1;;
-d|--debug) debug=1;;
@@ -82,6 +84,7 @@ testrargs=
testropts=
wrapper=""
just_pep8=0
+just_pep8_changed=0
no_pep8=0
coverage=0
debug=0
@@ -167,12 +170,16 @@ function copy_subunit_log {
cp $LOGNAME subunit.log
}
-function run_pep8 {
- echo "Running flake8 ..."
+function warn_on_flake8_without_venv {
if [ $never_venv -eq 1 ]; then
- echo "**WARNING**:"
- echo "Running flake8 without virtual env may miss OpenStack HACKING detection"
+ echo "**WARNING**:"
+ echo "Running flake8 without virtual env may miss OpenStack HACKING detection"
fi
+}
+
+function run_pep8 {
+ echo "Running flake8 ..."
+ warn_on_flake8_without_venv
bash -c "${wrapper} flake8"
}
@@ -219,6 +226,19 @@ if [ $just_pep8 -eq 1 ]; then
exit
fi
+if [ $just_pep8_changed -eq 1 ]; then
+    # NOTE(gilliard) We want to use flake8 to check the entirety of every file that has
+ # a change in it. Unfortunately the --filenames argument to flake8 only accepts
+    # file *names* and there are no files named (e.g.) "nova/compute/manager.py". The
+ # --diff argument behaves surprisingly as well, because although you feed it a
+ # diff, it actually checks the file on disk anyway.
+ files=$(git diff --name-only HEAD~1 | tr '\n' ' ')
+ echo "Running flake8 on ${files}"
+ warn_on_flake8_without_venv
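+    # Diffing each file against /dev/null marks every line as added, so
+    # "flake8 --diff" effectively checks the whole of each file.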
+ bash -c "diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff"
+ exit
+fi
+
run_tests
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
diff --git a/setup.cfg b/setup.cfg
index cb8c651ff2..46b34e46c1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,6 +27,8 @@ packages =
nova
[entry_points]
+nova.compute.resources =
+ vcpu = nova.compute.resources.vcpu:VCPU
nova.image.download.modules =
file = nova.image.download.file
console_scripts =
@@ -161,3 +163,7 @@ output_file = nova/locale/nova.pot
[wheel]
universal = 1
+
+[pbr]
+autodoc_index_modules = 1
+warnerrors = true
diff --git a/test-requirements.txt b/test-requirements.txt
index 282a4dda2b..a6da658e73 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,6 +3,7 @@ coverage>=3.6
discover
feedparser
fixtures>=0.3.14
+libvirt-python>=1.2.5 # LGPLv2+
mock>=1.0
mox>=0.5.3
MySQL-python
@@ -10,7 +11,7 @@ psycopg2
pylint==0.25.2
python-subunit>=0.0.18
sphinx>=1.1.2,!=1.2.0,<1.3
-oslosphinx
-oslotest
+oslosphinx>=2.2.0.0a2
+oslotest>=1.1.0.0a1
testrepository>=0.0.18
testtools>=0.9.34
diff --git a/tools/db/schema_diff.py b/tools/db/schema_diff.py
index 9e88f4f22c..009db6580b 100755
--- a/tools/db/schema_diff.py
+++ b/tools/db/schema_diff.py
@@ -49,7 +49,7 @@
import subprocess
import sys
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
### Dump
diff --git a/tools/esx/guest_tool.py b/tools/esx/guest_tool.py
index 4c830b05d4..c472d6cbb1 100644
--- a/tools/esx/guest_tool.py
+++ b/tools/esx/guest_tool.py
@@ -28,7 +28,7 @@
import sys
import time
-from nova.openstack.common.gettextutils import _
+from nova.i18n import _
PLATFORM_WIN = 'win32'
diff --git a/tox.ini b/tox.ini
index daeebd56d2..27363a75b6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,37 +4,34 @@ envlist = py26,py27,py33,pep8
skipsdist = True
[testenv]
-sitepackages = True
usedevelop = True
install_command = pip install -U --force-reinstall {opts} {packages}
+# Note the hash seed is set to 0 until nova can be tested with a
+# random hash seed successfully.
setenv = VIRTUAL_ENV={envdir}
+ PYTHONHASHSEED=0
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
python -m nova.openstack.common.lockutils python setup.py test --slowest --testr-args='{posargs}'
[tox:jenkins]
-sitepackages = True
downloadcache = ~/cache/pip
[testenv:pep8]
-sitepackages = False
commands =
flake8 {posargs}
[testenv:genconfig]
-sitepackages = False
commands =
bash tools/config/generate_sample.sh -b . -p nova -o etc/nova
[testenv:pylint]
-setenv = VIRTUAL_ENV={envdir}
commands = bash tools/lintstack.sh
[testenv:cover]
# Also do not run test_coverage_ext tests while gathering coverage as those
# tests conflict with coverage.
-setenv = VIRTUAL_ENV={envdir}
commands =
coverage erase
python -m nova.openstack.common.lockutils python setup.py testr --coverage \
@@ -48,22 +45,16 @@ commands = {posargs}
[testenv:docs]
commands = python setup.py build_sphinx
-[testenv:py27local]
-sitepackages = False
-
[flake8]
-# E712 is ignored on purpose, since it is normal to use 'column == true'
-# in sqlalchemy.
# H803 skipped on purpose per list discussion.
# E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126
# The rest of the ignores are TODOs
-# New from hacking 0.9: E129, E131, E265, H407, H405, H904
-# Stricter in hacking 0.9: F402
+# New from hacking 0.9: E129, E131, H407, H405, H904
# E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301
-ignore = E121,E122,E123,E124,E125,E129,E126,E127,E128,E131,E251,E265,E711,E712,F402,H405,H803,H904
+ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,H803,H904
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools
[hacking]
local-check-factory = nova.hacking.checks.factory
-import_exceptions = nova.openstack.common.gettextutils
+import_exceptions = nova.i18n