[SAP] Allow admins to view all volume_admin_metadata #88

Draft
wants to merge 1 commit into base: stable/train-m3
9 changes: 5 additions & 4 deletions cinder/api/api_utils.py
@@ -69,7 +69,7 @@ def remove_invalid_filter_options(context, filters,
_visible_admin_metadata_keys = ['readonly', 'attached_mode']


def add_visible_admin_metadata(volume):
def add_visible_admin_metadata(volume, all_admin_metadata=False):
"""Add user-visible admin metadata to regular metadata.

Extracts the admin metadata keys that are to be made visible to
@@ -82,16 +82,17 @@ def add_visible_admin_metadata(volume):
if isinstance(volume['volume_admin_metadata'], dict):
volume_admin_metadata = volume['volume_admin_metadata']
for key in volume_admin_metadata:
if key in _visible_admin_metadata_keys:
if key in _visible_admin_metadata_keys or all_admin_metadata:
visible_admin_meta[key] = volume_admin_metadata[key]
else:
for item in volume['volume_admin_metadata']:
if item['key'] in _visible_admin_metadata_keys:
if (item['key'] in _visible_admin_metadata_keys or
all_admin_metadata):
visible_admin_meta[item['key']] = item['value']
# avoid circular ref when volume is a Volume instance
elif (volume.get('admin_metadata') and
isinstance(volume.get('admin_metadata'), dict)):
for key in _visible_admin_metadata_keys:
for key in (volume['admin_metadata'] if all_admin_metadata
else _visible_admin_metadata_keys):
if key in volume['admin_metadata'].keys():
visible_admin_meta[key] = volume['admin_metadata'][key]

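For reviewers, a minimal sketch (not part of this change) of how the updated helper is intended to behave. The sample volume dict and the vmware_ds_name key below are illustrative only, and the merge into volume['metadata'] relies on the unchanged tail of the helper:

from cinder.api import api_utils

volume = {
    'metadata': {},
    'volume_admin_metadata': {'readonly': 'False',
                              'vmware_ds_name': 'datastore-01'},
}

# Default call: only the whitelisted keys ('readonly', 'attached_mode')
# are copied into the user-visible metadata.
api_utils.add_visible_admin_metadata(volume)
# volume['metadata'] == {'readonly': 'False'}

# With the new flag (granted via the volume:get_admin_metadata policy),
# every admin metadata key becomes visible.
api_utils.add_visible_admin_metadata(volume, all_admin_metadata=True)
# volume['metadata'] == {'readonly': 'False',
#                        'vmware_ds_name': 'datastore-01'}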
19 changes: 16 additions & 3 deletions cinder/api/v2/volumes.py
Expand Up @@ -37,6 +37,7 @@
from cinder.i18n import _
from cinder.image import glance
from cinder import objects
from cinder.policies import volume_metadata as metadata_policy
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import volume_utils
@@ -65,7 +66,11 @@ def show(self, req, id):
vol = self.volume_api.get(context, id, viewable_admin_meta=True)
req.cache_db_volume(vol)

api_utils.add_visible_admin_metadata(vol)
all_admin_metadata = context.authorize(
metadata_policy.GET_ADMIN_METADATA_POLICY, fatal=False)

api_utils.add_visible_admin_metadata(
vol, all_admin_metadata=all_admin_metadata)

return self._view_builder.detail(req, vol)

@@ -123,8 +128,12 @@ def _get_volumes(self, req, is_detail):
viewable_admin_meta=True,
offset=offset)

all_admin_metadata = context.authorize(
metadata_policy.GET_ADMIN_METADATA_POLICY, fatal=False)

for volume in volumes:
api_utils.add_visible_admin_metadata(volume)
api_utils.add_visible_admin_metadata(
volume, all_admin_metadata=all_admin_metadata)

req.cache_db_volumes(volumes.objects)

@@ -310,7 +319,11 @@ def update(self, req, id, body):

volume.update(update_dict)

api_utils.add_visible_admin_metadata(volume)
all_admin_metadata = context.authorize(
metadata_policy.GET_ADMIN_METADATA_POLICY, fatal=False)

api_utils.add_visible_admin_metadata(
volume, all_admin_metadata=all_admin_metadata)

volume_utils.notify_about_volume_usage(context, volume,
'update.end')
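A note on the pattern used in show(), _get_volumes() and update() above: Cinder's RequestContext.authorize() with fatal=False returns a boolean instead of raising PolicyNotAuthorized, which is why the result can be passed straight through as the all_admin_metadata flag. A hedged sketch, with an illustrative context:

from cinder import context as cinder_context
from cinder.policies import volume_metadata as metadata_policy

ctxt = cinder_context.RequestContext(user_id='fake-user',
                                     project_id='fake-project',
                                     is_admin=False)

# fatal=False turns the policy check into a yes/no answer instead of
# an exception.
allowed = ctxt.authorize(metadata_policy.GET_ADMIN_METADATA_POLICY,
                         fatal=False)
# allowed is False here, since the default check string is RULE_ADMIN_API.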
7 changes: 6 additions & 1 deletion cinder/api/v3/volumes.py
@@ -35,6 +35,7 @@
from cinder.i18n import _
from cinder.image import glance
from cinder import objects
from cinder.policies import volume_metadata as metadata_policy
from cinder.policies import volumes as policy
from cinder import utils

@@ -135,8 +136,12 @@ def _get_volumes(self, req, is_detail):
total_count = self.volume_api.calculate_resource_count(
context, 'volume', filters)

all_admin_metadata = context.authorize(
metadata_policy.GET_ADMIN_METADATA_POLICY, fatal=False)

for volume in volumes:
api_utils.add_visible_admin_metadata(volume)
api_utils.add_visible_admin_metadata(
volume, all_admin_metadata=all_admin_metadata)

req.cache_db_volumes(volumes.objects)

15 changes: 15 additions & 0 deletions cinder/policies/volume_metadata.py
@@ -23,6 +23,7 @@
UPDATE_POLICY = "volume:update_volume_metadata"
IMAGE_METADATA_POLICY = "volume_extension:volume_image_metadata"
UPDATE_ADMIN_METADATA_POLICY = "volume:update_volume_admin_metadata"
GET_ADMIN_METADATA_POLICY = "volume:get_admin_metadata"


BASE_POLICY_NAME = 'volume:volume_metadata:%s'
@@ -117,6 +118,20 @@
'path': '/volumes/{volume_id}/action (os-attach)'
}
]),
policy.DocumentedRuleDefault(
name=GET_ADMIN_METADATA_POLICY,
check_str=base.RULE_ADMIN_API,
description="get all volume admin metadata.",
operations=[
{
'method': 'GET',
'path': '/volumes/detail'
},
{
'method': 'GET',
'path': '/volumes/{volume_id}'
},
]),
]


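The new rule defaults to the admin-only check string; a deployment that wants different behaviour could override it in its oslo.policy file. A possible policy.yaml entry (the file path and check string below are the operator's choice, not part of this change):

# /etc/cinder/policy.yaml
"volume:get_admin_metadata": "rule:admin_api"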
28 changes: 28 additions & 0 deletions cinder/tests/unit/policies/test_volume_metadata.py
@@ -193,3 +193,31 @@ def test_owner_cannot_update_metadata_for_others(self, mock_volume):
response = self._get_request_response(non_owner_context, path, 'PUT',
body=body)
self.assertEqual(http_client.FORBIDDEN, response.status_int)

@mock.patch.object(volume_api.API, 'get')
def test_admin_can_get_admin_metadata(self, mock_volume):
admin_context = self.admin_context
user_context = self.user_context

admin_metadata = {"k": "v"}

volume = self._create_fake_volume(user_context,
admin_metadata=admin_metadata)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}

response = self._get_request_response(user_context, path, 'GET')

self.assertEqual(http_client.OK, response.status_int)
self.assertEqual(response.json_body['volume']['id'], volume.id)

res_meta = response.json_body['volume']['metadata']
self.assertEqual({}, res_meta)

response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(http_client.OK, response.status_int)
res_meta = response.json_body['volume']['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v', res_meta['k'])
103 changes: 67 additions & 36 deletions cinder/volume/drivers/vmware/vmdk.py
@@ -310,7 +310,11 @@ class VMwareVcVmdkDriver(driver.VolumeDriver):
# 3.4.2.99.1 - VMware implementation of volume migration
# 3.4.2.99.2 - Added soft sharding volume migration, fixed a small issue
# in check_for_setup_error where storage_profile not set.
VERSION = '3.4.2.99.2'
# 3.4.2.99.3 - Added model_update to ensure_export to save the vCenter
# UUID, datastore name and storage profile associated with a
# volume. This will help us update the host once we expose a
# datastore as a pool.
VERSION = '3.4.2.99.3'

# ThirdPartySystems wiki page
CI_WIKI_NAME = "VMware_CI"
@@ -521,7 +525,8 @@ def create_volume(self, volume):
if self.configuration.vmware_lazy_create:
self._verify_volume_creation(volume)
else:
self._create_backing(volume)
backing = self._create_backing(volume)
return self._volume_provider_metadata(volume, backing=backing)

def _delete_volume(self, volume):
"""Delete the volume backing if it is present.
@@ -612,6 +617,7 @@ def _get_extra_config(self, volume):
return {EXTRA_CONFIG_VOLUME_ID_KEY: volume['id'],
volumeops.BACKING_UUID_KEY: volume['id']}

@utils.trace
def _create_backing(self, volume, host=None, create_params=None):
"""Create volume backing under the given host.

@@ -880,6 +886,44 @@ def _initialize_connection(self, volume, connector):
"""
# Check that connection_capabilities match
# This ensures the connector is bound to the same vCenter service
backing = self.volumeops.get_backing(volume.name, volume.id)
return self._get_connection_info(volume, backing, connector)

@utils.trace
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.

The implementation returns the following information:

.. code-block:: default

{
'driver_volume_type': 'vmdk',
'data': {'volume': $VOLUME_MOREF_VALUE,
'volume_id': $VOLUME_ID
}
}

:param volume: Volume object
:param connector: Connector information
:return: Return connection information
"""
return self._initialize_connection(volume, connector)

@utils.trace
def terminate_connection(self, volume, connector, force=False, **kwargs):
# Checking if the connection was used to restore from a backup. In
# that case, the VMDK connector in os-brick created a new backing
# which will replace the initial one. Here we set the proper name
# and backing uuid for the new backing, because os-brick doesn't do it.
if (connector and 'platform' in connector and 'os_type' in connector
and self._is_volume_subject_to_import_vapp(volume)):
backing = self.volumeops.get_backing_by_uuid(volume['id'])

self.volumeops.rename_backing(backing, volume['name'])
self.volumeops.update_backing_disk_uuid(backing, volume['id'])

def create_export(self, context, volume, connector):
if 'connection_capabilities' in connector:
missing = set(self._get_connection_capabilities()) -\
set(connector['connection_capabilities'])
@@ -918,47 +962,34 @@ def _initialize_connection(self, volume, connector):
# Create backing
backing = self._create_backing(volume)

return self._get_connection_info(volume, backing, connector)
return self._volume_provider_metadata(volume, backing=backing)

@utils.trace
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.

The implementation returns the following information:

.. code-block:: default

{
'driver_volume_type': 'vmdk',
'data': {'volume': $VOLUME_MOREF_VALUE,
'volume_id': $VOLUME_ID
}
def _volume_provider_metadata(self, volume, backing=None):
if not backing:
backing = self.volumeops.get_backing(volume.name, volume.id)

ds = self.volumeops.get_datastore(backing)
summary = self.volumeops.get_summary(ds)
profile = self._get_storage_profile(volume)
vcenter_uuid = (
self.session.vim.service_content.about.instanceUuid
)
model_update = {
'admin_metadata': {
'vmware_vcenter_id': vcenter_uuid,
'vmware_ds_name': summary.name,
'vmware_profile_name': profile,
}
}

:param volume: Volume object
:param connector: Connector information
:return: Return connection information
"""
return self._initialize_connection(volume, connector)
return model_update

@utils.trace
def terminate_connection(self, volume, connector, force=False, **kwargs):
# Checking if the connection was used to restore from a backup. In
# that case, the VMDK connector in os-brick created a new backing
# which will replace the initial one. Here we set the proper name
# and backing uuid for the new backing, because os-brick doesn't do it.
if (connector and 'platform' in connector and 'os_type' in connector
and self._is_volume_subject_to_import_vapp(volume)):
backing = self.volumeops.get_backing_by_uuid(volume['id'])

self.volumeops.rename_backing(backing, volume['name'])
self.volumeops.update_backing_disk_uuid(backing, volume['id'])

def create_export(self, context, volume, connector):
pass

def ensure_export(self, context, volume):
pass
"""Called at volume startup."""
if not volume.get('volume_admin_metadata'):
return self._volume_provider_metadata(volume)

def remove_export(self, context, volume):
pass
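To make the driver change concrete, this is roughly the model_update that _volume_provider_metadata() assembles from the backing's datastore summary, the volume's storage profile and the vCenter instanceUuid; the values are invented examples:

model_update = {
    'admin_metadata': {
        'vmware_vcenter_id': '4e9f2d6a-1c7b-4b9d-8d2e-0f6a5c3b1e22',
        'vmware_ds_name': 'datastore-01',
        'vmware_profile_name': 'gold',
    }
}

create_volume(), create_export() and ensure_export() return this dict so the volume manager can persist it as volume admin metadata.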
12 changes: 11 additions & 1 deletion cinder/volume/manager.py
Expand Up @@ -538,13 +538,23 @@ def _init_host(self, added_to_cluster=None, **kwargs):

try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
model_update = self.driver.ensure_export(
ctxt, volume)
except Exception:
LOG.exception("Failed to re-export volume, "
"setting to ERROR.",
resource=volume)
volume.conditional_update({'status': 'error'},
{'status': 'in-use'})

try:
if volume['status'] in ['in-use'] and model_update:
volume.update(model_update)
volume.save()
except Exception:
LOG.exception("Model update failed.",
resource=volume)

# All other cleanups are processed by parent class -
# CleanableManager

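Taken together: once _init_host() re-exports an in-use volume and saves the returned model_update, an admin who passes the new volume:get_admin_metadata check would see the driver-supplied keys merged into the volume's metadata, while a regular user still sees only the whitelisted keys. An abridged, invented example of the difference:

# GET /v3/{project_id}/volumes/{volume_id} as a regular user (abridged):
{'volume': {'id': 'c4b1...', 'metadata': {'readonly': 'False'}}}

# Same request as an admin with the new policy granted (abridged):
{'volume': {'id': 'c4b1...',
            'metadata': {'readonly': 'False',
                         'vmware_vcenter_id': '4e9f...',
                         'vmware_ds_name': 'datastore-01',
                         'vmware_profile_name': 'gold'}}}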