A brief analysis of the Nova instance cold migration workflow



This article is based on the OpenStack Newton release.

Overview

Cold migration requires that the physical node hosting the instance is healthy; only instances whose state is ACTIVE or STOPPED can be cold migrated.

Brief workflow (a usage sketch for triggering it follows the list):

   1. The instance resize API is invoked, which validates that the instance state is ACTIVE or STOPPED.

   2. A resize RPC message is sent to nova-conductor.

     nova-conductor calls nova-scheduler to select a physical host A with sufficient resources as the migration destination.

   3. nova-conductor sends a prep_resize RPC request to the selected physical host A.

   4. The selected physical host A executes prep_resize, which mainly reserves resources according to the flavor.

   5. Host A sends a resize_instance request to physical host B, where the instance currently resides.

   6. After receiving resize_instance, host B releases the resources owned by the instance (including storage and network).

   7. Host B notifies the selected host A that the instance's resources have been released.

   8. When host A receives the RPC telling it that the instance's resources have been released, it attaches the relevant resources in its own environment and starts the instance (powering it on if it was previously running).
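As a concrete, hedged illustration of step 1, the flow can be triggered from python-novaclient. The credentials, endpoint, and IDs below are placeholders, not values from this article:

# Minimal sketch: triggering a cold migration (same flavor) or a resize
# (new flavor) against the compute API. Credentials/IDs are placeholders.
from novaclient import client

nova = client.Client('2', 'admin', 'ADMIN_PASS', 'admin',
                     'http://controller:5000/v3')

server = nova.servers.get('SERVER_UUID')
nova.servers.migrate(server)                 # cold migrate, keeps the flavor
# nova.servers.resize(server, 'NEW_FLAVOR')  # resize, moves to a new flavor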

Code entry point

The entry point is located in nova/api/openstack/compute/migrate_server.py.

@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
@wsgi.action('migrate')
def _migrate(self, req, id, body):
    """Permit admins to migrate a server to a new host."""
    context = req.environ['nova.context']
    context.can(ms_policies.POLICY_ROOT % 'migrate')

    instance = common.get_instance(self.compute_api, context, id)
    try:
        self.compute_api.resize(req.environ['nova.context'], instance)
    except (exception.TooManyInstances, exception.QuotaError) as e:
        raise exc.HTTPForbidden(explanation=e.format_message())
    except exception.InstanceIsLocked as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as state_error:
        common.raise_http_conflict_for_instance_invalid_state(state_error,
                'migrate', id)
    except exception.InstanceNotFound as e:
        raise exc.HTTPNotFound(explanation=e.format_message())
    except exception.NoValidHost as e:
        raise exc.HTTPBadRequest(explanation=e.format_message())
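For reference, this handler is registered as the server "migrate" action, so the corresponding raw API request is a POST to the server's action URL. A minimal sketch, with a placeholder endpoint and token:

# Minimal sketch of the raw "migrate" server action behind the handler above.
# The endpoint and token are placeholders.
import requests

resp = requests.post(
    'http://controller:8774/v2.1/servers/SERVER_UUID/action',
    headers={'X-Auth-Token': 'ADMIN_TOKEN',
             'Content-Type': 'application/json'},
    json={'migrate': None})
print(resp.status_code)   # 202 expected, matching @wsgi.response(202)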

Calling the resize API to perform the migration

The code is in nova/compute/api.py.

Both migrate and resize go through the resize API: migrate keeps the same flavor, while resize switches to a different flavor.

@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])  # only ACTIVE or STOPPED instances may be migrated
def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
           **extra_instance_updates):
    """Resize (ie, migrate) a running instance.

    If flavor_id is None, the process is considered a migration, keeping
    the original flavor_id. If flavor_id is not None, the instance should
    be migrated to a new host and resized to the new flavor_id.
    """
    self._check_auto_disk_config(instance, **extra_instance_updates)

    current_instance_type = instance.get_flavor()
    # If flavor_id is not provided, only migrate the instance.
    if not flavor_id:
        LOG.debug("flavor_id is None. Assuming migration.",
                  instance=instance)
        new_instance_type = current_instance_type
    else:
        new_instance_type = flavors.get_flavor_by_flavor_id(
            flavor_id, read_deleted="no")
        if (new_instance_type.get('root_gb') == 0 and
                current_instance_type.get('root_gb') != 0 and
                not compute_utils.is_volume_backed_instance(context,
                                                            instance)):
            reason = _('Resize to zero disk flavor is not allowed.')
            raise exception.CannotResizeDisk(reason=reason)

    if not new_instance_type:
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    current_instance_type_name = current_instance_type['name']
    new_instance_type_name = new_instance_type['name']
    LOG.debug("Old instance type %(current_instance_type_name)s, "
              "new instance type %(new_instance_type_name)s",
              {'current_instance_type_name': current_instance_type_name,
               'new_instance_type_name': new_instance_type_name},
              instance=instance)

    same_instance_type = (current_instance_type['id'] ==
                          new_instance_type['id'])

    # NOTE(sirp): We don't want to force a customer to change their flavor
    # when Ops is migrating off of a failed host.
    if not same_instance_type and new_instance_type.get('disabled'):
        raise exception.FlavorNotFound(flavor_id=flavor_id)

    if same_instance_type and flavor_id and self.cell_type != 'compute':
        raise exception.CannotResizeToSameFlavor()

    # ensure there is sufficient headroom for upsizes
    # If the flavor changes, quota deltas need to be reserved first.
    if flavor_id:
        deltas = compute_utils.upsize_quota_delta(context,
                                                  new_instance_type,
                                                  current_instance_type)
        try:
            quotas = compute_utils.reserve_quota_delta(context, deltas,
                                                       instance)
        except exception.OverQuota as exc:
            quotas = exc.kwargs['quotas']
            overs = exc.kwargs['overs']
            usages = exc.kwargs['usages']
            headroom = self._get_headroom(quotas, usages, deltas)
            (overs, reqs, total_alloweds,
             useds) = self._get_over_quota_detail(headroom, overs, quotas,
                                                  deltas)
            LOG.warning(_LW("%(overs)s quota exceeded for %(pid)s,"
                            " tried to resize instance."),
                        {'overs': overs, 'pid': context.project_id})
            raise exception.TooManyInstances(overs=overs,
                                             req=reqs,
                                             used=useds,
                                             allowed=total_alloweds)
    else:
        quotas = objects.Quotas(context=context)

    instance.task_state = task_states.RESIZE_PREP
    instance.progress = 0
    instance.update(extra_instance_updates)
    instance.save(expected_task_state=[None])

    filter_properties = {'ignore_hosts': []}

    if not CONF.allow_resize_to_same_host:
        filter_properties['ignore_hosts'].append(instance.host)

    if self.cell_type == 'api':
        # Commit reservations early and create migration record.
        self._resize_cells_support(context, quotas, instance,
                                   current_instance_type,
                                   new_instance_type)

    if not flavor_id:
        self._record_action_start(context, instance,
                                  instance_actions.MIGRATE)
    else:
        self._record_action_start(context, instance,
                                  instance_actions.RESIZE)

    # NOTE(sbauza): The migration script we provided in Newton should make
    # sure that all our instances are currently migrated to have an
    # attached RequestSpec object but let's consider that the operator only
    # half migrated all their instances in the meantime.
    try:
        request_spec = objects.RequestSpec.get_by_instance_uuid(
            context, instance.uuid)
        request_spec.ignore_hosts = filter_properties['ignore_hosts']
    except exception.RequestSpecNotFound:
        # Some old instances can still have no RequestSpec object attached
        # to them, we need to support the old way
        request_spec = None

    scheduler_hint = {'filter_properties': filter_properties}

    self.compute_task_api.resize_instance(context, instance,
            extra_instance_updates, scheduler_hint=scheduler_hint,
            flavor=new_instance_type,
            reservations=quotas.reservations or [],
            clean_shutdown=clean_shutdown,
            request_spec=request_spec)

Dispatching the resize for scheduling

The code is in nova/conductor/api.py.

def resize_instance(self, context, instance, extra_instance_updates,
                    scheduler_hint, flavor, reservations,
                    clean_shutdown=True, request_spec=None):
    # NOTE(comstud): 'extra_instance_updates' is not used here but is
    # needed for compatibility with the cells_rpcapi version of this
    # method.
    self.conductor_compute_rpcapi.migrate_server(
        context, instance, scheduler_hint,
        live=False, rebuild=False,  # live=False: this is a cold migration
        flavor=flavor, block_migration=None, disk_over_commit=None,
        reservations=reservations, clean_shutdown=clean_shutdown,
        request_spec=request_spec)

The conductor manager handles this RPC request

The code is in nova/conductor/manager.py.

@messaging.expected_exceptions(
    exception.NoValidHost,
    exception.ComputeServiceUnavailable,
    exception.ComputeHostNotFound,
    exception.InvalidHypervisorType,
    exception.InvalidCPUInfo,
    exception.UnableToMigrateToSelf,
    exception.DestinationHypervisorTooOld,
    exception.InvalidLocalStorage,
    exception.InvalidSharedStorage,
    exception.HypervisorUnavailable,
    exception.InstanceInvalidState,
    exception.MigrationPreCheckError,
    exception.MigrationPreCheckClientException,
    exception.LiveMigrationWithOldNovaNotSupported,
    exception.UnsupportedPolicyException)
@wrap_instance_event(prefix='conductor')
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
        flavor, block_migration, disk_over_commit, reservations=None,
        clean_shutdown=True, request_spec=None):
    if instance and not isinstance(instance, nova_object.NovaObject):
        # NOTE(danms): Until v2 of the RPC API, we need to tolerate
        # old-world instance objects here
        attrs = ['metadata', 'system_metadata', 'info_cache',
                 'security_groups']
        instance = objects.Instance._from_db_object(
            context, objects.Instance(), instance,
            expected_attrs=attrs)
    # NOTE: Remove this when we drop support for v1 of the RPC API
    if flavor and not isinstance(flavor, objects.Flavor):
        # Code downstream may expect extra_specs to be populated since it
        # is receiving an object, so lookup the flavor to ensure this.
        flavor = objects.Flavor.get_by_id(context, flavor['id'])

    if live and not rebuild and not flavor:
        self._live_migrate(context, instance, scheduler_hint,
                           block_migration, disk_over_commit, request_spec)
    elif not live and not rebuild and flavor:
        instance_uuid = instance.uuid
        with compute_utils.EventReporter(context, 'cold_migrate',
                                         instance_uuid):
            self._cold_migrate(context, instance, flavor,
                               scheduler_hint['filter_properties'],
                               reservations, clean_shutdown, request_spec)
    else:
        raise NotImplementedError()

Executing the cold migration code

The code is in nova/conductor/manager.py.

This code mainly assembles a MigrationTask from the given parameters and calls its execute() method.

def _cold_migrate(self, context, instance, flavor, filter_properties,
                  reservations, clean_shutdown, request_spec):
    image = utils.get_image_from_system_metadata(
        instance.system_metadata)

    # NOTE(sbauza): If a reschedule occurs when prep_resize(), then
    # it only provides filter_properties legacy dict back to the
    # conductor with no RequestSpec part of the payload.
    if not request_spec:
        # Make sure we hydrate a new RequestSpec object with the new flavor
        # and not the nested one from the instance
        request_spec = objects.RequestSpec.from_components(
            context, instance.uuid, image,
            flavor, instance.numa_topology, instance.pci_requests,
            filter_properties, None, instance.availability_zone)
    else:
        # NOTE(sbauza): Resizes means new flavor, so we need to update the
        # original RequestSpec object for make sure the scheduler verifies
        # the right one and not the original flavor
        request_spec.flavor = flavor

    task = self._build_cold_migrate_task(context, instance, flavor,
                                         request_spec,
                                         reservations, clean_shutdown)
    # TODO(sbauza): Provide directly the RequestSpec object once
    # _set_vm_state_and_notify() accepts it
    legacy_spec = request_spec.to_legacy_request_spec_dict()
    try:
        # Execute the cold migration task.
        task.execute()
    except exception.NoValidHost as ex:
        vm_state = instance.vm_state
        if not vm_state:
            vm_state = vm_states.ACTIVE
        updates = {'vm_state': vm_state, 'task_state': None}
        self._set_vm_state_and_notify(context, instance.uuid,
                                      'migrate_server',
                                      updates, ex, legacy_spec)
        # if the flavor IDs match, it's migrate; otherwise resize
        if flavor.id == instance.instance_type_id:
            msg = _("No valid host found for cold migrate")
        else:
            msg = _("No valid host found for resize")
        raise exception.NoValidHost(reason=msg)
    except exception.UnsupportedPolicyException as ex:
        with excutils.save_and_reraise_exception():
            vm_state = instance.vm_state
            if not vm_state:
                vm_state = vm_states.ACTIVE
            updates = {'vm_state': vm_state, 'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, legacy_spec)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            updates = {'vm_state': instance.vm_state,
                       'task_state': None}
            self._set_vm_state_and_notify(context, instance.uuid,
                                          'migrate_server',
                                          updates, ex, legacy_spec)
    # NOTE(sbauza): Make sure we persist the new flavor in case we had
    # a successful scheduler call if and only if nothing bad happened
    if request_spec.obj_what_changed():
        request_spec.save()
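_build_cold_migrate_task() is not quoted in the article; in Newton it is essentially a small factory that wires up the MigrationTask shown in the next section with the conductor's compute_rpcapi and scheduler_client. An approximate sketch, reconstructed from memory rather than quoted verbatim:

def _build_cold_migrate_task(self, context, instance, flavor, request_spec,
                             reservations, clean_shutdown):
    # Approximate reconstruction: instantiate the MigrationTask with the
    # conductor's RPC and scheduler clients.
    return migrate.MigrationTask(context, instance, flavor, request_spec,
                                 reservations, clean_shutdown,
                                 self.compute_rpcapi,
                                 self.scheduler_client)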

Executing the cold migration task

The code is in nova/conductor/tasks/migrate.py.

The task selects a physical node for the cold migration, sets up the migration, and sends an RPC to the selected host to carry it out.

task.execute() enters the following class:

class MigrationTask(base.TaskBase):
    def __init__(self, context, instance, flavor,
                 request_spec, reservations, clean_shutdown, compute_rpcapi,
                 scheduler_client):
        super(MigrationTask, self).__init__(context, instance)
        self.clean_shutdown = clean_shutdown
        self.request_spec = request_spec
        self.reservations = reservations
        self.flavor = flavor
        self.quotas = None

        self.compute_rpcapi = compute_rpcapi
        self.scheduler_client = scheduler_client

    def _execute(self):
        image = self.request_spec.image
        self.quotas = objects.Quotas.from_reservations(self.context,
                                                       self.reservations,
                                                       instance=self.instance)
        # TODO(sbauza): Remove that once prep_resize() accepts a RequestSpec
        # object in the signature and all the scheduler.utils methods too
        legacy_spec = self.request_spec.to_legacy_request_spec_dict()
        legacy_props = self.request_spec.to_legacy_filter_properties_dict()
        scheduler_utils.setup_instance_group(self.context, legacy_spec,
                                             legacy_props)
        scheduler_utils.populate_retry(legacy_props,
                                       self.instance.uuid)

        # TODO(sbauza): Remove that RequestSpec rehydratation once
        # scheduler.utils methods use directly the NovaObject.
        self.request_spec = objects.RequestSpec.from_components(
            self.context, self.instance.uuid, image,
            self.flavor, self.instance.numa_topology,
            self.instance.pci_requests, legacy_props, None,
            self.instance.availability_zone)

        # NOTE(sbauza): Force_hosts/nodes needs to be reset
        # if we want to make sure that the next destination
        # is not forced to be the original host
        self.request_spec.reset_forced_destinations()

        # Ask nova-scheduler to pick a host that can accept the instance.
        hosts = self.scheduler_client.select_destinations(
            self.context, self.request_spec)
        host_state = hosts[0]

        scheduler_utils.populate_filter_properties(legacy_props,
                                                   host_state)
        # context is not serializable
        legacy_props.pop('context', None)

        (host, node) = (host_state['host'], host_state['nodename'])

        # FIXME(sbauza): Serialize/Unserialize the legacy dict because of
        # oslo.messaging #1529084 to transform datetime values into strings.
        # tl;dr: datetimes in dicts are not accepted as correct values by the
        # rpc fake driver.
        legacy_spec = jsonutils.loads(jsonutils.dumps(legacy_spec))

        self.compute_rpcapi.prep_resize(
            self.context, self.instance, legacy_spec['image'],
            self.flavor, host, self.reservations,
            request_spec=legacy_spec, filter_properties=legacy_props,
            node=node, clean_shutdown=self.clean_shutdown)

    def rollback(self):
        if self.quotas:
            self.quotas.rollback()

Executing prep_resize

The code is in nova/compute/manager.py.

This runs on the selected physical node and performs the pre-processing for the cold migration.

Its main job is resource reservation.

@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@wrap_instance_fault
def prep_resize(self, context, image, instance, instance_type,
                reservations, request_spec, filter_properties, node,
                clean_shutdown):
    """Initiates the process of moving a running instance to another host.

    Possibly changes the RAM and disk size in the process.
    """
    if node is None:
        node = self.driver.get_available_nodes(refresh=True)[0]
        LOG.debug("No node specified, defaulting to %s", node,
                  instance=instance)

    # NOTE(melwitt): Remove this in version 5.0 of the RPC API
    # Code downstream may expect extra_specs to be populated since it
    # is receiving an object, so lookup the flavor to ensure this.
    if not isinstance(instance_type, objects.Flavor):
        instance_type = objects.Flavor.get_by_id(context,
                                                 instance_type['id'])

    quotas = objects.Quotas.from_reservations(context,
                                              reservations,
                                              instance=instance)
    with self._error_out_instance_on_exception(context, instance,
                                               quotas=quotas):
        compute_utils.notify_usage_exists(self.notifier, context, instance,
                                          current_period=True)
        self._notify_about_instance_usage(
                context, instance, "resize.prep.start")
        try:
            self._prep_resize(context, image, instance,
                              instance_type, quotas,
                              request_spec, filter_properties,
                              node, clean_shutdown)
        # NOTE(dgenin): This is thrown in LibvirtDriver when the
        #               instance to be migrated is backed by LVM.
        #               Remove when LVM migration is implemented.
        except exception.MigrationPreCheckError:
            raise
        except Exception:
            # try to re-schedule the resize elsewhere:
            exc_info = sys.exc_info()
            self._reschedule_resize_or_reraise(context, image, instance,
                    exc_info, instance_type, quotas, request_spec,
                    filter_properties)
        finally:
            extra_usage_info = dict(
                    new_instance_type=instance_type.name,
                    new_instance_type_id=instance_type.id)

            self._notify_about_instance_usage(
                context, instance, "resize.prep.end",
                extra_usage_info=extra_usage_info)
def _prep_resize(self, context, image, instance, instance_type,
        quotas, request_spec, filter_properties, node,
        clean_shutdown=True):

    if not filter_properties:
        filter_properties = {}

    if not instance.host:
        self._set_instance_obj_error_state(context, instance)
        msg = _('Instance has no source host')
        raise exception.MigrationError(reason=msg)

    same_host = instance.host == self.host
    # if the flavor IDs match, it's migrate; otherwise resize
    if same_host and instance_type.id == instance['instance_type_id']:
        # check driver whether support migrate to same host
        if not self.driver.capabilities['supports_migrate_to_same_host']:
            raise exception.UnableToMigrateToSelf(
                instance_id=instance.uuid, host=self.host)

    # NOTE(danms): Stash the new instance_type to avoid having to
    # look it up in the database later
    instance.new_flavor = instance_type

    # NOTE(mriedem): Stash the old vm_state so we can set the
    # resized/reverted instance back to the same state later.
    vm_state = instance.vm_state
    LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
    instance.system_metadata['old_vm_state'] = vm_state
    instance.save()

    limits = filter_properties.get('limits', {})
    rt = self._get_resource_tracker(node)
    with rt.resize_claim(context, instance, instance_type,
                         image_meta=image, limits=limits) as claim:
        LOG.info(_LI('Migrating'), context=context, instance=instance)
        self.compute_rpcapi.resize_instance(
                context, instance, claim.migration, image,
                instance_type, quotas.reservations,
                clean_shutdown)
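rt.resize_claim() is where the resource reservation mentioned in the overview actually happens: the destination's resource tracker checks the new flavor against the host's free resources, records a claim and a migration object, and the claim is dropped if anything inside the with-block fails. The following is only an illustrative sketch of that claim pattern, not the real ResourceTracker code:

import contextlib

# Illustrative sketch of the claim-as-context-manager pattern used by
# rt.resize_claim(); the real ResourceTracker also creates a Migration
# record and honours scheduler limits.
class ToyResourceTracker(object):
    def __init__(self, free_ram_mb, free_disk_gb):
        self.free_ram_mb = free_ram_mb
        self.free_disk_gb = free_disk_gb

    @contextlib.contextmanager
    def resize_claim(self, flavor):
        if (flavor['memory_mb'] > self.free_ram_mb or
                flavor['root_gb'] > self.free_disk_gb):
            raise RuntimeError('not enough resources on destination host')
        # reserve the resources up front
        self.free_ram_mb -= flavor['memory_mb']
        self.free_disk_gb -= flavor['root_gb']
        try:
            yield {'flavor': flavor}
        except Exception:
            # abort the claim if the resize fails before it is finished
            self.free_ram_mb += flavor['memory_mb']
            self.free_disk_gb += flavor['root_gb']
            raise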

Executing resize_instance

The code is in nova/compute/rpcapi.py.

This code determines the physical node the instance currently resides on and casts the resize_instance RPC to that host.

def resize_instance(self, ctxt, instance, migration, image, instance_type,
                    reservations=None, clean_shutdown=True):
    msg_args = {'instance': instance, 'migration': migration,
                'image': image, 'reservations': reservations,
                'instance_type': instance_type,
                'clean_shutdown': clean_shutdown,
    }
    version = '4.1'
    client = self.router.by_instance(ctxt, instance)
    if not client.can_send_version(version):
        msg_args['instance_type'] = objects_base.obj_to_primitive(
                                        instance_type)
        version = '4.0'
    cctxt = client.prepare(server=_compute_host(None, instance),
            version=version)
    cctxt.cast(ctxt, 'resize_instance', **msg_args)


def _compute_host(host, instance):
    '''Get the destination host for a message.

    :param host: explicit host to send the message to.
    :param instance: If an explicit host was not specified, use
                     instance['host']
    :returns: A host
    '''
    if host:
        return host
    if not instance:
        raise exception.NovaException(_('No compute host specified'))
    if not instance.host:
        raise exception.NovaException(_('Unable to find host for '
                                        'Instance %s') % instance.uuid)
    return instance.host

The code is in nova/compute/manager.py.

This runs on the node where the instance currently resides and carries out the migration of the instance.

It involves migrating both storage and network.

@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def resize_instance(self, context, instance, image,
                    reservations, migration, instance_type,
                    clean_shutdown):
    """Starts the migration of a running instance to another host."""
    quotas = objects.Quotas.from_reservations(context,
                                              reservations,
                                              instance=instance)
    with self._error_out_instance_on_exception(context, instance,
                                               quotas=quotas):
        # TODO(chaochin) Remove this until v5 RPC API
        # Code downstream may expect extra_specs to be populated since it
        # is receiving an object, so lookup the flavor to ensure this.
        if (not instance_type or
            not isinstance(instance_type, objects.Flavor)):
            instance_type = objects.Flavor.get_by_id(
                context, migration['new_instance_type_id'])

        network_info = self.network_api.get_instance_nw_info(context,
                                                             instance)

        migration.status = 'migrating'
        with migration.obj_as_admin():
            migration.save()

        instance.task_state = task_states.RESIZE_MIGRATING
        instance.save(expected_task_state=task_states.RESIZE_PREP)

        self._notify_about_instance_usage(
            context, instance, "resize.start", network_info=network_info)

        compute_utils.notify_about_instance_action(context, instance,
               self.host, action=fields.NotificationAction.RESIZE,
               phase=fields.NotificationPhase.START)

        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
                context, instance.uuid)
        block_device_info = self._get_instance_block_device_info(
                            context, instance, bdms=bdms)

        timeout, retry_interval = self._get_power_off_values(context,
                                        instance, clean_shutdown)
        disk_info = self.driver.migrate_disk_and_power_off(
                context, instance, migration.dest_host,
                instance_type, network_info,
                block_device_info,
                timeout, retry_interval)

        self._terminate_volume_connections(context, instance, bdms)

        migration_p = obj_base.obj_to_primitive(migration)
        self.network_api.migrate_instance_start(context,
                                                instance,
                                                migration_p)

        migration.status = 'post-migrating'
        with migration.obj_as_admin():
            migration.save()

        instance.host = migration.dest_compute
        instance.node = migration.dest_node
        instance.task_state = task_states.RESIZE_MIGRATED
        instance.save(expected_task_state=task_states.RESIZE_MIGRATING)

        self.compute_rpcapi.finish_resize(context, instance,
                migration, image, disk_info,
                migration.dest_compute, reservations=quotas.reservations)

        self._notify_about_instance_usage(context, instance, "resize.end",
                                          network_info=network_info)

        compute_utils.notify_about_instance_action(context, instance,
               self.host, action=fields.NotificationAction.RESIZE,
               phase=fields.NotificationPhase.END)

        self.instance_events.clear_events_for_instance(instance)

Notifying the destination host to finish the migration

The code is in nova/compute/rpcapi.py (the cast) and nova/compute/manager.py (the handler).

On the instance's final destination host, the network and storage are attached to the instance.

def finish_resize(self, ctxt, instance, migration, image, disk_info,
        host, reservations=None):
    version = '4.0'
    cctxt = self.router.by_host(ctxt, host).prepare(
            server=host, version=version)
    cctxt.cast(ctxt, 'finish_resize',
               instance=instance, migration=migration,
               image=image, disk_info=disk_info, reservations=reservations)


@wrap_exception()
@reverts_task_state
@wrap_instance_event(prefix='compute')
@errors_out_migration
@wrap_instance_fault
def finish_resize(self, context, disk_info, image, instance,
                  reservations, migration):
    """Completes the migration process.

    Sets up the newly transferred disk and turns on the instance at its
    new host machine.
    """
    quotas = objects.Quotas.from_reservations(context,
                                              reservations,
                                              instance=instance)
    try:
        image_meta = objects.ImageMeta.from_dict(image)
        self._finish_resize(context, instance, migration,
                            disk_info, image_meta)
        quotas.commit()
    except Exception:
        LOG.exception(_LE('Setting instance vm_state to ERROR'),
                      instance=instance)
        with excutils.save_and_reraise_exception():
            try:
                quotas.rollback()
            except Exception:
                LOG.exception(_LE("Failed to rollback quota for failed "
                                  "finish_resize"),
                              instance=instance)
            self._set_instance_obj_error_state(context, instance)

def _finish_resize(self, context, instance, migration, disk_info,
                   image_meta):
    resize_instance = False
    old_instance_type_id = migration['old_instance_type_id']
    new_instance_type_id = migration['new_instance_type_id']
    old_instance_type = instance.get_flavor()
    # NOTE(mriedem): Get the old_vm_state so we know if we should
    # power on the instance. If old_vm_state is not set we need to default
    # to ACTIVE for backwards compatibility
    old_vm_state = instance.system_metadata.get('old_vm_state',
                                                vm_states.ACTIVE)
    instance.old_flavor = old_instance_type

    if old_instance_type_id != new_instance_type_id:
        instance_type = instance.get_flavor('new')
        self._set_instance_info(instance, instance_type)
        for key in ('root_gb', 'swap', 'ephemeral_gb'):
            if old_instance_type[key] != instance_type[key]:
                resize_instance = True
                break
    instance.apply_migration_context()

    # NOTE(tr3buchet): setup networks on destination host
    self.network_api.setup_networks_on_host(context, instance,
                                            migration['dest_compute'])

    migration_p = obj_base.obj_to_primitive(migration)
    self.network_api.migrate_instance_finish(context,
                                             instance,
                                             migration_p)

    network_info = self.network_api.get_instance_nw_info(context, instance)

    instance.task_state = task_states.RESIZE_FINISH
    instance.save(expected_task_state=task_states.RESIZE_MIGRATED)

    self._notify_about_instance_usage(
        context, instance, "finish_resize.start",
        network_info=network_info)

    block_device_info = self._get_instance_block_device_info(
                        context, instance, refresh_conn_info=True)

    # NOTE(mriedem): If the original vm_state was STOPPED, we don't
    # automatically power on the instance after it's migrated
    power_on = old_vm_state != vm_states.STOPPED

    try:
        self.driver.finish_migration(context, migration, instance,
                                     disk_info,
                                     network_info,
                                     image_meta, resize_instance,
                                     block_device_info, power_on)
    except Exception:
        with excutils.save_and_reraise_exception():
            if old_instance_type_id != new_instance_type_id:
                self._set_instance_info(instance,
                                        old_instance_type)

    migration.status = 'finished'
    with migration.obj_as_admin():
        migration.save()

    instance.vm_state = vm_states.RESIZED
    instance.task_state = None
    instance.launched_at = timeutils.utcnow()
    instance.save(expected_task_state=task_states.RESIZE_FINISH)

    self._update_scheduler_instance_info(context, instance)
    self._notify_about_instance_usage(
        context, instance, "finish_resize.end",
        network_info=network_info)
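One point worth adding: the flow above ends with vm_state set to RESIZED (shown to users as VERIFY_RESIZE), so the migration is only finalized after the user confirms it (or reverts it), which is what triggers the cleanup of the resources still held on the source host. A hedged usage sketch with python-novaclient, using the same placeholder values as before:

# Minimal sketch: confirming (or reverting) the migration once the instance
# reaches VERIFY_RESIZE. Credentials/IDs are placeholders.
from novaclient import client

nova = client.Client('2', 'admin', 'ADMIN_PASS', 'admin',
                     'http://controller:5000/v3')

server = nova.servers.get('SERVER_UUID')
nova.servers.confirm_resize(server)    # keep the instance on the new host
# nova.servers.revert_resize(server)   # or move it back to the source host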