基于ceph的cinder backup功能代码分析
来源:互联网 发布:720度全景制作软件 编辑:程序博客网 时间:2024/06/10 00:38
1、cinder/backup/api.py 239 create687
def create(self, context, name, description, volume_id, container, incremental=False, availability_zone=None, force=False, snapshot_id=None):
检查磁盘状态、磁盘快照及其状态,以及磁盘处于in-use时是否加了force,并检查backup配额。由于我们使用的是ceph的driver,获取driver的过程就不看了,主要看看实现的地方
2、cinder/backup/drivers/ceph.py 840 backup函数
def backup(self, backup, volume_file, backup_metadata=True):
    """Backup volume and metadata (if available) to Ceph object store.

    If the source volume is an RBD we will attempt to do an
    incremental/differential backup, otherwise a full copy is performed.
    If this fails we will attempt to fall back to full copy.
    """
    backup_id = backup['id']
    volume = self.db.volume_get(self.context, backup['volume_id'])
    volume_id = volume['id']
    volume_name = volume['name']

    LOG.debug("Starting backup of volume='%s'.", volume_id)

    # Ensure we are at the beginning of the volume
    volume_file.seek(0)
    length = self._get_volume_size_gb(volume)

    do_full_backup = False
    if self._file_is_rbd(volume_file):
        # If volume an RBD, attempt incremental backup.
        try:
            self._backup_rbd(backup_id, volume_id, volume_file,
                             volume_name, length)
        except exception.BackupRBDOperationFailed:
            # Incremental path failed (e.g. missing/invalid base or
            # snapshot) -- fall back to a full copy instead of aborting.
            LOG.debug("Forcing full backup of volume %s.", volume_id)
            do_full_backup = True
    else:
        # Non-RBD sources have no diff support, so always do a full copy.
        do_full_backup = True

    if do_full_backup:
        self._full_backup(backup_id, volume_id, volume_file,
                          volume_name, length)

    # Record which Ceph pool holds this backup.
    backup.container = self._ceph_backup_pool
    backup.save()

    if backup_metadata:
        try:
            self._backup_metadata(backup)
        except exception.BackupOperationError:
            # If metadata backup fails, remove the data we just wrote so
            # no partial backup is left behind, then re-raise.
            with excutils.save_and_reraise_exception():
                # Cleanup.
                self.delete(backup)

    LOG.debug("Backup '%(backup_id)s' of volume %(volume_id)s finished.",
              {'backup_id': backup_id, 'volume_id': volume_id})
3、cinder/backup/drivers/ceph.py 589 _backup_rbd函数
def _backup_rbd(self, backup_id, volume_id, volume_file, volume_name,
                length):
    """Create an incremental backup from an RBD image.

    Raises BackupRBDOperationFailed if the incremental/differential
    backup cannot be performed, so the caller can fall back to a full
    backup.
    """
    rbd_user = volume_file.rbd_user
    rbd_pool = volume_file.rbd_pool
    rbd_conf = volume_file.rbd_conf
    source_rbd_image = volume_file.rbd_image

    # Identify our --from-snap point (if one exists)
    from_snap = self._get_most_recent_snap(source_rbd_image)
    LOG.debug("Using --from-snap '%(snap)s' for incremental backup of "
              "volume %(volume)s.",
              {'snap': from_snap, 'volume': volume_id})

    base_name = self._get_backup_base_name(volume_id, diff_format=True)
    image_created = False
    with rbd_driver.RADOSClient(self, self._ceph_backup_pool) as client:
        # If from_snap does not exist at the destination (and the
        # destination exists), this implies a previous backup has failed.
        # In this case we will force a full backup.
        #
        # TODO(dosaboy): find a way to repair the broken backup
        #
        if base_name not in self.rbd.RBD().list(ioctx=client.ioctx):
            # If a from_snap is defined but the base does not exist, we
            # ignore it since it is stale and waiting to be cleaned up.
            if from_snap:
                LOG.debug("Source snapshot '%(snapshot)s' of volume "
                          "%(volume)s is stale so deleting.",
                          {'snapshot': from_snap, 'volume': volume_id})
                source_rbd_image.remove_snap(from_snap)
                from_snap = None

            # Create new base image; the differential data will be
            # copied onto it below.
            self._create_base_image(base_name, length, client)
            image_created = True
        else:
            # If a from_snap is defined but does not exist in the back base
            # then we cannot proceed (see above)
            if not self._snap_exists(base_name, from_snap, client):
                errmsg = (_("Snapshot='%(snap)s' does not exist in base "
                            "image='%(base)s' - aborting incremental "
                            "backup") %
                          {'snap': from_snap, 'base': base_name})
                LOG.info(errmsg)
                # Raise this exception so that caller can try another
                # approach
                raise exception.BackupRBDOperationFailed(errmsg)

    # Snapshot source volume so that we have a new point-in-time
    new_snap = self._get_new_snap_name(backup_id)
    LOG.debug("Creating backup snapshot='%s'", new_snap)
    source_rbd_image.create_snap(new_snap)

    # Attempt differential backup. If this fails, perhaps because librbd
    # or Ceph cluster version does not support it, do a full backup
    # instead.
    #
    # TODO(dosaboy): find a way to determine if the operation is supported
    # rather than brute force approach.
    try:
        before = time.time()
        self._rbd_diff_transfer(volume_name, rbd_pool, base_name,
                                self._ceph_backup_pool,
                                src_user=rbd_user,
                                src_conf=rbd_conf,
                                dest_user=self._ceph_backup_user,
                                dest_conf=self._ceph_backup_conf,
                                src_snap=new_snap,
                                from_snap=from_snap)
        LOG.debug("Differential backup transfer completed in %.4fs",
                  (time.time() - before))

        # We don't need the previous snapshot (if there was one) anymore so
        # delete it.
        if from_snap:
            source_rbd_image.remove_snap(from_snap)
    except exception.BackupRBDOperationFailed:
        # Undo side effects before re-raising so the caller can retry
        # with a full backup from a clean state.
        with excutils.save_and_reraise_exception():
            LOG.debug("Differential backup transfer failed")

            # Clean up if image was created as part of this operation
            if image_created:
                self._try_delete_base_image(backup_id, volume_id,
                                            base_name=base_name)

            # Delete snapshot
            LOG.debug("Deleting diff backup snapshot='%(snapshot)s' of "
                      "source volume='%(volume)s'.",
                      {'snapshot': new_snap, 'volume': volume_id})
            source_rbd_image.remove_snap(new_snap)
4、cinder/backup/drivers/ceph.py 515 _rbd_diff_transfer函数
这里才是真正干活的地方
def _rbd_diff_transfer(self, src_name, src_pool, dest_name, dest_pool,
                       src_user, src_conf, dest_user, dest_conf,
                       src_snap=None, from_snap=None):
    """Copy only extents changed between two points.

    Pipes ``rbd export-diff`` on the source into ``rbd import-diff`` on
    the destination. If no snapshot is provided, the diff extents will
    be all those changed since the rbd volume/base was created,
    otherwise it will be those changed since the snapshot was created.

    Raises BackupRBDOperationFailed when the piped command pair exits
    non-zero (e.g. the cluster/client does not support diff transfer).
    """
    LOG.debug("Performing differential transfer from '%(src)s' to "
              "'%(dest)s'",
              {'src': src_name, 'dest': dest_name})

    # NOTE(dosaboy): Need to be tolerant of clusters/clients that do
    # not support these operations since at the time of writing they
    # were very new.

    # Export side: stream the changed extents to stdout ('-').
    export_cmd = ['rbd', 'export-diff']
    export_cmd += self._ceph_args(src_user, src_conf, pool=src_pool)
    if from_snap is not None:
        export_cmd += ['--from-snap', from_snap]

    # Read from the point-in-time snapshot when given, otherwise from
    # the live image.
    export_path = (
        utils.convert_str("%s/%s@%s" % (src_pool, src_name, src_snap))
        if src_snap
        else utils.convert_str("%s/%s" % (src_pool, src_name)))
    export_cmd += [export_path, '-']

    # Import side: replay the diff stream from stdin into the
    # destination image.
    import_cmd = ['rbd', 'import-diff']
    import_cmd += self._ceph_args(dest_user, dest_conf, pool=dest_pool)
    import_cmd += ['-',
                   utils.convert_str("%s/%s" % (dest_pool, dest_name))]

    returncode, err_output = self._piped_execute(export_cmd, import_cmd)
    if returncode:
        msg = (_("RBD diff op failed - (ret=%(ret)s stderr=%(stderr)s)")
               % {'ret': returncode, 'stderr': err_output})
        LOG.info(msg)
        raise exception.BackupRBDOperationFailed(msg)
其实基于ceph后端的cinder backup最终重要的是三个地方:
1、对volume先做一个全量拷贝
2、对volume的拷贝做快照
rbd create volumes/test
rbd snap create volumes/test@snap
3、在快照的基础上做增量快照
导出增量部分:rbd export-diff volumes/test@snap snap1
创建一个新的镜像:rbd create backup/volume-$volume-id.backup.base
导入增量部分:rbd import-diff snap1 backup/volume-$volume-id.backup.base
完成上述步骤即可完成增量导入
阅读全文
0 0
- 基于ceph的cinder backup功能代码分析
- cinder-backup细致分析
- cinder 调度代码分析
- [OpenStack 存储] Nova,Glance与Cinder 基于Ceph的统一存储方案
- [OpenStack 存储] Nova,Glance与Cinder 基于Ceph的统一存储方案
- ceph的pool创建流程--代码分析
- cinder-backup程序流程
- cinder-backup详细介绍
- cinder-backup驱动配置
- cinder 创建backup volume
- Openstack 中cinder backup三种backend的对比
- 使用Alluxio加速基于Ceph对象存储的数据分析
- Openstack Cinder 服务启动代码分析
- cinder-backup启动过程跟踪
- Ceph代码分析---线程池
- cinder创建volume的流程分析
- 基于docker的ceph集群
- cinder的delete操作及与底层ceph的调用关系
- (嵌入式)关于arm中的存储控制器(一)
- 在struts2框架中配置validate中出现404问题
- VIM列编辑
- 解决win10 composer xdebug 冲突
- [绍棠_swift] Swift中的继承、构造器
- 基于ceph的cinder backup功能代码分析
- Excel中时间戳转换时间
- HDU--dp练习--1009--Big Event in HDU
- POJ 1470 Closest Common Ancestors (LCA)
- Object类
- CSS 相对|绝对(relative/absolute)定位系列(一)
- Three.js描绘点线面 (含满天星空Demo)
- Atitit 项目战略之道 attilax著
- wildfly10.1 配置HTTPS(Lets Encrypt 免费获取SSL)