TFS文件系统写文件分析
来源:互联网 发布:如何通过网络看电视 编辑:程序博客网 时间:2024/05/23 16:56
TFS文件系统写入文件分析
对TFS文件系统的写入文件的关键代码做下分析.
(1). 写文件消息包函数
int DataManagement::write_data(const WriteDataInfo& write_info, const int32_t lease_id, int32_t& version,
const char* data_buffer, UpdateBlockType& repair)
{
TBSYS_LOG(DEBUG,
"write data. blockid: %u, fileid: %" PRI64_PREFIX "u, filenumber: %" PRI64_PREFIX "u, lease: %d",
write_info.block_id_, write_info.file_id_, write_info.file_number_, lease_id);
//if the first fragment, check version
if (0 == write_info.offset_)
{
//如果是初始,首先获得逻辑块
LogicBlock* logic_block = BlockFileManager::get_instance()->get_logic_block(write_info.block_id_);
if (NULL == logic_block)
{
TBSYS_LOG(ERROR, "blockid: %u is not exist.", write_info.block_id_);
return EXIT_NO_LOGICBLOCK_ERROR;
}
int ret = logic_block->check_block_version(version, repair);
if (TFS_SUCCESS != ret)
{
TBSYS_LOG(DEBUG, "check_block_version error. blockid: %u, ret: %d", write_info.block_id_, ret);
return ret;
}
}
//通过文件号从data_file_map_中进行查找
data_file_mutex_.lock();
DataFileMapIter bit = data_file_map_.find(write_info.file_number_);
DataFile* datafile = NULL;
if (bit != data_file_map_.end())
{
datafile = bit->second;
}
else
{
//control datafile size
if (data_file_map_.size() >= static_cast<uint32_t> (SYSPARAM_DATASERVER.max_datafile_nums_))
{
TBSYS_LOG(ERROR, "blockid: %u, datafile nums: %u is large than default.", write_info.block_id_,data_file_map_.size());
data_file_mutex_.unlock();
return EXIT_DATAFILE_OVERLOAD;
}
//找不到的话,插入到data_file_map_中
datafile = new DataFile(write_info.file_number_);
data_file_map_.insert(DataFileMap::value_type(write_info.file_number_, datafile));
}
if (NULL == datafile)
{
TBSYS_LOG(ERROR, "datafile is null. blockid: %u, fileid: %" PRI64_PREFIX "u, filenumber: %" PRI64_PREFIX "u",
write_info.block_id_, write_info.file_id_, write_info.file_number_);
data_file_mutex_.unlock();
return EXIT_DATA_FILE_ERROR;
}
//设置数据文件最后更新时间
datafile->set_last_update();
data_file_mutex_.unlock();
//写入到数据文件中,参数:写入的数据长度,偏移量
int32_t write_len = datafile->set_data(data_buffer, write_info.length_, write_info.offset_);
if (write_len != write_info.length_)
{
TBSYS_LOG(
ERROR,
"Datafile write error. blockid: %u, fileid: %" PRI64_PREFIX "u, filenumber: %" PRI64_PREFIX "u, req writelen: %d, actual writelen: %d",
write_info.block_id_, write_info.file_id_, write_info.file_number_, write_info.length_, write_len);
erase_data_file(write_info.file_number_);
return EXIT_DATA_FILE_ERROR;
}
return TFS_SUCCESS;
}
当文件被拆分为多个消息包写入时,具体的过程是先写入一个临时文件中:
// Stage one fragment of an incoming file write at the given offset.
// Fragments accumulate in the in-memory buffer data_; once the total staged
// length exceeds WRITE_DATA_TMPBUF_SIZE (2MB) they spill to the temporary
// file tmp_file_name_, flushing the buffered prefix when the file is first
// created. Returns len on success, TFS_SUCCESS (0) for len <= 0, and
// TFS_ERROR on any file I/O failure.
// NOTE(review): short writes / EINTR from write() are treated as hard errors
// rather than retried — confirm that is acceptable here.
int DataFile::set_data(const char *data, const int32_t len, const int32_t offset)
{
if (len <= 0)
{
return TFS_SUCCESS;
}
// total number of bytes staged once this fragment lands
int32_t length = offset + len;
// WRITE_DATA_TMPBUF_SIZE (2MB) is the size of the in-memory staging buffer
if (length > WRITE_DATA_TMPBUF_SIZE) // spill to disk once larger than the buffer
{
if (fd_ == -1)
{
fd_ = open(tmp_file_name_, O_RDWR | O_CREAT | O_TRUNC, 0600);
if (fd_ == -1)
{
TBSYS_LOG(ERROR, "open file fail: %s, %s", tmp_file_name_, strerror(errno));
return TFS_ERROR;
}
// first time spilling to disk: flush everything buffered so far
if (write(fd_, data_, length_) != length_)
{
TBSYS_LOG(ERROR, "write file fail: %s, length_: %d, error:%s", tmp_file_name_, length_, strerror(errno));
return TFS_ERROR;
}
}
// fragment offsets are absolute, so seek to the fragment's position
if (lseek(fd_, offset, SEEK_SET) == -1)
{
TBSYS_LOG(ERROR, "lseek file fail: %s, offset: %d", tmp_file_name_, offset);
return TFS_ERROR;
}
if (write(fd_, data, len) != len)
{
TBSYS_LOG(ERROR, "write file fail: %s, len: %d", tmp_file_name_, len);
return TFS_ERROR;
}
}
else
{
memcpy(data_ + offset, data, len);
}
// length_ tracks the highest byte staged so far (fragments may overlap)
if (length > length_)
{
length_ = length;
}
return len;
}
(2).写文件消息包完毕后,再执行关闭写操作,这个时候文件才实际从临时文件写入到数据块中
//写文件完毕后关闭操作
// Finish a multi-fragment write identified by file_number: verify the staged
// DataFile's CRC against the client-supplied one, commit the staged data via
// LogicBlock::close_write_file, and remove the staging entry in every exit
// path. write_file_size receives the total staged length once the CRC check
// passes. Returns TFS_SUCCESS or an EXIT_* error code.
int DataManagement::close_write_file(const CloseFileInfo& closefileInfo, int32_t& write_file_size)
{
uint32_t block_id = closefileInfo.block_id_;
uint64_t file_id = closefileInfo.file_id_;
uint64_t file_number = closefileInfo.file_number_;
uint32_t crc = closefileInfo.crc_;
TBSYS_LOG(DEBUG,
"close write file, blockid: %u, fileid: %" PRI64_PREFIX "u, filenumber: %" PRI64_PREFIX "u, crc: %u",
block_id, file_id, file_number, crc);
//find datafile
DataFile* datafile = NULL;
data_file_mutex_.lock();
DataFileMapIter bit = data_file_map_.find(file_number);
if (bit != data_file_map_.end())
{
datafile = bit->second;
}
// missing entry means the staging datafile already expired (lease expire)
if (NULL == datafile)
{
TBSYS_LOG(ERROR, "Datafile is null. blockid: %u, fileid: %" PRI64_PREFIX "u, filenumber: %" PRI64_PREFIX "u",
block_id, file_id, file_number);
data_file_mutex_.unlock();
return EXIT_DATAFILE_EXPIRE_ERROR;
}
datafile->set_last_update();
// take a reference so the datafile survives after the map lock is dropped
datafile->add_ref();
data_file_mutex_.unlock();
// compare the locally computed CRC with the one the client sent
uint32_t datafile_crc = datafile->get_crc();
if (crc != datafile_crc)
{
TBSYS_LOG(
ERROR,
"Datafile crc error. blockid: %u, fileid: %" PRI64_PREFIX "u, filenumber: %" PRI64_PREFIX "u, local crc: %u, msg crc: %u",
block_id, file_id, file_number, datafile_crc, crc);
datafile->sub_ref();
erase_data_file(file_number);
return EXIT_DATA_FILE_ERROR;
}
write_file_size = datafile->get_length();
// the target logic block must still exist to receive the data
LogicBlock* logic_block = BlockFileManager::get_instance()->get_logic_block(block_id);
if (NULL == logic_block)
{
datafile->sub_ref();
erase_data_file(file_number);
TBSYS_LOG(ERROR, "blockid: %u is not exist.", block_id);
return EXIT_NO_LOGICBLOCK_ERROR;
}
int64_t time_start = tbsys::CTimeUtil::getTime();
// commit the staged data into the logic block
int ret = logic_block->close_write_file(file_id, datafile, datafile_crc);
if (TFS_SUCCESS != ret)
{
datafile->sub_ref();
erase_data_file(file_number);
return ret;
}
int64_t time_end = tbsys::CTimeUtil::getTime();
// warn when the commit took longer than the configured I/O threshold
if (time_end - time_start > SYSPARAM_DATASERVER.max_io_warn_time_)
{
TBSYS_LOG(WARN, "write file cost time: blockid: %u, fileid: %" PRI64_PREFIX "u, cost time: %" PRI64_PREFIX "d",
block_id, file_id, time_end - time_start);
}
// success, gc datafile
// close tmp file, release opened file handle
// datafile , bit->second point to same thing, once delete
// bit->second, datafile will be obseleted immediately.
// drop our reference and erase the staging entry (removes the temp file)
datafile->sub_ref();
erase_data_file(file_number);
return TFS_SUCCESS;
}
(3). 逻辑块关闭写文件
//逻辑块关闭写文件操作
// Commit a fully staged DataFile into this logic block under inner_file_id.
// Writes a FileInfo header followed by the file payload into the block's
// physical-block chain (extending it when needed), inserts or updates the
// index meta entry, updates the block statistics and flushes the index.
// crc is the checksum (already verified by the caller) recorded in FileInfo.
int LogicBlock::close_write_file(const uint64_t inner_file_id, DataFile* datafile, const uint32_t crc)
{
ScopedRWLock scoped_lock(rw_lock_, WRITE_LOCKER);
RawMeta file_meta;
// check whether this file id was already written into the block
int ret = index_handle_->read_segment_meta(inner_file_id, file_meta);
if (TFS_SUCCESS == ret)
{
TBSYS_LOG(INFO, "file exist, update! blockid: %u, fileid: %" PRI64_PREFIX "u", logic_block_id_, inner_file_id);
}
// keep the old meta so it can be rolled back on failure
RawMeta bak_file_meta(file_meta);
// total payload length staged in the datafile
int32_t file_size = datafile->get_length();
FileInfo tfs_file_info, old_file_info;
tfs_file_info.id_ = inner_file_id;
tfs_file_info.size_ = file_size + sizeof(FileInfo);
tfs_file_info.flag_ = 0;
tfs_file_info.modify_time_ = time(NULL);
tfs_file_info.create_time_ = time(NULL);
tfs_file_info.crc_ = crc;
bool need_update_meta = true, commit_offset = true;
//commit
OperType oper_type = C_OPER_UPDATE;
int32_t old_size = 0, block_offset = 0;
// On the first write for this file id read_segment_meta found nothing, so
// the ids differ; when overwriting an existing file the ids match.
if (file_meta.get_file_id() == inner_file_id)
{
TBSYS_LOG(INFO, "write file. fileid equal. blockid: %u, fileid: %" PRI64_PREFIX "u", logic_block_id_,
inner_file_id);
// fetch the old file's FileInfo header from the data area
ret = data_handle_->read_segment_info(&old_file_info, file_meta.get_offset());
if (TFS_SUCCESS != ret)
{
TBSYS_LOG(ERROR, "read FileInfo fail, blockid: %u, fileid: %" PRI64_PREFIX "u, ret: %d", logic_block_id_,
inner_file_id, ret);
}
else //read successful
{
// the rewritten file keeps the original creation time
tfs_file_info.create_time_ = old_file_info.create_time_;
// space the rewritten file needs (payload + header)
int32_t require_size = file_size + sizeof(FileInfo);
if (require_size > old_file_info.usize_)
{
old_size = old_file_info.usize_;
// new data does not fit in the old slot: append it at the block's
// current data offset instead
block_offset = index_handle_->get_block_data_offset();
TBSYS_LOG(
INFO,
"update file. require size: %d > origin size: %d. need reallocate, blockid: %u, fileid: %" PRI64_PREFIX "u, offset: %d",
require_size, old_size, logic_block_id_, inner_file_id, block_offset);
tfs_file_info.offset_ = block_offset;
tfs_file_info.usize_ = require_size;
file_meta.set_offset(block_offset); // meta now points at the appended copy
}
else
{
commit_offset = false; // block data offset does not move
// new data fits: reuse the old file's slot and allocated size
tfs_file_info.offset_ = file_meta.get_offset();
tfs_file_info.usize_ = old_file_info.usize_;
}
//modify meta size
file_meta.set_size(require_size);
index_handle_->update_segment_meta(file_meta.get_key(), file_meta);
}
}
// Brand-new file id (or the old header read failed): insert a fresh meta
// entry and append the file at the block's current data offset.
if (file_meta.get_file_id() != inner_file_id || ret)
{
need_update_meta = false;
oper_type = C_OPER_INSERT;
old_size = 0;
tfs_file_info.usize_ = file_size + sizeof(FileInfo);
// append position: the block's current data offset
block_offset = index_handle_->get_block_data_offset();
tfs_file_info.offset_ = block_offset;
file_meta.set_key(inner_file_id);
file_meta.set_size(file_size + sizeof(FileInfo));
file_meta.set_offset(block_offset);
// write the meta entry into the index file
ret = index_handle_->write_segment_meta(file_meta.get_key(), file_meta);
if (TFS_SUCCESS != ret)
{
return ret;
}
}
char* tmp_data_buffer = NULL;
int32_t read_len = 0, read_offset = 0;
int32_t write_len = 0, write_offset = 0;
do
{
// Make sure the physical-block chain (one main block plus extension
// blocks, linked through their block prefixes: logic block id, previous
// and next physical block ids) has room for the file at this offset.
ret = extend_block(file_meta.get_size(), file_meta.get_offset());
if (TFS_SUCCESS != ret)
break;
// stream the staged data out of the temporary DataFile chunk by chunk
while ((tmp_data_buffer = datafile->get_data(NULL, &read_len, read_offset)) != NULL)
{
if (read_len < 0 || (read_len + read_offset) > file_size)
{
TBSYS_LOG(ERROR, "getdata fail, blockid: %u, fileid: %" PRI64_PREFIX "u, size: %d, offset: %d, rlen: %d",
logic_block_id_, inner_file_id, file_size, read_offset, read_len);
ret = TFS_ERROR;
break;
}
if (0 == read_len)
{
break;
}
write_len = read_len;
if (0 == read_offset)
{
write_len += sizeof(FileInfo);
}
if (write_offset + write_len > file_meta.get_size())
{
ret = EXIT_WRITE_OFFSET_ERROR;
break;
}
// The very first chunk is prefixed with the FileInfo header.
// file_meta.get_offset() is where this file starts inside the block:
// files are laid out back to back, each starting where the previous
// ones (payload + sizeof(FileInfo) each) ended.
if (0 == read_offset)
{
char* tmp_write_buffer = new char[write_len];
memcpy(tmp_write_buffer, &tfs_file_info, sizeof(FileInfo));
memcpy(tmp_write_buffer + sizeof(FileInfo), tmp_data_buffer, read_len);
ret = data_handle_->write_segment_data(tmp_write_buffer, write_len, file_meta.get_offset() + write_offset);
delete[] tmp_write_buffer;
}
else
{
// subsequent chunks are raw payload
ret = data_handle_->write_segment_data(tmp_data_buffer, write_len, file_meta.get_offset() + write_offset);
}
//check ret(if disk error)
if (TFS_SUCCESS != ret)
{
TBSYS_LOG(
ERROR,
"blockid: %u write data error, fileid: %" PRI64_PREFIX "u, size: %d offset: %d, rlen: %d, oldsize: %d, ret: %d",
logic_block_id_, inner_file_id, file_size, read_offset, read_len, old_size, ret);
break;
}
read_offset += read_len;
write_offset += write_len;
}
if (TFS_SUCCESS != ret)
break;
if (oper_type == C_OPER_INSERT)
{
ret = index_handle_->update_block_info(C_OPER_INSERT, file_meta.get_size());
if (TFS_SUCCESS != ret)
break;
}
else if (oper_type == C_OPER_UPDATE)
{
// Overwrite accounting: delete the old copy's size, then add the new.
// (1) a new file that fits the old slot is rewritten in place;
// (2) a larger one was appended at the end of the block.
// Either way the index meta points at the new copy, so lookups always
// find the rewritten file.
if (0 != old_size)
{
ret = index_handle_->update_block_info(C_OPER_DELETE, old_size);
if (TFS_SUCCESS != ret)
break;
ret = index_handle_->update_block_info(C_OPER_UPDATE, file_meta.get_size());
if (TFS_SUCCESS != ret)
break;
}
else
{
ret = index_handle_->update_block_info(C_OPER_UPDATE, 0);
if (TFS_SUCCESS != ret)
break;
}
}
}
while (0);
TBSYS_LOG(DEBUG, "close write file, blockid: %u, fileid: %" PRI64_PREFIX "u, ret: %d", logic_block_id_,
inner_file_id, ret);
if (TFS_SUCCESS != ret) //error occur
{
if (need_update_meta)
{
// rollback the meta entry to its pre-write state
index_handle_->update_segment_meta(bak_file_meta.get_key(), bak_file_meta);
}
}
else
{
if (commit_offset)
{
// Advance the block data offset past the newly appended file; the next
// appended file starts where this one ended.
index_handle_->commit_block_data_offset(file_size + sizeof(FileInfo));
}
}
//flush index
index_handle_->flush();
return ret;
}
(4).在TFS中数据块的组织形式是: 一个物理主块+多个物理扩展块
//扩展块
int LogicBlock::extend_block(const int32_t size, const int32_t offset)
{
int32_t retry_times = MAX_EXTEND_TIMES;
//extend retry_times extend block in one call
while (retry_times)
{
//如果超过了avail_data_size_,新生成扩展块
if (offset + size > avail_data_size_) //need extend block
{
TBSYS_LOG(INFO,
"blockid: %u need ext block. offset: %d, datalen: %d, availsize: %d, data curr offset: %d, retry: %d",
logic_block_id_, offset, size, avail_data_size_, index_handle_->get_block_data_offset(), retry_times);
uint32_t physical_ext_blockid = 0;
uint32_t physical_blockid = 0;
// get the last prev block id of this logic block
std::list<PhysicalBlock*>* physcial_blk_list = &physical_block_list_;
if (0 == physcial_blk_list->size())
{
TBSYS_LOG(ERROR, "blockid: %u physical block list is empty!", logic_block_id_);
return EXIT_PHYSICALBLOCK_NUM_ERROR;
}
//可见是直接取的列表的最后一个元素,来形成这种块前缀的关系
physical_blockid = physcial_blk_list->back()->get_physic_block_id();
// new one ext block
PhysicalBlock* tmp_physic_block = NULL;
int ret = BlockFileManager::get_instance()->new_ext_block(logic_block_id_, physical_blockid,
physical_ext_blockid, &tmp_physic_block);
if (TFS_SUCCESS != ret)
return ret;
//然后又将新生成的插入physical_block_list_中去,这样新生成的就是最后一个元素了,
//再次进行扩展时就会取到它本身
physical_block_list_.push_back(tmp_physic_block);
avail_data_size_ += tmp_physic_block->get_total_data_len();
}
else
{
break;
}
--retry_times;
}
if (0 == retry_times)
{
TBSYS_LOG(ERROR, "blockid: %u extend block too much!", logic_block_id_);
return EXIT_PHYSICALBLOCK_NUM_ERROR;
}
return TFS_SUCCESS;
}
(5). 新生成扩展块
//新生成扩展块
// Allocate a new extension physical block for logic_block_id and chain it
// after physical_block_id (the current last physical block of the logic
// block). On success *physic_block points at the new PhysicalBlock and
// ext_physical_block_id holds its id. Persists both blocks' on-disk
// prefixes, the allocation bitmap and the super block before publishing the
// new block in physcial_blocks_.
int BlockFileManager::new_ext_block(const uint32_t logic_block_id, const uint32_t physical_block_id,
uint32_t& ext_physical_block_id, PhysicalBlock **physic_block)
{
ScopedRWLock scoped_lock(rw_lock_, WRITE_LOCKER);
int ret = TFS_SUCCESS;
BlockType block_type;
// a previous physical block id is required: extension blocks are always
// chained after an existing block
if (0 != physical_block_id)
{
block_type = C_EXT_BLOCK;
}
else
{
return EXIT_NO_LOGICBLOCK_ERROR;
}
LogicBlockMapIter mit = logic_blocks_.find(logic_block_id);
if (mit == logic_blocks_.end())
{
return EXIT_NO_LOGICBLOCK_ERROR;
}
// pick an unused, non-error extension block id from the bitmaps
ret = find_avail_block(ext_physical_block_id, block_type);
if (TFS_SUCCESS != ret)
return ret;
PhysicalBlockMapIter pmit = physcial_blocks_.find(ext_physical_block_id);
if (pmit != physcial_blocks_.end())
{
// NOTE(review): this invariant violation (and the two below) only
// assert(); in NDEBUG builds execution falls through — confirm intended.
TBSYS_LOG(ERROR, "physical block conflict. fatal error! ext physical blockid: %u", ext_physical_block_id);
assert(false);
}
//make sure physical_block_id is exist
pmit = physcial_blocks_.find(physical_block_id);
if (pmit == physcial_blocks_.end())
{
TBSYS_LOG(ERROR, "can not find physical blockid: %u", physical_block_id);
assert(false);
}
if (NULL == pmit->second)
{
TBSYS_LOG(ERROR, "physical blockid: %u point null", physical_block_id);
assert(false);
}
normal_bit_map_->set(ext_physical_block_id);
PhysicalBlock* tmp_physical_block = new PhysicalBlock(ext_physical_block_id, super_block_.mount_point_,
super_block_.extend_block_size_, C_EXT_BLOCK);
// fill the new block's prefix: logic block id, previous physical block id,
// no next block yet
tmp_physical_block->set_block_prefix(logic_block_id, physical_block_id, 0);
TBSYS_LOG(INFO, "new ext block. logic blockid: %u, prev physical blockid: %u, now physical blockid: %u",
logic_block_id, physical_block_id, ext_physical_block_id);
do
{
//write physical block info to disk
ret = tmp_physical_block->dump_block_prefix();
if (TFS_SUCCESS != ret)
break;
// link the previous block forward to the new one, then persist its
// prefix so the prev/next chain is recoverable from disk
PhysicalBlock* prev_physic_block = pmit->second;
prev_physic_block->set_next_block(ext_physical_block_id);
//write prev block info to disk
ret = prev_physic_block->dump_block_prefix();
if (TFS_SUCCESS != ret)
break;
ret = super_block_impl_->write_bit_map(normal_bit_map_, error_bit_map_);
if (TFS_SUCCESS != ret)
break;
//update superblock info
++super_block_.used_extend_block_count_;
ret = super_block_impl_->write_super_blk(super_block_);
if (TFS_SUCCESS != ret)
break;
//sync to disk
ret = super_block_impl_->flush_file();
if (TFS_SUCCESS != ret)
break;
physcial_blocks_.insert(PhysicalBlockMap::value_type(ext_physical_block_id, tmp_physical_block));
(*physic_block) = tmp_physical_block;
}
while (0);
if (TFS_SUCCESS != ret)
{
TBSYS_LOG(ERROR, "new ext block error! logic blockid: %u. ret: %d", logic_block_id, ret);
tbsys::gDelete(tmp_physical_block);
}
return ret;
}
(6). 找到一个没有被占用的扩展块号
//查找可用的块
int BlockFileManager::find_avail_block(uint32_t& ext_physical_block_id, const BlockType block_type)
{
int32_t i = 1;
int32_t size = super_block_.main_block_count_;
//如果是扩展块类型,起始小标需要跳过前面的主块
if (C_EXT_BLOCK == block_type)
{
i = super_block_.main_block_count_ + 1;
size += super_block_.extend_block_count_;
}
bool hit_block = false;
for (; i <= size; ++i)
{
//查找得到一个未使用的块
if (!normal_bit_map_->test(i))
{
//并且不会在错误的位图中出现
if (error_bit_map_->test(i)) //skip error block
{
continue;
}
hit_block = true;
break;
}
}
// find nothing
if (!hit_block)
{
TBSYS_LOG(ERROR, "block is exhausted! blocktype: %d/n", block_type);
return EXIT_BLOCK_EXHAUST_ERROR;
}
TBSYS_LOG(DEBUG, "find avail blockid: %u/n", i);
ext_physical_block_id = i;
return TFS_SUCCESS;
}
(7).实际写物理块DataHandle类的方法:
int DataHandle::write_segment_data(const char* buf, const int32_t nbytes, const int32_t offset)
{
if (NULL == buf)
{
return EXIT_POINTER_NULL;
}
PhysicalBlock* tmp_physical_block = NULL;
int32_t inner_offset = 0;
int32_t written_len = 0, writting_len = 0;
int ret = TFS_SUCCESS;
while (written_len < nbytes)
{
//尝试一次性写完
writting_len = nbytes - written_len;
//offset + written_len= 偏移量+累计写入的字节数
ret = choose_physic_block(&tmp_physical_block, offset + written_len, inner_offset, writting_len);
if (TFS_SUCCESS != ret)
return ret;
//参数:写入缓冲,写入的自己数,物理块内部的偏移量
ret = tmp_physical_block->pwrite_data(buf + written_len, writting_len, inner_offset);
if (TFS_SUCCESS != ret)
return ret;
written_len += writting_len;
}
return ret;
}
//读取实际的文件数据过程
int DataHandle::read_segment_data(char* buf, const int32_t nbytes, const int32_t offset)
{
if (NULL == buf)
{
return EXIT_POINTER_NULL;
}
PhysicalBlock* tmp_physical_block = NULL;
int32_t inner_offset = 0;
int32_t has_read_len = 0, reading_len = 0;
int ret = TFS_SUCCESS;
while (has_read_len < nbytes)
{
//每次都尝试一次性读取完剩余的字节数
reading_len = nbytes - has_read_len;
//根据offset + has_read_len偏移量,选取一个物理块,并且确定实际读取是在这个物理块内部的偏移量
ret = choose_physic_block(&tmp_physical_block, offset + has_read_len, inner_offset, reading_len);
if (TFS_SUCCESS != ret)
return ret;
//从选中的物理块中从内部偏移inner_offset开始读取reading_len字节的数据,拷贝到输出buf + has_read_len缓冲中
ret = tmp_physical_block->pread_data(buf + has_read_len, reading_len, inner_offset);
if (TFS_SUCCESS != ret)
return ret;
has_read_len += reading_len;
}
return ret;
}
//选择一个可写的物理块
// Map a logic-block offset onto the physical block that contains it.
// A file may span the main physical block plus several extension blocks;
// on success *tmp_physical_block points at the containing block,
// inner_offset is the offset inside that block, and inner_len is trimmed so
// the span does not cross the block boundary.
// Returns EXIT_PHYSIC_BLOCK_OFFSET_ERROR when offset lies beyond all blocks.
int DataHandle::choose_physic_block(PhysicalBlock** tmp_physical_block, const int32_t offset,
    int32_t& inner_offset, int32_t& inner_len)
{
  std::list<PhysicalBlock*>* blocks = logic_block_->get_physic_block_list();
  int32_t block_start = 0; // logical offset where the current block begins
  for (std::list<PhysicalBlock*>::iterator it = blocks->begin(); it != blocks->end(); ++it)
  {
    const int32_t block_end = block_start + (*it)->get_total_data_len();
    if (offset < block_end)
    {
      // this block holds `offset`
      *tmp_physical_block = (*it);
      inner_offset = offset - block_start;
      // clamp the requested length to the remainder of this block
      if ((offset + inner_len) > block_end)
      {
        inner_len = block_end - offset;
      }
      return TFS_SUCCESS;
    }
    block_start = block_end;
  }
  // offset is past the end of every physical block of this logic block
  return EXIT_PHYSIC_BLOCK_OFFSET_ERROR;
}
- TFS文件系统写文件分析
- TFS文件系统格式化分析
- TFS文件系统策略分析
- TFS大文件分析
- 文件系统读写--文件写过程代码分析
- TFS文件系统数据服务器启动加载分析
- 《海量小文件》分布式文件系统-TFS
- TFS文件系统
- tfs文件系统笔记
- TFS集群文件系统
- tfs--淘宝文件系统扩容
- TFS淘宝文件系统原理
- TFS文件系统 安装
- 淘宝分布式文件系统TFS
- 【文件】FAT文件系统分析
- Taobao分布式文件系统TFS简析
- Taobao分布式文件系统TFS简析
- Taobao分布式文件系统TFS简析 .
- TFS文件系统格式化分析
- linux内核编译过程中出现两个错误的解决方法~!
- TFS文件系统数据服务器启动加载分析
- 贝子你能回来吗?
- the connection to the VMware USB Arbitration Service was unsuccessful.Please check the status of this service in the Microsogt M
- TFS文件系统写文件分析
- 将二维矩阵0元素所在行列都标记为0
- 自由软件之父
- magento -- 推荐插件 -- 按字母过滤列表页产品 --alphabates
- 持续进步
- PDM 四阶段成熟度模型
- PDM 四阶段成熟度模型
- PDM 四阶段成熟度模型
- PDM 四阶段成熟度模型