Writing data to HDFS with libhdfs in C, and configuring the blocksize

Start from the hdfs_write example that ships with libhdfs and change the namenode IP and port to match your own cluster:
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hdfs.h"

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {
    if (argc != 4) {
        fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
        exit(-1);
    }

    hdfsFS fs = hdfsConnect("192.168.1.1", 8020);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    }

    const char* writeFileName = argv[1];
    tSize fileTotalSize = strtoul(argv[2], NULL, 10);
    tSize bufferSize = strtoul(argv[3], NULL, 10);

    /* replication = 0 and blockSize = 0 mean: use the defaults from the
     * configuration files found on the CLASSPATH */
    hdfsFile writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
    if (!writeFile) {
        fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
        exit(-2);
    }

    // data to be written to the file
    char* buffer = malloc(sizeof(char) * bufferSize);
    if (buffer == NULL) {
        return -2;
    }
    int i = 0;
    for (i = 0; i < bufferSize; ++i) {
        buffer[i] = 'a' + (i % 26);
    }

    // write to the file
    tSize nrRemaining;
    for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize) {
        int curSize = (bufferSize < nrRemaining) ? bufferSize : (int)nrRemaining;
        hdfsWrite(fs, writeFile, (void*)buffer, curSize);
    }

    free(buffer);
    hdfsCloseFile(fs, writeFile);
    hdfsDisconnect(fs);
    return 0;
}

/**
 * vim: ts=4: sw=4: et:
 */
Configure HADOOP_HOME, JAVA_HOME, HADOOP_CONF_DIR, and the CLASSPATH:
HADOOP_HOME=/usr/home/hadoop
export HADOOP_HOME

for i in $HADOOP_HOME/*.jar ; do
    CLASSPATH=$CLASSPATH:$i
done
for i in $HADOOP_HOME/lib/*.jar ; do
    CLASSPATH=$CLASSPATH:$i
done

HADOOP_CONF_DIR=$HADOOP_HOME/conf
CLASSPATH=$CLASSPATH:$HADOOP_CONF_DIR
export HADOOP_CONF_DIR CLASSPATH
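The snippet above only covers the Hadoop side. Because libhdfs starts an embedded JVM, JAVA_HOME must also point at a JDK and libjvm.so (plus libhdfs.so itself) must be resolvable at run time. A minimal sketch, assuming a 64-bit Linux Sun/Oracle JDK layout and the libhdfs directory used later in the link step; adjust the paths to your installation:

JAVA_HOME=/usr/java/default        # assumption: point this at your JDK
export JAVA_HOME
# make libjvm.so and libhdfs.so visible to the dynamic loader (assumed paths)
LD_LIBRARY_PATH=$JAVA_HOME/jre/lib/amd64/server:$HADOOP_HOME/libhdfs:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH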


Note that the CLASSPATH must include $HADOOP_CONF_DIR.
The blocksize and other HDFS properties can then be configured through the files in HADOOP_CONF_DIR, for example:
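On a 1.x-style Hadoop (the layout this post uses), the block size for newly created files is taken from dfs.block.size in hdfs-site.xml, in bytes. The 128 MB value below is only an illustrative assumption:

<!-- $HADOOP_CONF_DIR/hdfs-site.xml -->
<configuration>
    <property>
        <name>dfs.block.size</name>
        <value>134217728</value>  <!-- 128 MB, example value -->
    </property>
</configuration>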
For the details, see hdfs.c under $HADOOP_HOME/src/c++/libhdfs:
hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                      int bufferSize, short replication, tSize blockSize);
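If you would rather not rely on the configuration files, the replication and blockSize arguments of hdfsOpenFile can be set explicitly when opening the file; any non-zero value overrides the configured default for that file. A minimal sketch (the 64 MB size, replication factor, and path are made-up example values):

/* request a 64 MB block size and replication 2 for this one file */
tSize blockSize = 64 * 1024 * 1024;
hdfsFile f = hdfsOpenFile(fs, "/tmp/explicit_bs.dat", O_WRONLY,
                          0 /* default bufferSize */,
                          2 /* replication */,
                          blockSize);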
Also see the getJNIEnv() function in hdfsJniHelper.c:

JNIEnv* getJNIEnv(void);
Compile:
gcc hdfs_write.c \
    -I${HADOOP_HOME}/src/c++/libhdfs \
    -I${JAVA_HOME}/include -I${JAVA_HOME}/include/linux \
    -L${HADOOP_HOME}/libhdfs -lhdfs \
    -o hdfs_write
hdfs_write <filename> <filesize> <buffersize>
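A concrete invocation might look like the following; the path and sizes are arbitrary example values (both sizes are in bytes), and the resulting file gets whatever block size the configuration, or an explicit hdfsOpenFile argument, specifies:

# write a 256 MB file in 4 KB chunks (example values)
./hdfs_write /tmp/test_libhdfs.dat 268435456 4096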