java - How to convert .txt/.csv files to ORC format

Tags: java hadoop

For a certain requirement I want to convert a (delimited) text file into ORC (Optimized Row Columnar) format. Since the conversion has to run periodically, I want to write a Java program to do it, and I don't want to use the Hive temporary-table workaround. Can anyone help? Here is what I have tried:

/*ORCMapper.java*/
import java.io.IOException;
import java.util.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.hive.ql.io.orc.*;
import org.apache.hadoop.io.*;

public class ORCMapper extends MapReduceBase implements
        Mapper<LongWritable, Text, NullWritable, Writable> {

    OrcSerde serde;
    @Override
    public void configure(JobConf job) {
        serde = new OrcSerde();
    }

    @Override
    public void map(LongWritable key, Text value,
            OutputCollector<NullWritable, Writable> output, Reporter reporter)
            throws IOException {
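        // NOTE: serialize(value, null) hands OrcSerde a null ObjectInspector;
        // this is what triggers the NullPointerException seen at
        // WriterImpl.createTreeWriter in the stack trace below.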
        output.collect(NullWritable.get(),serde.serialize(value, null));
    }

}

/*ORCReducer.java*/
import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class ORCReducer extends MapReduceBase implements Reducer<NullWritable, Writable, NullWritable, Writable>{

    @Override
    public void reduce(NullWritable key, Iterator<Writable> values,
            OutputCollector<NullWritable, Writable> output, Reporter reporter)
            throws IOException {
        Writable value = values.next();
        output.collect(key, value);
    }

}

/*ORCDriver.java*/
import java.io.*;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.hive.ql.io.orc.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
public class ORCDriver {
    public static void main(String[] args) throws IOException,
    InterruptedException, ClassNotFoundException {
        JobClient client = new JobClient();
        JobConf conf = new JobConf("ORC_Generator");
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputKeyClass(NullWritable.class);
        conf.setOutputValueClass(Writable.class);
        conf.setOutputFormat(OrcOutputFormat.class);
        FileInputFormat.addInputPath(conf, new Path("hdfs://localhost:9000/path/to/ipdir/textfile"));
        OrcOutputFormat.setOutputPath(conf, new Path("hdfs://localhost:9000/path/to/opdir/orcfile"));
        conf.setMapperClass(ORCMapper.class);
        System.out.println(OrcOutputFormat.getWorkOutputPath(conf));
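        // Zero reduce tasks makes this a map-only job, so ORCReducer above is never invoked.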
        conf.setNumReduceTasks(0);

        client.setConf(conf);
        try {
          JobClient.runJob(conf);
        } catch (Exception e) {
          e.printStackTrace();
        }

    }

}

Running this produces the errors below, and a file named part-00000 is created on my local filesystem:

java.io.IOException: File already exists:part-00000
    at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:249)
    at org.apache.hadoop.fs.RawLocalFileSystem.create(RawLocalFileSystem.java:241)
    at org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.<init>(ChecksumFileSystem.java:335)
    at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:381)
    at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:364)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:564)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:545)
    at org.apache.hadoop.hive.ql.io.orc.WriterImpl.ensureWriter(WriterImpl.java:1672)
    at org.apache.hadoop.hive.ql.io.orc.WriterImpl.flushStripe(WriterImpl.java:1688)
    at org.apache.hadoop.hive.ql.io.orc.WriterImpl.close(WriterImpl.java:1868)
    at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:95)
    at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.close(OrcOutputFormat.java:80)
    at org.apache.hadoop.mapred.MapTask$DirectMapOutputCollector.close(MapTask.java:833)
    at org.apache.hadoop.mapred.MapTask.closeQuietly(MapTask.java:1763)
    at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:439)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:366)
    at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:223)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
    at java.util.concurrent.FutureTask.run(FutureTask.java:262)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
14/09/02 11:23:26 INFO mapred.LocalJobRunner: Map task executor complete.
14/09/02 11:23:26 WARN mapred.LocalJobRunner: job_local688970064_0001
java.lang.Exception: java.lang.NullPointerException
    at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:354)
Caused by: java.lang.NullPointerException
    at org.apache.hadoop.hive.ql.io.orc.WriterImpl.createTreeWriter(WriterImpl.java:1515)
    at org.apache.hadoop.hive.ql.io.orc.WriterImpl.<init>(WriterImpl.java:154)
    at org.apache.hadoop.hive.ql.io.orc.OrcFile.createWriter(OrcFile.java:258)
    at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.write(OrcOutputFormat.java:63)
    at org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat$OrcRecordWriter.write(OrcOutputFormat.java:46)
    at org.apache.hadoop.mapred.MapTask$DirectMapOutputCollector.collect(MapTask.java:847)
    at org.apache.hadoop.mapred.MapTask$OldOutputCollector.collect(MapTask.java:591)
    at ORCMapper.map(ORCMapper.java:42)
    at ORCMapper.map(ORCMapper.java:1)
    at org.apache.hadoop.mapred.MapRunner.run(MapRunner.java:50)
    at org.apache.hadoop.mapred.MapTask.runOldMapper(MapTask.java:430)
    at org.apache.hadoop.mapred.MapTask.run(MapTask.java:366)
    at org.apache.hadoop.mapred.LocalJobRunner$Job$MapTaskRunnable.run(LocalJobRunner.java:223)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
    at java.util.concurrent.FutureTask.run(FutureTask.java:262)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
14/09/02 11:23:26 INFO mapred.JobClient:  map 0% reduce 0%
14/09/02 11:23:26 INFO mapred.JobClient: Job complete: job_local688970064_0001
14/09/02 11:23:26 INFO mapred.JobClient: Counters: 0
14/09/02 11:23:26 INFO mapred.JobClient: Job Failed: NA
java.io.IOException: Job failed!
    at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:1357)
    at ORCDriver.main(ORCDriver.java:53)

Best Answer

You can insert the text data into an ORC table with a statement like this:

insert overwrite table orcTable select * from textTable;

The first table, orcTable, is created with the following statement:

create table orcTable(name string, city string) stored as orc;

textTable has the same structure as orcTable and is backed by your delimited text data.
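Since the question asks for a Java program that can run periodically, the same statements can also be issued from Java through Hive's JDBC driver instead of being typed into the Hive shell. The following is a minimal sketch, not part of the original answer: it assumes a HiveServer2 instance at localhost:10000, a comma-delimited input file already in HDFS, and reuses the placeholder path /path/to/ipdir/textfile from the question.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class TextToOrcJob {
    public static void main(String[] args) throws Exception {
        // Register the Hive JDBC driver (hive-jdbc must be on the classpath).
        Class.forName("org.apache.hive.jdbc.HiveDriver");

        // HiveServer2 address and empty credentials are assumptions for this sketch.
        try (Connection con = DriverManager.getConnection(
                "jdbc:hive2://localhost:10000/default", "", "");
             Statement stmt = con.createStatement()) {

            // Staging table over the delimited text input.
            stmt.execute("CREATE TABLE IF NOT EXISTS textTable (name STRING, city STRING) "
                    + "ROW FORMAT DELIMITED FIELDS TERMINATED BY ','");

            // Move the input file from HDFS into the staging table
            // (the path is the placeholder used in the question).
            stmt.execute("LOAD DATA INPATH '/path/to/ipdir/textfile' "
                    + "OVERWRITE INTO TABLE textTable");

            // ORC-backed table with the same schema, as in the answer above.
            stmt.execute("CREATE TABLE IF NOT EXISTS orcTable (name STRING, city STRING) "
                    + "STORED AS ORC");

            // Rewrite the text rows as ORC.
            stmt.execute("INSERT OVERWRITE TABLE orcTable SELECT * FROM textTable");
        }
    }
}

Using IF NOT EXISTS for the tables and OVERWRITE for the load and insert keeps the program re-runnable, which matches the requirement to run the conversion periodically.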

Regarding java - how to convert .txt/.csv files to ORC format, we found a similar question on Stack Overflow: https://stackoverflow.com/questions/25117760/
