scala – 如何强制spark / hadoop忽略文件上的.gz扩展名并将其作
我有以下代码:
val lines: RDD[String] = sparkSession.sparkContext.textFile("s3://mybucket/file.gz") URL以.gz结尾,但这是遗留代码的结果.该文件是纯文本,不涉及压缩.然而,火花坚持将其作为GZIP文件阅读,这显然是失败的.如何让它忽略扩展并简单地将文件作为文本读取? 基于this article我尝试在不包含GZIP编解码器的各个地方设置配置,例如: sparkContext.getConf.set("spark.hadoop.io.compression.codecs",classOf[DefaultCodec].getCanonicalName) 这似乎没有任何影响. 由于文件在S3上,我不能简单地重命名它们而不复制整个文件. 解决方法
第一个解决方案:着色GzipCodec
我们的想法是在org.apache.hadoop.io.compress包中定义阴影/阴影GzipCodec,在你自己的源代码中包含这个java file并替换这一行: public String getDefaultExtension() { return ".gz"; } 有: public String getDefaultExtension() { return ".whatever"; } 在构建项目时,这将有效地使用GzipCodec的定义而不是依赖项提供的定义(这是GzipCodec的阴影). 这样,在解析文件时,textFile()将被强制应用默认编解码器,因为gzip不再适合文件的命名. 此解决方案的不便之处在于您无法在同一个应用程序中处理真正的gzip文件. 第二种解决方案:将newAPIHadoopFile与自定义/修改后的TextInputFormat一起使用 您可以使用newAPIHadoopFile(而不是textFile)与自定义/修改TextInputFormat强制使用DefaultCodec(纯文本). 我们将根据默认的行读取器(TextInputFormat)编写自己的行读取器.我们的想法是删除TextInputFormat的一部分,它发现它的名字是.gz,因此在读取之前解压缩文件. 而不是调用sparkContext.textFile, // plain text file with a .gz extension: sparkContext.textFile("s3://mybucket/file.gz") 我们可以使用底层的sparkContext.newAPIHadoopFile,它允许我们指定如何读取输入: import org.apache.hadoop.mapreduce.lib.input.FakeGzInputFormat import org.apache.hadoop.conf.Configuration import org.apache.hadoop.io.{LongWritable,Text} sparkContext .newAPIHadoopFile( "s3://mybucket/file.gz",classOf[FakeGzInputFormat],// This is our custom reader classOf[LongWritable],classOf[Text],new Configuration(sparkContext.hadoopConfiguration) ) .map { case (_,text) => text.toString } 调用newAPIHadoopFile的常用方法是使用TextInputFormat.这是包装文件读取方式以及根据文件扩展名选择压缩编解码器的部分. 我们将其称为FakeGzInputFormat并将其实现为 package org.apache.hadoop.mapreduce.lib.input; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import com.google.common.base.Charsets; public class FakeGzInputFormat extends TextInputFormat { public RecordReader<LongWritable,Text> createRecordReader( InputSplit split,TaskAttemptContext context ) { String delimiter = context.getConfiguration().get("textinputformat.record.delimiter"); byte[] recordDelimiterBytes = null; if (null != delimiter) recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8); // Here we use our custom `FakeGzLineRecordReader` instead of // `LineRecordReader`: return new FakeGzLineRecordReader(recordDelimiterBytes); } @Override protected boolean isSplitable(JobContext context,Path file) { return true; // plain text is splittable (as opposed to gzip) } } 事实上,我们必须更深入一级,并用我们自己的(我们称之为FakeGzLineRecordReader)替换默认的 由于从LineRecordReader继承起来非常困难,我们可以复制LineRecordReader(在src / main / java / org / apache / hadoop / mapreduce / lib / input中)并稍微修改(并简化)initialize(InputSplit genericSplit,TaskAttemptContext context)方法通过强制使用默认编解码器(纯文本): (与原始 package org.apache.hadoop.mapreduce.lib.input; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate({"MapReduce","Pig"}) @InterfaceStability.Evolving public class FakeGzLineRecordReader extends RecordReader<LongWritable,Text> { private static final Logger LOG = LoggerFactory.getLogger(FakeGzLineRecordReader.class); public static final String MAX_LINE_LENGTH = "mapreduce.input.linerecordreader.line.maxlength"; private long start; private long pos; private long end; private SplitLineReader in; private FSDataInputStream fileIn; private Seekable filePosition; private int maxLineLength; private LongWritable key; private Text value; private byte[] recordDelimiterBytes; public FakeGzLineRecordReader(byte[] recordDelimiter) { this.recordDelimiterBytes = recordDelimiter; } // This has been simplified a lot since we don't need to handle compression // codecs. public void initialize( InputSplit genericSplit,TaskAttemptContext context ) throws IOException { FileSplit split = (FileSplit) genericSplit; Configuration job = context.getConfiguration(); this.maxLineLength = job.getInt(MAX_LINE_LENGTH,Integer.MAX_VALUE); start = split.getStart(); end = start + split.getLength(); final Path file = split.getPath(); final FileSystem fs = file.getFileSystem(job); fileIn = fs.open(file); fileIn.seek(start); in = new UncompressedSplitLineReader( fileIn,job,this.recordDelimiterBytes,split.getLength() ); filePosition = fileIn; if (start != 0) { start += in.readLine(new Text(),maxBytesToConsume(start)); } this.pos = start; } // Simplified as input is not compressed: private int maxBytesToConsume(long pos) { return (int) Math.max(Math.min(Integer.MAX_VALUE,end - pos),maxLineLength); } // Simplified as input is not compressed: private long getFilePosition() { return pos; } private int skipUtfByteOrderMark() throws IOException { int newMaxLineLength = (int) Math.min(3L + (long) maxLineLength,Integer.MAX_VALUE); int newSize = in.readLine(value,newMaxLineLength,maxBytesToConsume(pos)); pos += newSize; int textLength = value.getLength(); byte[] textBytes = value.getBytes(); if ((textLength >= 3) && (textBytes[0] == (byte)0xEF) && (textBytes[1] == (byte)0xBB) && (textBytes[2] == (byte)0xBF)) { LOG.info("Found UTF-8 BOM and skipped it"); textLength -= 3; newSize -= 3; if (textLength > 0) { textBytes = value.copyBytes(); value.set(textBytes,3,textLength); } else { value.clear(); } } return newSize; } public boolean nextKeyValue() throws IOException { if (key == null) { key = new LongWritable(); } key.set(pos); if (value == null) { value = new Text(); } int newSize = 0; while (getFilePosition() <= end || in.needAdditionalRecordAfterSplit()) { if (pos == 0) { newSize = skipUtfByteOrderMark(); } else { newSize = in.readLine(value,maxLineLength,maxBytesToConsume(pos)); pos += newSize; } if ((newSize == 0) || (newSize < maxLineLength)) { break; } LOG.info("Skipped line of size " + newSize + " at pos " + (pos - newSize)); } if (newSize == 0) { key = null; value = null; return false; } else { return true; } } @Override public LongWritable getCurrentKey() { return key; } @Override public Text getCurrentValue() { return value; } public float getProgress() { if (start == end) { return 0.0f; } else { return Math.min(1.0f,(getFilePosition() - start) / (float)(end - start)); } } public synchronized void close() throws IOException { try { if (in != null) { in.close(); } } finally {} } } (编辑:李大同) 【声明】本站内容均来自网络,其相关言论仅代表作者个人观点,不代表本站立场。若无意侵犯到您的权利,请及时与联系站长删除相关内容! |