public class MapredCarbonInputFormat extends CarbonTableInputFormat<org.apache.hadoop.io.ArrayWritable> implements org.apache.hadoop.mapred.InputFormat<Void,org.apache.hadoop.io.ArrayWritable>, org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.AvoidSplitCombination
Fields inherited from class CarbonTableInputFormat: DATABASE_NAME, INPUT_FILES, INPUT_SEGMENT_NUMBERS, TABLE_NAME

Fields inherited from ancestor class: hitedStreamFiles, numBlocks, numSegments, numStreamFiles, numStreamSegments

| Constructor and Description |
|---|
MapredCarbonInputFormat() |
| Modifier and Type | Method and Description |
|---|---|
org.apache.hadoop.mapred.RecordReader<Void,org.apache.hadoop.io.ArrayWritable> |
getRecordReader(org.apache.hadoop.mapred.InputSplit inputSplit,
org.apache.hadoop.mapred.JobConf jobConf,
org.apache.hadoop.mapred.Reporter reporter) |
org.apache.hadoop.mapred.InputSplit[] |
getSplits(org.apache.hadoop.mapred.JobConf jobConf,
int numSplits) |
boolean |
shouldSkipCombine(org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf) |
Methods inherited from class CarbonTableInputFormat: getBlockRowCount, getOrCreateCarbonTable, getReadCommitted, getSegmentsToAccess, getSplits, getSplitsOfOneSegment, getSplitsOfStreaming, getSplitsOfStreaming, makeSplit, refreshSegmentCacheIfRequired

Methods inherited from ancestor class (presumably CarbonInputFormat — verify against source): createQueryModel, createQueryModel, createRecordReader, getAbsoluteTableIdentifier, getAccessStreamingSegments, getColumnProjection, getDatabaseName, getDataBlocksOfSegment, getDataTypeConverter, getFilterPredicates, getHitedStreamFiles, getNumBlocks, getNumSegments, getNumStreamFiles, getNumStreamSegments, getPartitionsToPrune, getReadCommittedScope, getReadSupportClass, getTableInfo, getTableName, getValidateSegmentsToAccess, isFgDataMapPruningEnable, isSplitable, projectAllColumns, setAccessStreamingSegments, setCarbonReadSupport, setColumnProjection, setColumnProjection, setDatabaseName, setDataTypeConverter, setFgDataMapPruning, setFilterPredicates, setPartitionIdList, setPartitionsToPrune, setQuerySegment, setQuerySegment, setQuerySegment, setReadCommittedScope, setSegmentsToAccess, setTableInfo, setTableName, setTablePath, setTransactionalTable, setValidateSegmentsToAccess

Methods inherited from ancestor class (presumably org.apache.hadoop.mapreduce.lib.input.FileInputFormat — verify against source): addInputPath, addInputPathRecursively, addInputPaths, computeSplitSize, getBlockIndex, getFormatMinSplitSize, getInputDirRecursive, getInputPathFilter, getInputPaths, getMaxSplitSize, getMinSplitSize, listStatus, makeSplit, makeSplit, setInputDirRecursive, setInputPathFilter, setInputPaths, setInputPaths, setMaxInputSplitSize, setMinInputSplitSize

public org.apache.hadoop.mapred.InputSplit[] getSplits(org.apache.hadoop.mapred.JobConf jobConf,
int numSplits)
throws IOException
Specified by: getSplits in interface org.apache.hadoop.mapred.InputFormat<Void,org.apache.hadoop.io.ArrayWritable>
Throws: IOException

public org.apache.hadoop.mapred.RecordReader<Void,org.apache.hadoop.io.ArrayWritable> getRecordReader(org.apache.hadoop.mapred.InputSplit inputSplit, org.apache.hadoop.mapred.JobConf jobConf, org.apache.hadoop.mapred.Reporter reporter) throws IOException
Specified by: getRecordReader in interface org.apache.hadoop.mapred.InputFormat<Void,org.apache.hadoop.io.ArrayWritable>
Throws: IOException

public boolean shouldSkipCombine(org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf)
throws IOException
Specified by: shouldSkipCombine in interface org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.AvoidSplitCombination
Throws: IOException

Copyright © 2016–2019 The Apache Software Foundation. All rights reserved.