public class MapredCarbonInputFormat extends CarbonTableInputFormat<org.apache.hadoop.io.ArrayWritable> implements org.apache.hadoop.mapred.InputFormat<Void,org.apache.hadoop.io.ArrayWritable>, org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.AvoidSplitCombination
Fields inherited from class CarbonTableInputFormat: ALTER_PARTITION_ID, DATABASE_NAME, INPUT_FILES, INPUT_SEGMENT_NUMBERS, PARTITIONS_TO_PRUNE, TABLE_NAME, UPADTE_T, VALIDATE_INPUT_SEGMENT_IDs

| Constructor and Description |
|---|
MapredCarbonInputFormat() |
| Modifier and Type | Method and Description |
|---|---|
org.apache.hadoop.mapred.RecordReader<Void,org.apache.hadoop.io.ArrayWritable> |
getRecordReader(org.apache.hadoop.mapred.InputSplit inputSplit,
org.apache.hadoop.mapred.JobConf jobConf,
org.apache.hadoop.mapred.Reporter reporter) |
org.apache.hadoop.mapred.InputSplit[] |
getSplits(org.apache.hadoop.mapred.JobConf jobConf,
int numSplits) |
boolean |
shouldSkipCombine(org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf) |
Methods inherited from class CarbonTableInputFormat: createRecordReader, getAbsoluteTableIdentifier, getBlockRowCount, getColumnProjection, getDatabaseName, getDataTypeConverter, getFilterPredicates, getPartitionsToPrune, getQueryModel, getReadSupportClass, getSegmentsToAccess, getSplits, getSplitsOfOneSegment, getSplitsOfStreaming, getTableName, getValidateSegmentsToAccess, isSplitable, makeSplit, makeSplit, setCarbonReadSupport, setColumnProjection, setDatabaseName, setDataMapJob, setDataTypeConverter, setFilterPredicates, setPartitionIdList, setPartitionsToPrune, setQuerySegment, setSegmentsToAccess, setTableInfo, setTableName, setTablePath, setValidateSegmentsToAccess

Methods inherited from superclass FileInputFormat: addInputPath, addInputPathRecursively, addInputPaths, computeSplitSize, getBlockIndex, getFormatMinSplitSize, getInputDirRecursive, getInputPathFilter, getInputPaths, getMaxSplitSize, getMinSplitSize, listStatus, makeSplit, makeSplit, setInputDirRecursive, setInputPathFilter, setInputPaths, setInputPaths, setMaxInputSplitSize, setMinInputSplitSize

public org.apache.hadoop.mapred.InputSplit[] getSplits(org.apache.hadoop.mapred.JobConf jobConf,
                                                       int numSplits)
throws IOException
Specified by: getSplits in interface org.apache.hadoop.mapred.InputFormat<Void,org.apache.hadoop.io.ArrayWritable>
Throws: IOException

public org.apache.hadoop.mapred.RecordReader<Void,org.apache.hadoop.io.ArrayWritable> getRecordReader(org.apache.hadoop.mapred.InputSplit inputSplit, org.apache.hadoop.mapred.JobConf jobConf, org.apache.hadoop.mapred.Reporter reporter) throws IOException
Specified by: getRecordReader in interface org.apache.hadoop.mapred.InputFormat<Void,org.apache.hadoop.io.ArrayWritable>
Throws: IOException

public boolean shouldSkipCombine(org.apache.hadoop.fs.Path path,
org.apache.hadoop.conf.Configuration conf)
throws IOException
Specified by: shouldSkipCombine in interface org.apache.hadoop.hive.ql.io.CombineHiveInputFormat.AvoidSplitCombination
Throws: IOException

Copyright © 2016–2018 The Apache Software Foundation. All rights reserved.