| Modifier and Type | Field and Description |
|---|---|
static String |
READ_BUFFER_SIZE |
static String |
READ_BUFFER_SIZE_DEFAULT |
static String |
STREAM_RECORD_READER_INSTANCE |
| Constructor and Description |
|---|
CarbonStreamInputFormat() |
| Modifier and Type | Method and Description |
|---|---|
org.apache.hadoop.mapreduce.RecordReader<Void,Object> |
createRecordReader(org.apache.hadoop.mapreduce.InputSplit split,
org.apache.hadoop.mapreduce.TaskAttemptContext context) |
static org.apache.carbondata.core.scan.filter.GenericQueryType[] |
getComplexDimensions(org.apache.carbondata.core.metadata.schema.table.CarbonTable carbontable,
org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn[] carbonColumns,
org.apache.carbondata.core.cache.Cache<org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier,org.apache.carbondata.core.cache.dictionary.Dictionary> cache) |
void |
setInputMetricsStats(InputMetricsStats inputMetricsStats) |
void |
setIsVectorReader(boolean vectorReader) |
void |
setModel(org.apache.carbondata.core.scan.model.QueryModel model) |
void |
setUseRawRow(boolean useRawRow) |
addInputPath, addInputPathRecursively, addInputPaths, computeSplitSize, getBlockIndex, getFormatMinSplitSize, getInputDirRecursive, getInputPathFilter, getInputPaths, getMaxSplitSize, getMinSplitSize, getSplits, isSplitable, listStatus, makeSplit, makeSplit, setInputDirRecursive, setInputPathFilter, setInputPaths, setInputPaths, setMaxInputSplitSize, setMinInputSplitSize

public static final String READ_BUFFER_SIZE
public static final String READ_BUFFER_SIZE_DEFAULT
public static final String STREAM_RECORD_READER_INSTANCE
public void setUseRawRow(boolean useRawRow)
public void setInputMetricsStats(InputMetricsStats inputMetricsStats)
public void setIsVectorReader(boolean vectorReader)
public void setModel(org.apache.carbondata.core.scan.model.QueryModel model)
public org.apache.hadoop.mapreduce.RecordReader<Void,Object> createRecordReader(org.apache.hadoop.mapreduce.InputSplit split, org.apache.hadoop.mapreduce.TaskAttemptContext context) throws IOException, InterruptedException
Overrides: createRecordReader in class org.apache.hadoop.mapreduce.InputFormat<Void,Object>

Throws: IOException, InterruptedException

public static org.apache.carbondata.core.scan.filter.GenericQueryType[] getComplexDimensions(org.apache.carbondata.core.metadata.schema.table.CarbonTable carbontable,
org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn[] carbonColumns,
org.apache.carbondata.core.cache.Cache<org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier,org.apache.carbondata.core.cache.dictionary.Dictionary> cache)
throws IOException
Throws: IOException

Copyright © 2016–2018 The Apache Software Foundation. All rights reserved.