@InterfaceAudience.Private public class HFileReaderImpl extends Object implements HFile.Reader, org.apache.hadoop.conf.Configurable
HFile.Reader
.Modifier and Type | Class and Description |
---|---|
static class |
HFileReaderImpl.BlockIndexNotLoadedException |
protected static class |
HFileReaderImpl.EncodedScanner
Scanner that operates on encoded data blocks.
|
protected static class |
HFileReaderImpl.HFileScannerImpl |
static class |
HFileReaderImpl.NotSeekedException
An exception thrown when an operation requiring a scanner to be seeked
is invoked on a scanner that is not seeked.
|
Modifier and Type | Field and Description |
---|---|
protected boolean |
decodeMemstoreTS |
static int |
KEY_VALUE_LEN_SIZE
The size of a (key length, value length) tuple that prefixes each entry in
a data block.
|
static int |
MINOR_VERSION_NO_CHECKSUM
The HFile minor version that does not support checksums
|
static int |
MINOR_VERSION_WITH_CHECKSUM
Minor versions in HFile starting with this number have HBase checksums
|
static int |
PBUF_TRAILER_MINOR_VERSION
HFile minor version that introduced the protobuf (pbuf) file trailer
|
Constructor and Description |
---|
HFileReaderImpl(org.apache.hadoop.fs.Path path,
FixedFileTrailer trailer,
FSDataInputStreamWrapper fsdis,
long fileSize,
CacheConfig cacheConf,
HFileSystem hfs,
boolean primaryReplicaReader,
org.apache.hadoop.conf.Configuration conf)
Opens an HFile.
|
HFileReaderImpl(org.apache.hadoop.fs.Path path,
FixedFileTrailer trailer,
FSDataInputStreamWrapper fsdis,
long fileSize,
CacheConfig cacheConf,
HFileSystem hfs,
org.apache.hadoop.conf.Configuration conf)
Deprecated.
|
Modifier and Type | Method and Description |
---|---|
void |
close() |
void |
close(boolean evictOnClose)
Close method with optional evictOnClose
|
protected HFileContext |
createHFileContext(FSDataInputStreamWrapper fsdis,
long fileSize,
HFileSystem hfs,
org.apache.hadoop.fs.Path path,
FixedFileTrailer trailer) |
CellComparator |
getComparator() |
Compression.Algorithm |
getCompressionAlgorithm() |
org.apache.hadoop.conf.Configuration |
getConf() |
DataBlockEncoding |
getDataBlockEncoding() |
org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader |
getDataBlockIndexReader() |
DataInput |
getDeleteBloomFilterMetadata()
Retrieves delete family Bloom filter metadata as appropriate for each
HFile version. |
DataBlockEncoding |
getEffectiveEncodingInCache(boolean isCompaction) |
long |
getEntries() |
HFileContext |
getFileContext()
Return the file context of the HFile this reader belongs to
|
Optional<Cell> |
getFirstKey() |
Optional<byte[]> |
getFirstRowKey()
|
DataInput |
getGeneralBloomFilterMetadata()
Returns a buffer with the Bloom filter metadata.
|
Optional<Cell> |
getLastKey() |
Optional<byte[]> |
getLastRowKey()
|
int |
getMajorVersion() |
HFileBlock |
getMetaBlock(String metaBlockName,
boolean cacheBlock) |
String |
getName()
Returns this reader's "name".
|
org.apache.hadoop.fs.Path |
getPath() |
HFileScanner |
getScanner(boolean cacheBlocks,
boolean pread)
Create a Scanner on this file.
|
HFileScanner |
getScanner(boolean cacheBlocks,
boolean pread,
boolean isCompaction)
Create a Scanner on this file.
|
FixedFileTrailer |
getTrailer() |
org.apache.hadoop.hbase.io.hfile.HFileBlock.FSReader |
getUncachedBlockReader()
For testing
|
boolean |
hasMVCCInfo() |
long |
indexSize() |
boolean |
isDecodeMemStoreTS() |
boolean |
isFileInfoLoaded() |
boolean |
isPrimaryReplicaReader() |
long |
length() |
HFile.FileInfo |
loadFileInfo() |
Optional<Cell> |
midKey() |
boolean |
prefetchComplete()
Returns false if block prefetching was requested for this file and has
not completed, true otherwise
|
HFileBlock |
readBlock(long dataBlockOffset,
long onDiskBlockSize,
boolean cacheBlock,
boolean pread,
boolean isCompaction,
boolean updateCacheMetrics,
BlockType expectedBlockType,
DataBlockEncoding expectedDataBlockEncoding)
Read in a file block.
|
void |
returnBlock(HFileBlock block)
Return the given block back to the cache, if it was obtained from cache.
|
void |
setConf(org.apache.hadoop.conf.Configuration conf) |
boolean |
shouldIncludeMemStoreTS() |
String |
toString() |
void |
unbufferStream()
To close the stream's socket.
|
public static final int MINOR_VERSION_WITH_CHECKSUM
public static final int MINOR_VERSION_NO_CHECKSUM
public static final int PBUF_TRAILER_MINOR_VERSION
public static final int KEY_VALUE_LEN_SIZE
protected boolean decodeMemstoreTS
@Deprecated public HFileReaderImpl(org.apache.hadoop.fs.Path path, FixedFileTrailer trailer, FSDataInputStreamWrapper fsdis, long fileSize, CacheConfig cacheConf, HFileSystem hfs, org.apache.hadoop.conf.Configuration conf) throws IOException
IOException
public HFileReaderImpl(org.apache.hadoop.fs.Path path, FixedFileTrailer trailer, FSDataInputStreamWrapper fsdis, long fileSize, CacheConfig cacheConf, HFileSystem hfs, boolean primaryReplicaReader, org.apache.hadoop.conf.Configuration conf) throws IOException
loadFileInfo()
Parameters:
path - Path to HFile.
trailer - File trailer.
fsdis - Input stream.
fileSize - Length of the stream.
cacheConf - Cache configuration.
hfs - The file system.
conf - Configuration.
Throws:
IOException
public long length()
length
in interface HFile.Reader
public void returnBlock(HFileBlock block)
HFile.CachingBlockReader
returnBlock
in interface HFile.CachingBlockReader
block
- Block to be returned.
public Optional<Cell> getFirstKey()
getFirstKey
in interface HFile.Reader
public Optional<byte[]> getFirstRowKey()
HFile
version 1: move this to StoreFile after Ryan's
patch goes in to eliminate KeyValue here.
getFirstRowKey
in interface HFile.Reader
public Optional<byte[]> getLastRowKey()
HFile
version 1: move this to StoreFile after
Ryan's patch goes in to eliminate KeyValue here.
getLastRowKey
in interface HFile.Reader
public long getEntries()
getEntries
in interface HFile.Reader
public CellComparator getComparator()
getComparator
in interface HFile.Reader
public Compression.Algorithm getCompressionAlgorithm()
getCompressionAlgorithm
in interface HFile.Reader
public long indexSize()
indexSize
in interface HFile.Reader
public String getName()
HFile.Reader
getName
in interface HFile.Reader
public org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader getDataBlockIndexReader()
getDataBlockIndexReader
in interface HFile.Reader
public FixedFileTrailer getTrailer()
getTrailer
in interface HFile.Reader
public boolean isPrimaryReplicaReader()
isPrimaryReplicaReader
in interface HFile.Reader
public HFile.FileInfo loadFileInfo() throws IOException
loadFileInfo
in interface HFile.Reader
IOException
public org.apache.hadoop.fs.Path getPath()
getPath
in interface HFile.Reader
public DataBlockEncoding getDataBlockEncoding()
getDataBlockEncoding
in interface HFile.Reader
public org.apache.hadoop.conf.Configuration getConf()
getConf
in interface org.apache.hadoop.conf.Configurable
public void setConf(org.apache.hadoop.conf.Configuration conf)
setConf
in interface org.apache.hadoop.conf.Configurable
public boolean isDecodeMemStoreTS()
isDecodeMemStoreTS
in interface HFile.Reader
public boolean shouldIncludeMemStoreTS()
shouldIncludeMemStoreTS
in interface HFile.Reader
public HFileBlock getMetaBlock(String metaBlockName, boolean cacheBlock) throws IOException
getMetaBlock
in interface HFile.Reader
Parameters:
metaBlockName
cacheBlock - Add block to cache, if found.
Throws:
IOException
public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, boolean cacheBlock, boolean pread, boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, DataBlockEncoding expectedDataBlockEncoding) throws IOException
HFile.CachingBlockReader
readBlock
in interface HFile.CachingBlockReader
Parameters:
dataBlockOffset - offset to read.
onDiskBlockSize - size of the block
isCompaction - is this block being read as part of a compaction
expectedBlockType - the block type we are expecting to read with this read operation,
or null to read whatever block type is available and avoid checking (that might reduce
caching efficiency of encoded data blocks)
expectedDataBlockEncoding - the data block encoding the caller is expecting data blocks
to be in, or null to not perform this check and return the block irrespective of the
encoding. This check only applies to data blocks and can be set to null when the caller is
expecting to read a non-data block and has set expectedBlockType accordingly.
Throws:
IOException
public boolean hasMVCCInfo()
hasMVCCInfo
in interface HFile.Reader
public Optional<Cell> getLastKey()
getLastKey
in interface HFile.Reader
public Optional<Cell> midKey() throws IOException
midKey
in interface HFile.Reader
IOException
public void close() throws IOException
close
in interface Closeable
close
in interface AutoCloseable
IOException
public void close(boolean evictOnClose) throws IOException
HFile.Reader
close
in interface HFile.Reader
IOException
public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction)
getEffectiveEncodingInCache
in interface HFile.Reader
public org.apache.hadoop.hbase.io.hfile.HFileBlock.FSReader getUncachedBlockReader()
getUncachedBlockReader
in interface HFile.Reader
public DataInput getGeneralBloomFilterMetadata() throws IOException
getGeneralBloomFilterMetadata
in interface HFile.Reader
IOException
public DataInput getDeleteBloomFilterMetadata() throws IOException
HFile.Reader
HFile
version.
Knows nothing about how that metadata is structured.
getDeleteBloomFilterMetadata
in interface HFile.Reader
IOException
public boolean isFileInfoLoaded()
public HFileContext getFileContext()
HFile.Reader
getFileContext
in interface HFile.Reader
public boolean prefetchComplete()
prefetchComplete
in interface HFile.Reader
protected HFileContext createHFileContext(FSDataInputStreamWrapper fsdis, long fileSize, HFileSystem hfs, org.apache.hadoop.fs.Path path, FixedFileTrailer trailer) throws IOException
IOException
public HFileScanner getScanner(boolean cacheBlocks, boolean pread)
HFileScanner.seekTo(Cell)
to position and start the read. There is
nothing to clean up in a Scanner. Letting go of your references to the
scanner is sufficient. NOTE: Do not use this overload of getScanner for
compactions. See getScanner(boolean, boolean, boolean)
getScanner
in interface HFile.Reader
cacheBlocks
- True if we should cache blocks read in by this scanner.
pread
- Use positional read rather than seek+read if true (pread is
better for random reads, seek+read is better for scanning).
public HFileScanner getScanner(boolean cacheBlocks, boolean pread, boolean isCompaction)
HFileScanner.seekTo(Cell)
to position and start the read. There is
nothing to clean up in a Scanner. Letting go of your references to the
scanner is sufficient.getScanner
in interface HFile.Reader
cacheBlocks
- True if we should cache blocks read in by this scanner.
pread
- Use positional read rather than seek+read if true (pread is better
for random reads, seek+read is better for scanning).
isCompaction
- is scanner being used for a compaction?
public int getMajorVersion()
public void unbufferStream()
HFile.Reader
unbufferStream
in interface HFile.Reader
Copyright © 2007–2019 Cloudera. All rights reserved.