/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.io.hfile;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Implementation of {@link HFile.Reader} optimized for positional read (pread) access. When a
 * block cache is present and prefetch-on-open is requested, it schedules an asynchronous prefetch
 * of the file's blocks into the cache upon open.
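 * <p>
 * Instances are normally obtained through the {@code HFile} factory methods rather than
 * constructed directly. A minimal usage sketch, assuming an existing {@code fs}, {@code path} and
 * {@code conf} (the exact factory overload may differ between HBase versions):
 *
 * <pre>
 * CacheConfig cacheConf = new CacheConfig(conf);
 * // true = this is the primary replica reader
 * HFile.Reader reader = HFile.createReader(fs, path, cacheConf, true, conf);
 * try {
 *   // read from the file...
 * } finally {
 *   reader.close(false); // false = don't evict cached blocks on close
 * }
 * </pre>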
 */
@InterfaceAudience.Private
public class HFilePreadReader extends HFileReaderImpl {
  private static final Logger LOG = LoggerFactory.getLogger(HFilePreadReader.class);

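  // How long (in ms; here, 10 minutes) a prefetch task will wait for the block cache to finish
  // initializing before it starts reading blocks.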
  private static final int WAIT_TIME_FOR_CACHE_INITIALIZATION = 10 * 60 * 1000;

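  /**
   * Opens the reader and, if a block cache is present and prefetch-on-open is requested,
   * schedules an asynchronous task that reads the file's blocks into the cache.
   */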
  public HFilePreadReader(ReaderContext context, HFileInfo fileInfo, CacheConfig cacheConf,
    Configuration conf) throws IOException {
    super(context, fileInfo, cacheConf, conf);
    // Master-hosted regions, such as the master procedure store, may not have a block cache.
    // Prefetch file blocks upon open if requested.
    if (cacheConf.getBlockCache().isPresent() && cacheConf.shouldPrefetchOnOpen()) {
      PrefetchExecutor.request(path, new Runnable() {
        @Override
        public void run() {
          long offset = 0;
          long end = 0;
          HFile.Reader prefetchStreamReader = null;
          try {
            cacheConf.getBlockCache().ifPresent(
              cache -> cache.waitForCacheInitialization(WAIT_TIME_FOR_CACHE_INITIALIZATION));
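            // Prefetch scans the file sequentially, so use a dedicated STREAM-type reader rather
            // than reusing this pread reader.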
            ReaderContext streamReaderContext = ReaderContextBuilder.newBuilder(context)
              .withReaderType(ReaderContext.ReaderType.STREAM)
              .withInputStreamWrapper(new FSDataInputStreamWrapper(context.getFileSystem(),
                context.getInputStreamWrapper().getReaderPath()))
              .build();
            prefetchStreamReader =
              new HFileStreamReader(streamReaderContext, fileInfo, cacheConf, conf);
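            // Stop prefetching at the load-on-open section; it was already read when the file
            // was opened.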
            end = getTrailer().getLoadOnOpenDataOffset();
            if (LOG.isTraceEnabled()) {
              LOG.trace("Prefetch start " + getPathOffsetEndStr(path, offset, end));
            }
            // Don't use BlockIterator here; it's designed to read the load-on-open section.
            long onDiskSizeOfNextBlock = -1;
            // If we got here, the block cache is present anyway.
            BlockCache cache = cacheConf.getBlockCache().get();
            boolean interrupted = false;
            int blockCount = 0;
            int dataBlockCount = 0;
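            // Walk the file block by block, advancing the offset by each block's on-disk size,
            // until the load-on-open section is reached.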
            while (offset < end) {
              if (Thread.interrupted()) {
                break;
              }
              // Some cache implementations can be persistent and resilient to restarts, so first
              // check whether the block already exists in the cache's in-memory index. If it does,
              // just update the offset and move on to the next block, without reading the block
              // all the way from the cache.
              BlockCacheKey cacheKey = new BlockCacheKey(name, offset);
              if (cache.isAlreadyCached(cacheKey).orElse(false)) {
                // Right now, isAlreadyCached is only supported by BucketCache, which should
                // always cache data blocks.
                int size = cache.getBlockSize(cacheKey).orElse(0);
                if (size > 0) {
                  offset += size;
                  LOG.debug("Found block of size {} for cache key {}. "
                    + "Skipping prefetch, the block is already cached.", size, cacheKey);
                  blockCount++;
                  dataBlockCount++;
                  // Reset this here: we don't know the size of the next block, since we never
                  // actually read the current block.
                  onDiskSizeOfNextBlock = -1;
                  continue;
                } else {
                  LOG.debug("Found block for cache key {}, but couldn't get its size. "
                    + "Maybe the cache implementation doesn't support it? "
                    + "We'll need to read the block from the cache or the file system.", cacheKey);
                }
              } else {
                LOG.debug("No entry in the backing map for cache key {}.", cacheKey);
              }
              // Perhaps we got our block from the cache? Unlikely as this may be, if it happens,
              // the internal-to-HFileBlock thread local that holds the overread fetching the next
              // header will not have been populated, so pass in the onDiskSize obtained from the
              // cached block. This 'optimization' triggers extremely rarely.
              HFileBlock block = prefetchStreamReader.readBlock(offset, onDiskSizeOfNextBlock,
                /* cacheBlock= */true, /* pread= */false, false, false, null, null, true);
              try {
                if (!cacheConf.isInMemory()) {
                  if (!cache.blockFitsIntoTheCache(block).orElse(true)) {
                    LOG.warn(
                      "Interrupting prefetch for file {} because block {} of size {} "
                        + "doesn't fit in the available cache space. isCacheEnabled: {}",
                      path, cacheKey, block.getOnDiskSizeWithHeader(), cache.isCacheEnabled());
                    interrupted = true;
                    break;
                  }
                  if (!cacheConf.isHeapUsageBelowThreshold()) {
                    LOG.warn(
                      "Interrupting prefetch because heap usage is above the threshold: {} "
                        + "configured via {}",
                      cacheConf.getHeapUsageThreshold(), CacheConfig.PREFETCH_HEAP_USAGE_THRESHOLD);
                    interrupted = true;
                    break;
                  }
                }
                onDiskSizeOfNextBlock = block.getNextBlockOnDiskSize();
                offset += block.getOnDiskSizeWithHeader();
                blockCount++;
                if (block.getBlockType().isData()) {
                  dataBlockCount++;
                }
              } finally {
                // Ideally, readBlock above won't find the block in the cache; we call it so that
                // the block data is read from the file system and cached in the block cache. We
                // must release the block here to decrease its reference count.
                block.release();
              }
            }
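            // Report the block counts back to the cache so it can track fully prefetched files.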
            if (!interrupted) {
              cacheConf.getBlockCache().get().notifyFileCachingCompleted(path, blockCount,
                dataBlockCount, offset);
            }
          } catch (IOException e) {
            // IOExceptions are probably due to region closes (relocation, etc.)
            if (LOG.isDebugEnabled()) {
              LOG.debug("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
            }
          } catch (Throwable e) {
            // Other exceptions are interesting
            LOG.warn("Prefetch " + getPathOffsetEndStr(path, offset, end), e);
          } finally {
            if (prefetchStreamReader != null) {
              try {
                prefetchStreamReader.close(false);
              } catch (IOException e) {
                LOG.warn("Failed to close prefetch stream reader, path: " + path, e);
              }
            }
            PrefetchExecutor.complete(path);
          }
        }
      });
    }
  }

  /*
   * Get the region name for the given file path. An HFile is always kept under
   * <region>/<column family>/<hfile>, so to find the region for a given HFile, just take the name
   * of its grandparent directory.
   */
  private static String getRegionName(Path path) {
    return path.getParent().getParent().getName();
  }

  private static String getPathOffsetEndStr(final Path path, final long offset, final long end) {
    return "path=" + path.toString() + ", offset=" + offset + ", end=" + end;
  }

  @Override
  public void close(boolean evictOnClose) throws IOException {
    PrefetchExecutor.cancel(path);
    // Deallocate blocks in the load-on-open section
    this.fileInfo.close();
    // Deallocate data blocks
    cacheConf.getBlockCache().ifPresent(cache -> {
      if (evictOnClose) {
        int numEvicted = cache.evictBlocksByHfileName(name);
        if (LOG.isTraceEnabled()) {
          LOG.trace("On close, file={}, evicted={} block(s)", name, numEvicted);
        }
      }
    });
    fsBlockReader.closeStreams();
  }
}