/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;

import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Used to perform Scan operations.
 * <p>
 * All operations are identical to {@link Get} with the exception of instantiation. Rather than
 * specifying a single row, an optional startRow and stopRow may be defined. If rows are not
 * specified, the Scanner will iterate over all rows.
 * <p>
 * To get all columns from all rows of a Table, create an instance with no constraints; use the
 * {@link #Scan()} constructor. To constrain the scan to specific column families, call
 * {@link #addFamily(byte[]) addFamily} for each family to retrieve on your Scan instance.
 * <p>
 * To get specific columns, call {@link #addColumn(byte[], byte[]) addColumn} for each column to
 * retrieve.
 * <p>
 * To only retrieve columns within a specific range of version timestamps, call
 * {@link #setTimeRange(long, long) setTimeRange}.
 * <p>
 * To only retrieve columns with a specific timestamp, call
 * {@link #setTimestamp(long) setTimestamp}.
 * <p>
 * To limit the number of versions of each column to be returned, call {@link #setMaxVersions(int)
 * setMaxVersions}.
 * <p>
 * To limit the maximum number of values returned for each call to next(), call
 * {@link #setBatch(int) setBatch}.
 * <p>
 * To add a filter, call {@link #setFilter(org.apache.hadoop.hbase.filter.Filter) setFilter}.
 * <p>
 * Small scans are deprecated since 2.0.0. Instead, use {@link #setLimit(int)} to tell the
 * RegionServer how many rows are wanted; once the number of returned rows reaches the limit, the
 * RegionServer will close the RegionScanner automatically. The new implementation also fetches
 * data when opening the scanner, so a scan can complete in a single RPC call. A
 * {@link #setReadType(ReadType)} method has also been introduced; use it to tell the RegionServer
 * to use pread explicitly.
 * <p>
 * Expert: To explicitly disable server-side block caching for this scan, execute
 * {@link #setCacheBlocks(boolean)}.
 * <p>
 * <em>Note:</em> Usage alters Scan instances. Internally, attributes are updated as the Scan runs
 * and, if enabled, metrics accumulate in the Scan instance. Be aware of this when you clone a Scan
 * instance or reuse a previously created one; it is safer to create a new Scan instance per usage.
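 * <p>
 * A minimal usage sketch (the configuration, table name, and column {@code cf:q} below are
 * placeholder assumptions; any configured table works the same way):
 * <pre>{@code
 * try (Connection conn = ConnectionFactory.createConnection(conf);
 *     Table table = conn.getTable(TableName.valueOf("myTable"));
 *     ResultScanner scanner = table.getScanner(new Scan()
 *         .withStartRow(Bytes.toBytes("row-000"))
 *         .withStopRow(Bytes.toBytes("row-999"))
 *         .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")))) {
 *   for (Result result : scanner) {
 *     byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
 *     // process value...
 *   }
 * }
 * }</pre>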
 */
@InterfaceAudience.Public
public class Scan extends Query {
  private static final Logger LOG = LoggerFactory.getLogger(Scan.class);

  private static final String RAW_ATTR = "_raw_";

  private byte[] startRow = HConstants.EMPTY_START_ROW;
  private boolean includeStartRow = true;
  private byte[] stopRow  = HConstants.EMPTY_END_ROW;
  private boolean includeStopRow = false;
  private int maxVersions = 1;
  private int batch = -1;

  /**
   * Partial {@link Result}s are {@link Result}s that must be combined to form a complete
   * {@link Result}.
   * The {@link Result}s had to be returned in fragments (i.e. as partials) because the size of the
   * cells in the row exceeded max result size on the server. Typically partial results will be
   * combined client side into complete results before being delivered to the caller. However, if
   * this flag is set, the caller is indicating that they do not mind seeing partial results (i.e.
   * they understand that the results returned from the Scanner may only represent part of a
   * particular row). In such a case, any attempt to combine the partials into a complete result on
   * the client side will be skipped, and the caller will be able to see the exact results returned
   * from the server.
   */
  private boolean allowPartialResults = false;

  private int storeLimit = -1;
  private int storeOffset = 0;

  /**
   * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
   */
  // Make private or remove.
  @Deprecated
  static public final String SCAN_ATTRIBUTES_METRICS_ENABLE = "scan.attributes.metrics.enable";

  /**
   * @deprecated Use {@link #getScanMetrics()} instead.
   */
  // Make this private or remove.
  @Deprecated
  static public final String SCAN_ATTRIBUTES_METRICS_DATA = "scan.attributes.metrics.data";

  // If an application wants to use multiple scans over different tables each scan must
  // define this attribute with the appropriate table name by calling
  // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
  static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";

  /**
   * -1 means no caching specified and the value of {@link HConstants#HBASE_CLIENT_SCANNER_CACHING}
   * (default to {@link HConstants#DEFAULT_HBASE_CLIENT_SCANNER_CACHING}) will be used
   */
  private int caching = -1;
  private long maxResultSize = -1;
  private boolean cacheBlocks = true;
  private boolean reversed = false;
  private TimeRange tr = TimeRange.allTime();
  private Map<byte [], NavigableSet<byte []>> familyMap =
    new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
  private Boolean asyncPrefetch = null;

  /**
   * Parameter name for client scanner sync/async prefetch toggle.
   * When using the async scanner, prefetching data from the server is done in the background.
   * The parameter currently has no effect if the user has called Scan#setSmall or
   * Scan#setReversed.
   */
  public static final String HBASE_CLIENT_SCANNER_ASYNC_PREFETCH =
      "hbase.client.scanner.async.prefetch";

  /**
   * Default value of {@link #HBASE_CLIENT_SCANNER_ASYNC_PREFETCH}.
   */
  public static final boolean DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH = false;

  /**
   * Set to true for a small scan to get better performance. A small scan should use pread, while a
   * big scan can use seek + read. Seek + read is fast but can cause two problems: (1) resource
   * contention and (2) too much network io. See [89-fb] "Using pread for non-compaction read
   * request" https://issues.apache.org/jira/browse/HBASE-7266. On the other hand, if set to true,
   * we do openScanner, next and closeScanner in one RPC call, which means better performance for a
   * small scan [HBASE-9488]. Generally, if the scan range is within one data block (64KB), it can
   * be considered a small scan.
   */
  private boolean small = false;

  /**
   * The mvcc read point to use when opening a scanner. Remember to clear it after switching
   * regions as the mvcc is only valid within region scope.
   */
  private long mvccReadPoint = -1L;

  /**
   * The number of rows we want for this scan. We will terminate the scan if the number of returned
   * rows reaches this value.
   */
  private int limit = -1;

  /**
   * Control whether to use pread at server side.
   */
  private ReadType readType = ReadType.DEFAULT;

  private boolean needCursorResult = false;

  /**
   * Create a Scan operation across all rows.
   */
  public Scan() {}

  /**
   * @deprecated use {@code new Scan().withStartRow(startRow).setFilter(filter)} instead.
   */
  @Deprecated
  public Scan(byte[] startRow, Filter filter) {
    this(startRow);
    this.filter = filter;
  }

  /**
   * Create a Scan operation starting at the specified row.
   * <p>
   * If the specified row does not exist, the Scanner will start from the next closest row after the
   * specified row.
   * @param startRow row to start scanner at or after
   * @deprecated use {@code new Scan().withStartRow(startRow)} instead.
   */
  @Deprecated
  public Scan(byte[] startRow) {
    setStartRow(startRow);
  }

  /**
   * Create a Scan operation for the range of rows specified.
   * @param startRow row to start scanner at or after (inclusive)
   * @param stopRow row to stop scanner before (exclusive)
   * @deprecated use {@code new Scan().withStartRow(startRow).withStopRow(stopRow)} instead.
   */
  @Deprecated
  public Scan(byte[] startRow, byte[] stopRow) {
    setStartRow(startRow);
    setStopRow(stopRow);
  }

  /**
   * Creates a new instance of this class while copying all values.
   *
   * @param scan  The scan instance to copy from.
   * @throws IOException When copying the values fails.
   */
  public Scan(Scan scan) throws IOException {
    startRow = scan.getStartRow();
    includeStartRow = scan.includeStartRow();
    stopRow  = scan.getStopRow();
    includeStopRow = scan.includeStopRow();
    maxVersions = scan.getMaxVersions();
    batch = scan.getBatch();
    storeLimit = scan.getMaxResultsPerColumnFamily();
    storeOffset = scan.getRowOffsetPerColumnFamily();
    caching = scan.getCaching();
    maxResultSize = scan.getMaxResultSize();
    cacheBlocks = scan.getCacheBlocks();
    filter = scan.getFilter(); // clone?
    loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
    consistency = scan.getConsistency();
    this.setIsolationLevel(scan.getIsolationLevel());
    reversed = scan.isReversed();
    asyncPrefetch = scan.isAsyncPrefetch();
    small = scan.isSmall();
    allowPartialResults = scan.getAllowPartialResults();
    tr = scan.getTimeRange(); // TimeRange is immutable
    Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
    for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
      byte [] fam = entry.getKey();
      NavigableSet<byte[]> cols = entry.getValue();
      if (cols != null && cols.size() > 0) {
        for (byte[] col : cols) {
          addColumn(fam, col);
        }
      } else {
        addFamily(fam);
      }
    }
    for (Map.Entry<String, byte[]> attr : scan.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : scan.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = scan.getMvccReadPoint();
    this.limit = scan.getLimit();
    this.needCursorResult = scan.isNeedCursorResult();
    setPriority(scan.getPriority());
    readType = scan.getReadType();
    super.setReplicaId(scan.getReplicaId());
  }

  /**
   * Builds a scan object with the same specs as get.
   * @param get get to model scan after
   */
  public Scan(Get get) {
    this.startRow = get.getRow();
    this.includeStartRow = true;
    this.stopRow = get.getRow();
    this.includeStopRow = true;
    this.filter = get.getFilter();
    this.cacheBlocks = get.getCacheBlocks();
    this.maxVersions = get.getMaxVersions();
    this.storeLimit = get.getMaxResultsPerColumnFamily();
    this.storeOffset = get.getRowOffsetPerColumnFamily();
    this.tr = get.getTimeRange();
    this.familyMap = get.getFamilyMap();
    this.asyncPrefetch = false;
    this.consistency = get.getConsistency();
    this.setIsolationLevel(get.getIsolationLevel());
    this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue();
    for (Map.Entry<String, byte[]> attr : get.getAttributesMap().entrySet()) {
      setAttribute(attr.getKey(), attr.getValue());
    }
    for (Map.Entry<byte[], TimeRange> entry : get.getColumnFamilyTimeRange().entrySet()) {
      TimeRange tr = entry.getValue();
      setColumnFamilyTimeRange(entry.getKey(), tr.getMin(), tr.getMax());
    }
    this.mvccReadPoint = -1L;
    setPriority(get.getPriority());
    super.setReplicaId(get.getReplicaId());
  }

  public boolean isGetScan() {
    return includeStartRow && includeStopRow
        && ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow);
  }

  /**
   * Get all columns from the specified family.
   * <p>
   * Overrides previous calls to addColumn for this family.
   * @param family family name
   * @return this
   */
  public Scan addFamily(byte [] family) {
    familyMap.remove(family);
    familyMap.put(family, null);
    return this;
  }

  /**
   * Get the column from the specified family with the specified qualifier.
   * <p>
   * Overrides previous calls to addFamily for this family.
   * @param family family name
   * @param qualifier column qualifier
   * @return this
   */
  public Scan addColumn(byte [] family, byte [] qualifier) {
    NavigableSet<byte []> set = familyMap.get(family);
    if(set == null) {
      set = new TreeSet<>(Bytes.BYTES_COMPARATOR);
      familyMap.put(family, set);
    }
    if (qualifier == null) {
      qualifier = HConstants.EMPTY_BYTE_ARRAY;
    }
    set.add(qualifier);
    return this;
  }

  /**
   * Get versions of columns only within the specified timestamp range,
   * [minStamp, maxStamp).  Note, default maximum versions to return is 1.  If
   * your time range spans more than one version and you want all versions
   * returned, up the number of versions beyond the default.
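   * <p>
   * A sketch of restricting a scan to roughly the last hour while keeping all versions in that
   * window (the one-hour window is an arbitrary example value):
   * <pre>{@code
   * long now = System.currentTimeMillis();
   * Scan scan = new Scan()
   *     .readAllVersions()
   *     .setTimeRange(now - 3600_000L, now); // [min, max)
   * }</pre>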
   * @param minStamp minimum timestamp value, inclusive
   * @param maxStamp maximum timestamp value, exclusive
   * @see #setMaxVersions()
   * @see #setMaxVersions(int)
   * @return this
   */
  public Scan setTimeRange(long minStamp, long maxStamp) throws IOException {
    tr = new TimeRange(minStamp, maxStamp);
    return this;
  }

  /**
   * Get versions of columns with the specified timestamp. Note, the default maximum
   * number of versions to return is 1. If your time range spans more than one version
   * and you want all versions returned, up the number of versions beyond the
   * default.
   * @param timestamp version timestamp
   * @see #setMaxVersions()
   * @see #setMaxVersions(int)
   * @return this
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
   *             Use {@link #setTimestamp(long)} instead
   */
  @Deprecated
  public Scan setTimeStamp(long timestamp) throws IOException {
    return this.setTimestamp(timestamp);
  }

  /**
   * Get versions of columns with the specified timestamp. Note, the default maximum
   * number of versions to return is 1. If your time range spans more than one version
   * and you want all versions returned, up the number of versions beyond the
   * default.
   * @param timestamp version timestamp
   * @see #setMaxVersions()
   * @see #setMaxVersions(int)
   * @return this
   */
  public Scan setTimestamp(long timestamp) {
    try {
      tr = new TimeRange(timestamp, timestamp + 1);
    } catch(Exception e) {
      // This should never happen, unless integer overflow or something extremely wrong...
      LOG.error("TimeRange failed, likely caused by integer overflow. ", e);
      throw e;
    }

    return this;
  }

  @Override
  public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) {
    return (Scan) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp);
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, the Scanner will start from the next closest row after the
   * specified row.
   * @param startRow row to start scanner at or after
   * @return this
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   * @deprecated use {@link #withStartRow(byte[])} instead. This method may change the inclusiveness
   *             of the stop row to stay compatible with the old behavior.
   */
  @Deprecated
  public Scan setStartRow(byte[] startRow) {
    withStartRow(startRow);
    if (ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) {
      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
      this.includeStopRow = true;
    }
    return this;
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, the Scanner will start from the next closest row after the
   * specified row.
   * @param startRow row to start scanner at or after
   * @return this
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow) {
    return withStartRow(startRow, true);
  }

  /**
   * Set the start row of the scan.
   * <p>
   * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
   * will start from the next closest row after the specified row.
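   * <p>
   * A sketch of an exclusive-start "resume" scan, picking up after the last row already processed
   * (the {@code lastSeenRow} variable is an assumed value carried over from a previous batch):
   * <pre>{@code
   * Scan resume = new Scan()
   *     .withStartRow(lastSeenRow, false) // start after, not at, the last seen row
   *     .withStopRow(Bytes.toBytes("zzz"));
   * }</pre>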
   * @param startRow row to start scanner at or after
   * @param inclusive whether we should include the start row when scanning
   * @return this
   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStartRow(byte[] startRow, boolean inclusive) {
    if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("startRow's length must be less than or equal to "
          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.startRow = startRow;
    this.includeStartRow = inclusive;
    return this;
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than the provided stopRow.
   * <p>
   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
   * </p>
   * @param stopRow row to end at (exclusive)
   * @return this
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   * @deprecated use {@link #withStopRow(byte[])} instead. This method may change the inclusiveness
   *             of the stop row to stay compatible with the old behavior.
   */
  @Deprecated
  public Scan setStopRow(byte[] stopRow) {
    withStopRow(stopRow);
    if (ClientUtil.areScanStartRowAndStopRowEqual(this.startRow, this.stopRow)) {
      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
      this.includeStopRow = true;
    }
    return this;
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than the provided stopRow.
   * <p>
   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
   * </p>
   * @param stopRow row to end at (exclusive)
   * @return this
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow) {
    return withStopRow(stopRow, false);
  }

  /**
   * Set the stop row of the scan.
   * <p>
   * The scan will include rows that are lexicographically less than (or equal to if
   * {@code inclusive} is {@code true}) the provided stopRow.
   * @param stopRow row to end at
   * @param inclusive whether we should include the stop row when scanning
   * @return this
   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
   */
  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
    if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
    }
    this.stopRow = stopRow;
    this.includeStopRow = inclusive;
    return this;
  }

  /**
   * <p>Set a filter (using stopRow and startRow) so the result set only contains rows where the
   * rowKey starts with the specified prefix.</p>
   * <p>This is a utility method that converts the desired rowPrefix into the appropriate values
   * for the startRow and stopRow to achieve the desired result.</p>
   * <p>This can safely be used in combination with setFilter.</p>
   * <p><b>NOTE: Doing a {@link #setStartRow(byte[])} and/or {@link #setStopRow(byte[])}
   * after this method will yield undefined results.</b></p>
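   * <p>A short sketch of a prefix scan (the {@code "user123|"} prefix is an illustrative value;
   * any binary-safe prefix works the same way):</p>
   * <pre>{@code
   * Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("user123|"));
   * // equivalent to startRow = "user123|" and stopRow = "user123}" (the prefix with its last byte incremented)
   * }</pre>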
   * @param rowPrefix the prefix all rows must start with. (Set <i>null</i> to remove the filter.)
   * @return this
   */
  public Scan setRowPrefixFilter(byte[] rowPrefix) {
    if (rowPrefix == null) {
      setStartRow(HConstants.EMPTY_START_ROW);
      setStopRow(HConstants.EMPTY_END_ROW);
    } else {
      this.setStartRow(rowPrefix);
      this.setStopRow(calculateTheClosestNextRowKeyForPrefix(rowPrefix));
    }
    return this;
  }

  /**
   * <p>When scanning for a prefix the scan should stop immediately after the last row that
   * has the specified prefix. This method calculates the closest next rowKey immediately following
   * the given rowKeyPrefix.</p>
   * <p><b>IMPORTANT: This converts a rowKey<u>Prefix</u> into a rowKey</b>.</p>
   * <p>If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
   * simply increment the last byte of the array.
   * But if your application uses real binary rowids you may run into the scenario that your
   * prefix is something like:</p>
   * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x23, 0xFF, 0xFF }</b><br/>
   * Then this stopRow needs to be fed into the actual scan<br/>
   * &nbsp;&nbsp;&nbsp;<b>{ 0x12, 0x24 }</b> (Notice that it is shorter now)<br/>
   * This method calculates the correct stop row value for this usecase.
   *
   * @param rowKeyPrefix the rowKey<u>Prefix</u>.
   * @return the closest next rowKey immediately following the given rowKeyPrefix.
   */
  private byte[] calculateTheClosestNextRowKeyForPrefix(byte[] rowKeyPrefix) {
    // Essentially we are treating it like an 'unsigned very very long' and doing +1 manually.
    // Search for the place where the trailing 0xFFs start
    int offset = rowKeyPrefix.length;
    while (offset > 0) {
      if (rowKeyPrefix[offset - 1] != (byte) 0xFF) {
        break;
      }
      offset--;
    }

    if (offset == 0) {
      // We got an 0xFFFF... (only FFs) stopRow value which is
      // the last possible prefix before the end of the table.
      // So set it to stop at the 'end of the table'
      return HConstants.EMPTY_END_ROW;
    }

    // Copy the right length of the original
    byte[] newStopRow = Arrays.copyOfRange(rowKeyPrefix, 0, offset);
    // And increment the last one
    newStopRow[newStopRow.length - 1]++;
    return newStopRow;
  }

  /**
   * Get all available versions.
   * @return this
   * @deprecated It is easy to misunderstand with column family's max versions, so use
   *             {@link #readAllVersions()} instead.
   */
  @Deprecated
  public Scan setMaxVersions() {
    return readAllVersions();
  }

  /**
   * Get up to the specified number of versions of each column.
   * @param maxVersions maximum versions for each column
   * @return this
   * @deprecated It is easy to misunderstand with column family's max versions, so use
   *             {@link #readVersions(int)} instead.
   */
  @Deprecated
  public Scan setMaxVersions(int maxVersions) {
    return readVersions(maxVersions);
  }

  /**
   * Get all available versions.
   * @return this
   */
  public Scan readAllVersions() {
    this.maxVersions = Integer.MAX_VALUE;
    return this;
  }

  /**
   * Get up to the specified number of versions of each column.
   * @param versions specified number of versions for each column
   * @return this
   */
  public Scan readVersions(int versions) {
    this.maxVersions = versions;
    return this;
  }

  /**
   * Set the maximum number of cells to return for each call to next(). Callers should be aware
   * that this is not equivalent to calling {@link #setAllowPartialResults(boolean)}.
   * If you don't allow partial results, the number of cells in each Result must equal your
   * batch setting unless it is the last Result for the current row. So this method is useful for
   * paging queries. If you just want to prevent OOM on the client, using
   * setAllowPartialResults(true) is better.
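   * <p>
   * A sketch of paging through a wide row in chunks of cells (assumes an open {@code Table} named
   * {@code table}; the value {@code 100} is an arbitrary example batch size):
   * <pre>{@code
   * Scan scan = new Scan().setBatch(100);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result chunk : scanner) {
   *     // each chunk holds at most 100 cells of a row; the last chunk of a row may hold fewer
   *   }
   * }
   * }</pre>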
   * @param batch the maximum number of values
   * @see Result#mayHaveMoreCellsInRow()
   */
  public Scan setBatch(int batch) {
    if (this.hasFilter() && this.filter.hasFilterRow()) {
      throw new IncompatibleFilterException(
        "Cannot set batch on a scan using a filter" +
        " that returns true for filter.hasFilterRow");
    }
    this.batch = batch;
    return this;
  }

  /**
   * Set the maximum number of values to return per row per Column Family
   * @param limit the maximum number of values returned / row / CF
   */
  public Scan setMaxResultsPerColumnFamily(int limit) {
    this.storeLimit = limit;
    return this;
  }

  /**
   * Set offset for the row per Column Family.
   * @param offset is the number of kvs that will be skipped.
   */
  public Scan setRowOffsetPerColumnFamily(int offset) {
    this.storeOffset = offset;
    return this;
  }

  /**
   * Set the number of rows for caching that will be passed to scanners.
   * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
   * apply.
   * Higher caching values will enable faster scanners but will use more memory.
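   * <p>
   * A sketch of tuning a long-running scan (both numbers below are illustrative assumptions, not
   * recommended defaults):
   * <pre>{@code
   * Scan scan = new Scan()
   *     .setCaching(500)                       // fetch up to 500 rows per RPC
   *     .setMaxResultSize(2L * 1024 * 1024);   // but never more than ~2 MB per response
   * }</pre>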
   * @param caching the number of rows for caching
   */
  public Scan setCaching(int caching) {
    this.caching = caching;
    return this;
  }

  /**
   * @return the maximum result size in bytes. See {@link #setMaxResultSize(long)}
   */
  public long getMaxResultSize() {
    return maxResultSize;
  }

  /**
   * Set the maximum result size. The default is -1; this means that no specific
   * maximum result size will be set for this scan, and the global configured
   * value will be used instead. (Defaults to unlimited).
   *
   * @param maxResultSize The maximum result size in bytes.
   */
  public Scan setMaxResultSize(long maxResultSize) {
    this.maxResultSize = maxResultSize;
    return this;
  }

  @Override
  public Scan setFilter(Filter filter) {
    super.setFilter(filter);
    return this;
  }

  /**
   * Setting the familyMap
   * @param familyMap map of family to qualifier
   * @return this
   */
  public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
    this.familyMap = familyMap;
    return this;
  }

  /**
   * Getting the familyMap
   * @return familyMap
   */
  public Map<byte [], NavigableSet<byte []>> getFamilyMap() {
    return this.familyMap;
  }

  /**
   * @return the number of families in familyMap
   */
  public int numFamilies() {
    if(hasFamilies()) {
      return this.familyMap.size();
    }
    return 0;
  }

  /**
   * @return true if familyMap is non empty, false otherwise
   */
  public boolean hasFamilies() {
    return !this.familyMap.isEmpty();
  }

  /**
   * @return the keys of the familyMap
   */
  public byte[][] getFamilies() {
    if(hasFamilies()) {
      return this.familyMap.keySet().toArray(new byte[0][0]);
    }
    return null;
  }

  /**
   * @return the startrow
   */
  public byte [] getStartRow() {
    return this.startRow;
  }

  /**
   * @return whether we should include the start row when scanning
   */
  public boolean includeStartRow() {
    return includeStartRow;
  }

  /**
   * @return the stoprow
   */
  public byte[] getStopRow() {
    return this.stopRow;
  }

  /**
   * @return whether we should include the stop row when scanning
   */
  public boolean includeStopRow() {
    return includeStopRow;
  }

  /**
   * @return the max number of versions to fetch
   */
  public int getMaxVersions() {
    return this.maxVersions;
  }

  /**
   * @return maximum number of values to return for a single call to next()
   */
  public int getBatch() {
    return this.batch;
  }

  /**
   * @return maximum number of values to return per row per CF
   */
  public int getMaxResultsPerColumnFamily() {
    return this.storeLimit;
  }

  /**
   * Method for retrieving the scan's offset per row per column
   * family (#kvs to be skipped)
   * @return row offset
   */
  public int getRowOffsetPerColumnFamily() {
    return this.storeOffset;
  }

  /**
   * @return caching the number of rows fetched when calling next on a scanner
   */
  public int getCaching() {
    return this.caching;
  }

  /**
   * @return TimeRange
   */
  public TimeRange getTimeRange() {
    return this.tr;
  }

  /**
   * @return the filter
   */
  @Override
  public Filter getFilter() {
    return filter;
  }

  /**
   * @return true if a filter has been specified, false if not
   */
  public boolean hasFilter() {
    return filter != null;
  }

  /**
   * Set whether blocks should be cached for this Scan.
   * <p>
   * This is true by default.  When true, default settings of the table and
   * family are used (this will never override caching blocks if the block
   * cache is disabled for that family or entirely).
   *
   * @param cacheBlocks if false, default settings are overridden and blocks
   * will not be cached
   */
  public Scan setCacheBlocks(boolean cacheBlocks) {
    this.cacheBlocks = cacheBlocks;
    return this;
  }

  /**
   * Get whether blocks should be cached for this Scan.
   * @return true if default caching should be used, false if blocks should not
   * be cached
   */
  public boolean getCacheBlocks() {
    return cacheBlocks;
  }

  /**
   * Set whether this scan is a reversed one.
   * <p>
   * This is false by default, which means a forward (normal) scan.
   *
   * @param reversed if true, the scan will be in backward order
   * @return this
   */
  public Scan setReversed(boolean reversed) {
    this.reversed = reversed;
    return this;
  }

  /**
   * Get whether this scan is a reversed one.
   * @return true if backward scan, false if forward(default) scan
   */
  public boolean isReversed() {
    return reversed;
  }

  /**
   * Set whether the caller wants to see the partial results when the server returns
   * less-than-expected cells. It is helpful while scanning a huge row, to prevent OOM on the
   * client. By default this value is false and complete results will be assembled client side
   * before being delivered to the caller.
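   * <p>
   * A sketch of streaming a very wide row cell-by-cell (assumes an open {@code Table} named
   * {@code table}):
   * <pre>{@code
   * Scan scan = new Scan().setAllowPartialResults(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result partial : scanner) {
   *     for (Cell cell : partial.rawCells()) {
   *       // process each cell; partial.mayHaveMoreCellsInRow() says whether the row continues
   *     }
   *   }
   * }
   * }</pre>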
   * @param allowPartialResults
   * @return this
   * @see Result#mayHaveMoreCellsInRow()
   * @see #setBatch(int)
   */
  public Scan setAllowPartialResults(final boolean allowPartialResults) {
    this.allowPartialResults = allowPartialResults;
    return this;
  }

  /**
   * @return true when the caller of this scan understands that the results they will see may
   *         only represent a partial portion of a row. The entire row would be retrieved by
   *         subsequent calls to {@link ResultScanner#next()}
   */
  public boolean getAllowPartialResults() {
    return allowPartialResults;
  }

  @Override
  public Scan setLoadColumnFamiliesOnDemand(boolean value) {
    return (Scan) super.setLoadColumnFamiliesOnDemand(value);
  }

  /**
   * Compile the table and column family (i.e. schema) information
   * into a String. Useful for parsing and aggregation by debugging,
   * logging, and administration tools.
   * @return Map
   */
  @Override
  public Map<String, Object> getFingerprint() {
    Map<String, Object> map = new HashMap<>();
    List<String> families = new ArrayList<>();
    if(this.familyMap.isEmpty()) {
      map.put("families", "ALL");
      return map;
    } else {
      map.put("families", families);
    }
    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
        this.familyMap.entrySet()) {
      families.add(Bytes.toStringBinary(entry.getKey()));
    }
    return map;
  }

  /**
   * Compile the details beyond the scope of getFingerprint (row, columns,
   * timestamps, etc.) into a Map along with the fingerprinted information.
   * Useful for debugging, logging, and administration tools.
   * @param maxCols a limit on the number of columns output prior to truncation
   * @return Map
   */
  @Override
  public Map<String, Object> toMap(int maxCols) {
    // start with the fingerprint map and build on top of it
    Map<String, Object> map = getFingerprint();
    // map from families to column list replaces fingerprint's list of families
    Map<String, List<String>> familyColumns = new HashMap<>();
    map.put("families", familyColumns);
    // add scalar information first
    map.put("startRow", Bytes.toStringBinary(this.startRow));
    map.put("stopRow", Bytes.toStringBinary(this.stopRow));
    map.put("maxVersions", this.maxVersions);
    map.put("batch", this.batch);
    map.put("caching", this.caching);
    map.put("maxResultSize", this.maxResultSize);
    map.put("cacheBlocks", this.cacheBlocks);
    map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
    List<Long> timeRange = new ArrayList<>(2);
    timeRange.add(this.tr.getMin());
    timeRange.add(this.tr.getMax());
    map.put("timeRange", timeRange);
    int colCount = 0;
    // iterate through affected families and list out up to maxCols columns
    for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
      this.familyMap.entrySet()) {
      List<String> columns = new ArrayList<>();
      familyColumns.put(Bytes.toStringBinary(entry.getKey()), columns);
      if(entry.getValue() == null) {
        colCount++;
        --maxCols;
        columns.add("ALL");
      } else {
        colCount += entry.getValue().size();
        if (maxCols <= 0) {
          continue;
        }
        for (byte [] column : entry.getValue()) {
          if (--maxCols <= 0) {
            continue;
          }
          columns.add(Bytes.toStringBinary(column));
        }
      }
    }
    map.put("totalColumns", colCount);
    if (this.filter != null) {
      map.put("filter", this.filter.toString());
    }
    // add the id if set
    if (getId() != null) {
      map.put("id", getId());
    }
    return map;
  }

  /**
   * Enable/disable "raw" mode for this scan.
   * If "raw" is enabled the scan will return all
   * delete markers and deleted cells that have not
   * been garbage collected yet.
   * This is mostly useful for Scan on column families
   * that have KEEP_DELETED_CELLS enabled.
   * It is an error to specify any column when "raw" is set.
   * @param raw True/False to enable/disable "raw" mode.
   */
  public Scan setRaw(boolean raw) {
    setAttribute(RAW_ATTR, Bytes.toBytes(raw));
    return this;
  }

  /**
   * @return True if this Scan is in "raw" mode.
   */
  public boolean isRaw() {
    byte[] attr = getAttribute(RAW_ATTR);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  /**
   * Set whether this scan is a small scan.
   * <p>
   * A small scan should use pread, while a big scan can use seek + read. Seek + read is fast but
   * can cause two problems: (1) resource contention and (2) too much network io. See [89-fb]
   * "Using pread for non-compaction read request" https://issues.apache.org/jira/browse/HBASE-7266.
   * On the other hand, if set to true, we do openScanner, next and closeScanner in one RPC call,
   * which means better performance for a small scan [HBASE-9488]. Generally, if the scan range is
   * within one data block (64KB), it can be considered a small scan.
   * @param small whether this scan is a small scan
   * @deprecated since 2.0.0. Use {@link #setLimit(int)} and {@link #setReadType(ReadType)} instead.
   *             And for the one rpc optimization, now we will also fetch data when opening the
   *             scanner, and if the number of rows reaches the limit we will close the scanner
   *             automatically, which means we will fall back to one rpc.
   * @see #setLimit(int)
   * @see #setReadType(ReadType)
   */
  @Deprecated
  public Scan setSmall(boolean small) {
    this.small = small;
    this.readType = ReadType.PREAD;
    return this;
  }

  /**
   * Get whether this scan is a small scan
   * @return true if small scan
   * @deprecated since 2.0.0. See the comment of {@link #setSmall(boolean)}
   */
  @Deprecated
  public boolean isSmall() {
    return small;
  }

  @Override
  public Scan setAttribute(String name, byte[] value) {
    return (Scan) super.setAttribute(name, value);
  }

  @Override
  public Scan setId(String id) {
    return (Scan) super.setId(id);
  }

  @Override
  public Scan setAuthorizations(Authorizations authorizations) {
    return (Scan) super.setAuthorizations(authorizations);
  }

  @Override
  public Scan setACL(Map<String, Permission> perms) {
    return (Scan) super.setACL(perms);
  }

  @Override
  public Scan setACL(String user, Permission perms) {
    return (Scan) super.setACL(user, perms);
  }

  @Override
  public Scan setConsistency(Consistency consistency) {
    return (Scan) super.setConsistency(consistency);
  }

  @Override
  public Scan setReplicaId(int Id) {
    return (Scan) super.setReplicaId(Id);
  }

  @Override
  public Scan setIsolationLevel(IsolationLevel level) {
    return (Scan) super.setIsolationLevel(level);
  }

  @Override
  public Scan setPriority(int priority) {
    return (Scan) super.setPriority(priority);
  }

  /**
   * Enable collection of {@link ScanMetrics}. For advanced users.
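   * <p>
   * A sketch of collecting metrics for a scan (assumes an open {@code Table} named {@code table};
   * on recent versions the metrics are read back from the {@link ResultScanner} rather than from
   * this Scan):
   * <pre>{@code
   * Scan scan = new Scan().setScanMetricsEnabled(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result r : scanner) {
   *     // consume results
   *   }
   *   ScanMetrics metrics = scanner.getScanMetrics();
   *   // e.g. inspect metrics.getMetricsMap() for RPC counts, bytes returned, etc.
   * }
   * }</pre>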
   * @param enabled Set to true to enable accumulating scan metrics
   */
  public Scan setScanMetricsEnabled(final boolean enabled) {
    setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE, Bytes.toBytes(Boolean.valueOf(enabled)));
    return this;
  }

  /**
   * @return True if collection of scan metrics is enabled. For advanced users.
   */
  public boolean isScanMetricsEnabled() {
    byte[] attr = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_ENABLE);
    return attr == null ? false : Bytes.toBoolean(attr);
  }

  /**
   * @return Metrics on this Scan, if metrics were enabled.
   * @see #setScanMetricsEnabled(boolean)
   * @deprecated Use {@link ResultScanner#getScanMetrics()} instead. Note that this method should
   *             not be used together with {@link ResultScanner#getScanMetrics()}, or the metrics
   *             will be messed up.
   */
  @Deprecated
  public ScanMetrics getScanMetrics() {
    byte[] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
    if (bytes == null) return null;
    return ProtobufUtil.toScanMetrics(bytes);
  }

  public Boolean isAsyncPrefetch() {
    return asyncPrefetch;
  }

  public Scan setAsyncPrefetch(boolean asyncPrefetch) {
    this.asyncPrefetch = asyncPrefetch;
    return this;
  }

  /**
   * @return the limit of rows for this scan
   */
  public int getLimit() {
    return limit;
  }

  /**
   * Set the limit of rows for this scan. We will terminate the scan if the number of returned rows
   * reaches this value.
   * <p>
   * This condition will be tested last, after all other conditions such as stopRow, filter, etc.
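   * <p>
   * A sketch of a "first N rows" query (the limit of 10 and the start row are arbitrary example
   * values):
   * <pre>{@code
   * Scan scan = new Scan()
   *     .withStartRow(Bytes.toBytes("prefix"))
   *     .setLimit(10)
   *     .setReadType(ReadType.PREAD);
   * }</pre>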
   * @param limit the limit of rows for this scan
   * @return this
   */
  public Scan setLimit(int limit) {
    this.limit = limit;
    return this;
  }

  /**
   * Call this when you only want to get one row. It will set {@code limit} to {@code 1}, and also
   * set {@code readType} to {@link ReadType#PREAD}.
   * @return this
   */
  public Scan setOneRowLimit() {
    return setLimit(1).setReadType(ReadType.PREAD);
  }

  @InterfaceAudience.Public
  public enum ReadType {
    DEFAULT, STREAM, PREAD
  }

  /**
   * @return the read type for this scan
   */
  public ReadType getReadType() {
    return readType;
  }

  /**
   * Set the read type for this scan.
   * <p>
   * Notice that we may choose to use pread even if you specify {@link ReadType#STREAM} here. For
   * example, we will always use pread if this is a get scan.
   * @return this
   */
  public Scan setReadType(ReadType readType) {
    this.readType = readType;
    return this;
  }

  /**
   * Get the mvcc read point used to open a scanner.
   */
  long getMvccReadPoint() {
    return mvccReadPoint;
  }

  /**
   * Set the mvcc read point used to open a scanner.
   */
  Scan setMvccReadPoint(long mvccReadPoint) {
    this.mvccReadPoint = mvccReadPoint;
    return this;
  }

  /**
   * Set the mvcc read point to -1 which means do not use it.
   */
  Scan resetMvccReadPoint() {
    return setMvccReadPoint(-1L);
  }

  /**
   * When the server is slow, or we scan a table with much deleted data, or we use a sparse filter,
   * the server will respond with heartbeats to prevent a timeout. However, the scanner will return
   * a Result only when the client can do so. So if there are many heartbeats, the blocking time on
   * ResultScanner#next() may be very long, which is not friendly to online services.
   *
   * Set this to true and you can get a special Result whose #isCursor() returns true and which does
   * not contain any real data. It only tells you where the server has scanned so far. You can call
   * next to continue scanning, or open a new scanner with this row key as the start row whenever
   * you want.
   *
   * Users get a cursor when and only when there is a response from the server but we cannot return
   * a Result to users, for example, when the response is a heartbeat or there are partial cells but
   * users do not allow partial results.
   *
   * Currently the cursor is at row level, which means the special Result will only contain a row
   * key.
   * {@link Result#isCursor()}
   * {@link Result#getCursor()}
   * {@link Cursor}
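   * <p>
   * A sketch of handling cursor results (assumes an open {@code Table} named {@code table}):
   * <pre>{@code
   * Scan scan = new Scan().setNeedCursorResult(true);
   * try (ResultScanner scanner = table.getScanner(scan)) {
   *   for (Result result; (result = scanner.next()) != null;) {
   *     if (result.isCursor()) {
   *       // no real data; remember how far the server has scanned
   *       byte[] lastScannedRow = result.getCursor().getRow();
   *       continue;
   *     }
   *     // process a real Result
   *   }
   * }
   * }</pre>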
   */
  public Scan setNeedCursorResult(boolean needCursorResult) {
    this.needCursorResult = needCursorResult;
    return this;
  }

  public boolean isNeedCursorResult() {
    return needCursorResult;
  }

  /**
   * Create a new Scan with a cursor. It only sets the position information, such as the start row
   * key. The rest (like cfs, stop row, limit) should still be filled in by the user.
   * {@link Result#isCursor()}
   * {@link Result#getCursor()}
   * {@link Cursor}
   */
  public static Scan createScanFromCursor(Cursor cursor) {
    return new Scan().withStartRow(cursor.getRow());
  }
}