/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.connector.catalog;

import org.apache.spark.annotation.Evolving;

/**
 * Capabilities that can be provided by a {@link Table} implementation.
 * <p>
 * Tables use {@link Table#capabilities()} to return a set of capabilities. Each capability signals
 * to Spark that the table supports a feature identified by the capability. For example, returning
 * {@link #BATCH_READ} allows Spark to read from the table using a batch scan.
 *
 * @since 3.0.0
 */
@Evolving
public enum TableCapability {
  /**
   * Signals that the table supports reads in batch execution mode.
   */
  BATCH_READ,

  /**
   * Signals that the table supports reads in micro-batch streaming execution mode.
   */
  MICRO_BATCH_READ,

  /**
   * Signals that the table supports reads in continuous streaming execution mode.
   */
  CONTINUOUS_READ,

  /**
   * Signals that the table supports append writes in batch execution mode.
   * <p>
   * Tables that return this capability must support appending data and may also support additional
   * write modes, like {@link #TRUNCATE}, {@link #OVERWRITE_BY_FILTER}, and
   * {@link #OVERWRITE_DYNAMIC}.
   */
  BATCH_WRITE,

  /**
   * Signals that the table supports append writes in streaming execution mode.
   * <p>
   * Tables that return this capability must support appending data and may also support additional
   * write modes, like {@link #TRUNCATE}, {@link #OVERWRITE_BY_FILTER}, and
   * {@link #OVERWRITE_DYNAMIC}.
   */
  STREAMING_WRITE,

  /**
   * Signals that the table can be truncated in a write operation.
   * <p>
   * Truncating a table removes all existing rows.
   * <p>
   * See {@link org.apache.spark.sql.connector.write.SupportsTruncate}.
   */
  TRUNCATE,

  /**
   * Signals that the table can replace existing data that matches a filter with appended data in
   * a write operation.
   * <p>
   * See {@link org.apache.spark.sql.connector.write.SupportsOverwrite}.
   */
  OVERWRITE_BY_FILTER,

  /**
   * Signals that the table can dynamically replace existing data partitions with appended data in
   * a write operation.
   * <p>
   * See {@link org.apache.spark.sql.connector.write.SupportsDynamicOverwrite}.
   */
  OVERWRITE_DYNAMIC,

  /**
   * Signals that the table accepts input of any schema in a write operation.
   */
  ACCEPT_ANY_SCHEMA,

  /**
   * Signals that the table supports append writes using the V1 InsertableRelation interface.
   * <p>
   * Tables that return this capability must create a V1WriteBuilder and may also support
   * additional write modes, like {@link #TRUNCATE} and {@link #OVERWRITE_BY_FILTER}, but cannot
   * support {@link #OVERWRITE_DYNAMIC}.
   */
  V1_BATCH_WRITE
}
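
The class Javadoc above explains that a connector advertises its features by returning a set of capabilities from Table#capabilities(). Below is a minimal sketch of such a table; the class name and schema are hypothetical, and a real connector that advertises BATCH_READ and BATCH_WRITE would also implement SupportsRead and SupportsWrite so Spark can actually plan scans and writes against it.

import java.util.EnumSet;
import java.util.Set;

import org.apache.spark.sql.connector.catalog.Table;
import org.apache.spark.sql.connector.catalog.TableCapability;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;

// Hypothetical table, for illustration only.
public class ExampleTable implements Table {

  @Override
  public String name() {
    return "example";
  }

  @Override
  public StructType schema() {
    return new StructType().add("id", DataTypes.LongType);
  }

  // Advertise the features this table supports. Spark consults this set
  // when it plans batch scans and append writes against the table.
  @Override
  public Set<TableCapability> capabilities() {
    return EnumSet.of(TableCapability.BATCH_READ, TableCapability.BATCH_WRITE);
  }
}

Table provides defaults for partitioning() and properties(), so only the three methods above are required; EnumSet keeps the per-query capability checks cheap.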
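The TRUNCATE, OVERWRITE_BY_FILTER, and OVERWRITE_DYNAMIC entries each point at a mix-in interface on the write path (SupportsTruncate, SupportsOverwrite, SupportsDynamicOverwrite). As a second sketch, assuming a table that also returns OVERWRITE_BY_FILTER, its write builder would implement SupportsOverwrite roughly as follows; the class name and stored state are hypothetical.

import org.apache.spark.sql.connector.write.BatchWrite;
import org.apache.spark.sql.connector.write.SupportsOverwrite;
import org.apache.spark.sql.connector.write.WriteBuilder;
import org.apache.spark.sql.sources.Filter;

// Hypothetical write builder, for illustration only.
class ExampleWriteBuilder implements SupportsOverwrite {
  private Filter[] overwriteFilters = new Filter[0];

  // Called by Spark when the query replaces rows matching the given filters.
  // SupportsOverwrite extends SupportsTruncate, so implementing overwrite(...)
  // also covers TRUNCATE via the interface's default truncate() method.
  @Override
  public WriteBuilder overwrite(Filter[] filters) {
    this.overwriteFilters = filters;
    return this;
  }

  @Override
  public BatchWrite buildForBatch() {
    // A real connector would return a BatchWrite that removes the matching
    // rows and appends the new data; omitted to keep the sketch short.
    throw new UnsupportedOperationException("sketch only");
  }
}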