diff --git a/backends-velox/src/main/scala/org/apache/gluten/config/VeloxConfig.scala b/backends-velox/src/main/scala/org/apache/gluten/config/VeloxConfig.scala index 898eacd18644..6dbf59f7610d 100644 --- a/backends-velox/src/main/scala/org/apache/gluten/config/VeloxConfig.scala +++ b/backends-velox/src/main/scala/org/apache/gluten/config/VeloxConfig.scala @@ -751,4 +751,40 @@ object VeloxConfig extends ConfigRegistry { .doc("Maps table field names to file field names using names, not indices for Parquet files.") .booleanConf .createWithDefault(true) + + val ICEBERG_WRITE_TARGET_FILE_SIZE_BYTES = + buildConf("spark.gluten.sql.columnar.backend.velox.iceberg.write.target-file-size-bytes") + .doc("Target file size in bytes for Iceberg write operations.") + .bytesConf(ByteUnit.BYTE) + .createWithDefaultString("512MB") + + val ICEBERG_WRITE_PARQUET_COMPRESSION_CODEC = + buildConf("spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.compression-codec") + .doc("Compression codec to use for Iceberg Parquet write operations.") + .stringConf + .createWithDefault("zstd") + + val ICEBERG_WRITE_PARQUET_COMPRESSION_LEVEL = + buildConf("spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.compression-level") + .doc("Compression level for Iceberg Parquet write operations.") + .intConf + .createOptional + + val ICEBERG_WRITE_PARQUET_ROW_GROUP_SIZE_BYTES = + buildConf("spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.row-group-size-bytes") + .doc("Row group size in bytes for Iceberg Parquet write operations.") + .bytesConf(ByteUnit.BYTE) + .createWithDefaultString("128MB") + + val ICEBERG_WRITE_PARQUET_PAGE_SIZE_BYTES = + buildConf("spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.page-size-bytes") + .doc("Page size in bytes for Iceberg Parquet write operations.") + .bytesConf(ByteUnit.BYTE) + .createWithDefaultString("1MB") + + val ICEBERG_WRITE_PARQUET_PAGE_ROW_LIMIT = + 
buildConf("spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.page-row-limit") + .doc("Maximum number of rows per page for Iceberg Parquet write operations.") + .intConf + .createWithDefault(20000) } diff --git a/cpp/velox/config/VeloxConfig.h b/cpp/velox/config/VeloxConfig.h index 33a66f46490b..8de5ce0ca25f 100644 --- a/cpp/velox/config/VeloxConfig.h +++ b/cpp/velox/config/VeloxConfig.h @@ -166,9 +166,23 @@ const std::string kMemoryPoolCapacityTransferAcrossTasks = const std::string kOrcUseColumnNames = "spark.gluten.sql.columnar.backend.velox.orcUseColumnNames"; const std::string kParquetUseColumnNames = "spark.gluten.sql.columnar.backend.velox.parquetUseColumnNames"; -// write fies +// write files const std::string kMaxPartitions = "spark.gluten.sql.columnar.backend.velox.maxPartitionsPerWritersSession"; +// Iceberg write configs +const std::string kIcebergWriteTargetFileSizeBytes = + "spark.gluten.sql.columnar.backend.velox.iceberg.write.target-file-size-bytes"; +const std::string kIcebergWriteParquetCompressionCodec = + "spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.compression-codec"; +const std::string kIcebergWriteParquetCompressionLevel = + "spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.compression-level"; +const std::string kIcebergWriteParquetRowGroupSizeBytes = + "spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.row-group-size-bytes"; +const std::string kIcebergWriteParquetPageSizeBytes = + "spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.page-size-bytes"; +const std::string kIcebergWriteParquetPageRowLimit = + "spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.page-row-limit"; + const std::string kGlogVerboseLevel = "spark.gluten.sql.columnar.backend.velox.glogVerboseLevel"; const uint32_t kGlogVerboseLevelDefault = 0; const uint32_t kGlogVerboseLevelMaximum = 99; diff --git a/cpp/velox/utils/ConfigExtractor.cc b/cpp/velox/utils/ConfigExtractor.cc index 
c6cd5f8b2974..1abdb4042c1d 100644 --- a/cpp/velox/utils/ConfigExtractor.cc +++ b/cpp/velox/utils/ConfigExtractor.cc @@ -240,6 +240,19 @@ std::shared_ptr createHiveConnectorSessionC configs[facebook::velox::connector::hive::HiveConfig::kOrcUseColumnNamesSession] = conf->get(kOrcUseColumnNames, true) ? "true" : "false"; + if (conf->isValueExists(kIcebergWriteTargetFileSizeBytes)) { + configs[facebook::velox::connector::hive::HiveConfig::kMaxTargetFileSizeSession] = + conf->get(kIcebergWriteTargetFileSizeBytes); + } + if (conf->isValueExists(kIcebergWriteParquetPageSizeBytes)) { + configs[facebook::velox::parquet::WriterOptions::kParquetSessionWritePageSize] = + conf->get(kIcebergWriteParquetPageSizeBytes); + } + if (conf->isValueExists(kIcebergWriteParquetRowGroupSizeBytes)) { + configs[facebook::velox::parquet::WriterOptions::kParquetSessionWriteBatchSize] = + conf->get(kIcebergWriteParquetRowGroupSizeBytes); + } + overwriteVeloxConf(conf.get(), configs, kDynamicBackendConfPrefix); return std::make_shared(std::move(configs)); } diff --git a/docs/velox-configuration.md b/docs/velox-configuration.md index 2fa3d2623530..84faa149f42f 100644 --- a/docs/velox-configuration.md +++ b/docs/velox-configuration.md @@ -9,75 +9,81 @@ nav_order: 16 ## Gluten Velox backend configurations -| Key | Default | Description | -|----------------------------------------------------------------------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| spark.gluten.sql.columnar.backend.velox.IOThreads | <undefined> | The Size of the IO thread pool in the Connector. 
This thread pool is used for split preloading and DirectBufferedInput. By default, the value is the same as the maximum task slots per Spark executor. | -| spark.gluten.sql.columnar.backend.velox.SplitPreloadPerDriver | 2 | The split preload per task | -| spark.gluten.sql.columnar.backend.velox.abandonPartialAggregationMinPct | 90 | If partial aggregation aggregationPct greater than this value, partial aggregation may be early abandoned. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.abandonPartialAggregationMinRows | 100000 | If partial aggregation input rows number greater than this value, partial aggregation may be early abandoned. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.asyncTimeoutOnTaskStopping | 30000ms | Timeout for asynchronous execution when task is being stopped in Velox backend. It's recommended to set to a number larger than network connection timeout that the possible aysnc tasks are relying on. | -| spark.gluten.sql.columnar.backend.velox.broadcastHashTableBuildThreads | 1 | The number of threads used to build the broadcast hash table. If not set or set to 0, it will use the default number of threads (available processors). | -| spark.gluten.sql.columnar.backend.velox.cacheEnabled | false | Enable Velox cache, default off. It's recommended to enablesoft-affinity as well when enable velox cache. | -| spark.gluten.sql.columnar.backend.velox.cachePrefetchMinPct | 0 | Set prefetch cache min pct for velox file scan | -| spark.gluten.sql.columnar.backend.velox.checkUsageLeak | true | Enable check memory usage leak. 
| -| spark.gluten.sql.columnar.backend.velox.cudf.batchSize | 2147483647 | Cudf input batch size after shuffle reader | -| spark.gluten.sql.columnar.backend.velox.cudf.enableTableScan | false | Enable cudf table scan | -| spark.gluten.sql.columnar.backend.velox.cudf.enableValidation | true | Heuristics you can apply to validate a cuDF/GPU plan and only offload when the entire stage can be fully and profitably executed on GPU | -| spark.gluten.sql.columnar.backend.velox.cudf.memoryPercent | 50 | The initial percent of GPU memory to allocate for memory resource for one thread. | -| spark.gluten.sql.columnar.backend.velox.cudf.memoryResource | async | GPU RMM memory resource. | -| spark.gluten.sql.columnar.backend.velox.cudf.shuffleMaxPrefetchBytes | 1028MB | Maximum bytes to prefetch in CPU memory during GPU shuffle read while waitingfor GPU available. | -| spark.gluten.sql.columnar.backend.velox.directorySizeGuess | 32KB | Deprecated, rename to spark.gluten.sql.columnar.backend.velox.footerEstimatedSize | -| spark.gluten.sql.columnar.backend.velox.fileHandleCacheEnabled | false | Disables caching if false. File handle cache should be disabled if files are mutable, i.e. file content may change while file path stays the same. | -| spark.gluten.sql.columnar.backend.velox.filePreloadThreshold | 1MB | Set the file preload threshold for velox file scan, refer to Velox's file-preload-threshold | -| spark.gluten.sql.columnar.backend.velox.floatingPointMode | loose | Config used to control the tolerance of floating point operations alignment with Spark. When the mode is set to strict, flushing is disabled for sum(float/double)and avg(float/double). When set to loose, flushing will be enabled. | -| spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation | true | Enable flushable aggregation. If true, Gluten will try converting regular aggregation into Velox's flushable aggregation when applicable. 
A flushable aggregation could emit intermediate result at anytime when memory is full / data reduction ratio is low. | -| spark.gluten.sql.columnar.backend.velox.footerEstimatedSize | 32KB | Set the footer estimated size for velox file scan, refer to Velox's footer-estimated-size | -| spark.gluten.sql.columnar.backend.velox.hashProbe.bloomFilterPushdown.maxSize | 0b | The maximum byte size of Bloom filter that can be generated from hash probe. When set to 0, no Bloom filter will be generated. To achieve optimal performance, this should not be too larger than the CPU cache size on the host. | -| spark.gluten.sql.columnar.backend.velox.hashProbe.dynamicFilterPushdown.enabled | true | Whether hash probe can generate any dynamic filter (including Bloom filter) and push down to upstream operators. | -| spark.gluten.sql.columnar.backend.velox.loadQuantum | 256MB | Set the load quantum for velox file scan, recommend to use the default value (256MB) for performance consideration. If Velox cache is enabled, it can be 8MB at most. | -| spark.gluten.sql.columnar.backend.velox.maxCoalescedBytes | 64MB | Set the max coalesced bytes for velox file scan | -| spark.gluten.sql.columnar.backend.velox.maxCoalescedDistance | 512KB | Set the max coalesced distance bytes for velox file scan | -| spark.gluten.sql.columnar.backend.velox.maxCompiledRegexes | 100 | Controls maximum number of compiled regular expression patterns per function instance per thread of execution. | -| spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemory | <undefined> | Set the max extended memory of partial aggregation in bytes. When this option is set to a value greater than 0, it will override spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemoryRatio. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. 
| -| spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemoryRatio | 0.15 | Set the max extended memory of partial aggregation as maxExtendedPartialAggregationMemoryRatio of offheap size. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemory | <undefined> | Set the max memory of partial aggregation in bytes. When this option is set to a value greater than 0, it will override spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemoryRatio. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemoryRatio | 0.1 | Set the max memory of partial aggregation as maxPartialAggregationMemoryRatio of offheap size. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.maxPartitionsPerWritersSession | 10000 | Maximum number of partitions per a single table writer instance. 
| -| spark.gluten.sql.columnar.backend.velox.maxSpillBytes | 100G | The maximum file size of a query | -| spark.gluten.sql.columnar.backend.velox.maxSpillFileSize | 1GB | The maximum size of a single spill file created | -| spark.gluten.sql.columnar.backend.velox.maxSpillLevel | 4 | The max allowed spilling level with zero being the initial spilling level | -| spark.gluten.sql.columnar.backend.velox.maxSpillRunRows | 3M | The maximum row size of a single spill run | -| spark.gluten.sql.columnar.backend.velox.memCacheSize | 1GB | The memory cache size | -| spark.gluten.sql.columnar.backend.velox.memInitCapacity | 8MB | The initial memory capacity to reserve for a newly created Velox query memory pool. | -| spark.gluten.sql.columnar.backend.velox.memoryPoolCapacityTransferAcrossTasks | true | Whether to allow memory capacity transfer between memory pools from different tasks. | -| spark.gluten.sql.columnar.backend.velox.memoryUseHugePages | false | Use explicit huge pages for Velox memory allocation. | -| spark.gluten.sql.columnar.backend.velox.orc.scan.enabled | true | Enable velox orc scan. If disabled, vanilla spark orc scan will be used. | -| spark.gluten.sql.columnar.backend.velox.orcUseColumnNames | true | Maps table field names to file field names using names, not indices for ORC files. | -| spark.gluten.sql.columnar.backend.velox.parquetUseColumnNames | true | Maps table field names to file field names using names, not indices for Parquet files. | -| spark.gluten.sql.columnar.backend.velox.prefetchRowGroups | 1 | Set the prefetch row groups for velox file scan | -| spark.gluten.sql.columnar.backend.velox.queryTraceEnabled | false | Enable query tracing flag. | -| spark.gluten.sql.columnar.backend.velox.reclaimMaxWaitMs | 3600000ms | The max time in ms to wait for memory reclaim. | -| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput | true | If true, combine small columnar batches together before sending to shuffle. 
The default minimum output batch size is equal to 0.25 * spark.gluten.sql.columnar.maxBatchSize | -| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput.minSize | <undefined> | The minimum batch size for shuffle. If size of an input batch is smaller than the value, it will be combined with other batches before sending to shuffle. Only functions when spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput is set to true. Default value: 0.25 * | -| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInputOuptut.minSize | <undefined> | The minimum batch size for shuffle input and output. If size of an input batch is smaller than the value, it will be combined with other batches before sending to shuffle. The same applies for batches output by shuffle read. Only functions when spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput or spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleOutput is set to true. Default value: 0.25 * | -| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleOutput | false | If true, combine small columnar batches together right after shuffle read. The default minimum output batch size is equal to 0.25 * spark.gluten.sql.columnar.maxBatchSize | -| spark.gluten.sql.columnar.backend.velox.showTaskMetricsWhenFinished | false | Show velox full task metrics when finished. | -| spark.gluten.sql.columnar.backend.velox.spillFileSystem | local | The filesystem used to store spill data. local: The local file system. heap-over-local: Write file to JVM heap if having extra heap space. Otherwise write to local file system. 
| -| spark.gluten.sql.columnar.backend.velox.spillStrategy | auto | none: Disable spill on Velox backend; auto: Let Spark memory manager manage Velox's spilling | -| spark.gluten.sql.columnar.backend.velox.ssdCacheIOThreads | 1 | The IO threads for cache promoting | -| spark.gluten.sql.columnar.backend.velox.ssdCachePath | /tmp | The folder to store the cache files, better on SSD | -| spark.gluten.sql.columnar.backend.velox.ssdCacheShards | 1 | The cache shards | -| spark.gluten.sql.columnar.backend.velox.ssdCacheSize | 1GB | The SSD cache size, will do memory caching only if this value = 0 | -| spark.gluten.sql.columnar.backend.velox.ssdCheckpointIntervalBytes | 0 | Checkpoint after every 'checkpointIntervalBytes' for SSD cache. 0 means no checkpointing. | -| spark.gluten.sql.columnar.backend.velox.ssdChecksumEnabled | false | If true, checksum write to SSD is enabled. | -| spark.gluten.sql.columnar.backend.velox.ssdChecksumReadVerificationEnabled | false | If true, checksum read verification from SSD is enabled. | -| spark.gluten.sql.columnar.backend.velox.ssdDisableFileCow | false | True if copy on write should be disabled. | -| spark.gluten.sql.columnar.backend.velox.ssdODirect | false | The O_DIRECT flag for cache writing | -| spark.gluten.sql.columnar.backend.velox.valueStream.dynamicFilter.enabled | false | Whether to apply dynamic filters pushed down from hash probe in the ValueStream (shuffle reader) operator to filter rows before they reach the hash join. | -| spark.gluten.sql.enable.enhancedFeatures | true | Enable some features including iceberg native write and other features. | -| spark.gluten.sql.rewrite.castArrayToString | true | When true, rewrite `cast(array as String)` to `concat('[', array_join(array, ', ', null), ']')` to allow offloading to Velox. | -| spark.gluten.velox.castFromVarcharAddTrimNode | false | If true, will add a trim node which has the same sementic as vanilla Spark to CAST-from-varchar.Otherwise, do nothing. 
| -| spark.gluten.velox.fs.s3a.connect.timeout | 200s | Timeout for AWS s3 connection. | +| Key | Default | Description | +|------------------------------------------------------------------------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| spark.gluten.sql.columnar.backend.velox.IOThreads | <undefined> | The Size of the IO thread pool in the Connector. This thread pool is used for split preloading and DirectBufferedInput. By default, the value is the same as the maximum task slots per Spark executor. | +| spark.gluten.sql.columnar.backend.velox.SplitPreloadPerDriver | 2 | The split preload per task | +| spark.gluten.sql.columnar.backend.velox.abandonPartialAggregationMinPct | 90 | If partial aggregation aggregationPct greater than this value, partial aggregation may be early abandoned. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.abandonPartialAggregationMinRows | 100000 | If partial aggregation input rows number greater than this value, partial aggregation may be early abandoned. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.asyncTimeoutOnTaskStopping | 30000ms | Timeout for asynchronous execution when task is being stopped in Velox backend. 
It's recommended to set to a number larger than network connection timeout that the possible async tasks are relying on. | +| spark.gluten.sql.columnar.backend.velox.broadcastHashTableBuildThreads | 1 | The number of threads used to build the broadcast hash table. If not set or set to 0, it will use the default number of threads (available processors). | +| spark.gluten.sql.columnar.backend.velox.cacheEnabled | false | Enable Velox cache, default off. It's recommended to enable soft-affinity as well when enable velox cache. | +| spark.gluten.sql.columnar.backend.velox.cachePrefetchMinPct | 0 | Set prefetch cache min pct for velox file scan | +| spark.gluten.sql.columnar.backend.velox.checkUsageLeak | true | Enable check memory usage leak. | +| spark.gluten.sql.columnar.backend.velox.cudf.batchSize | 2147483647 | Cudf input batch size after shuffle reader | +| spark.gluten.sql.columnar.backend.velox.cudf.enableTableScan | false | Enable cudf table scan | +| spark.gluten.sql.columnar.backend.velox.cudf.enableValidation | true | Heuristics you can apply to validate a cuDF/GPU plan and only offload when the entire stage can be fully and profitably executed on GPU | +| spark.gluten.sql.columnar.backend.velox.cudf.memoryPercent | 50 | The initial percent of GPU memory to allocate for memory resource for one thread. | +| spark.gluten.sql.columnar.backend.velox.cudf.memoryResource | async | GPU RMM memory resource. | +| spark.gluten.sql.columnar.backend.velox.cudf.shuffleMaxPrefetchBytes | 1028MB | Maximum bytes to prefetch in CPU memory during GPU shuffle read while waiting for GPU available. | +| spark.gluten.sql.columnar.backend.velox.directorySizeGuess | 32KB | Deprecated, rename to spark.gluten.sql.columnar.backend.velox.footerEstimatedSize | +| spark.gluten.sql.columnar.backend.velox.fileHandleCacheEnabled | false | Disables caching if false. File handle cache should be disabled if files are mutable, i.e. file content may change while file path stays the same. 
| +| spark.gluten.sql.columnar.backend.velox.filePreloadThreshold | 1MB | Set the file preload threshold for velox file scan, refer to Velox's file-preload-threshold | +| spark.gluten.sql.columnar.backend.velox.floatingPointMode | loose | Config used to control the tolerance of floating point operations alignment with Spark. When the mode is set to strict, flushing is disabled for sum(float/double) and avg(float/double). When set to loose, flushing will be enabled. | +| spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation | true | Enable flushable aggregation. If true, Gluten will try converting regular aggregation into Velox's flushable aggregation when applicable. A flushable aggregation could emit intermediate result at anytime when memory is full / data reduction ratio is low. | +| spark.gluten.sql.columnar.backend.velox.footerEstimatedSize | 32KB | Set the footer estimated size for velox file scan, refer to Velox's footer-estimated-size | +| spark.gluten.sql.columnar.backend.velox.hashProbe.bloomFilterPushdown.maxSize | 0b | The maximum byte size of Bloom filter that can be generated from hash probe. When set to 0, no Bloom filter will be generated. To achieve optimal performance, this should not be much larger than the CPU cache size on the host. | +| spark.gluten.sql.columnar.backend.velox.hashProbe.dynamicFilterPushdown.enabled | true | Whether hash probe can generate any dynamic filter (including Bloom filter) and push down to upstream operators. | +| spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.compression-codec | zstd | Compression codec to use for Iceberg Parquet write operations. | +| spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.compression-level | <undefined> | Compression level for Iceberg Parquet write operations. | +| spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.page-row-limit | 20000 | Maximum number of rows per page for Iceberg Parquet write operations. 
| +| spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.page-size-bytes | 1MB | Page size in bytes for Iceberg Parquet write operations. | +| spark.gluten.sql.columnar.backend.velox.iceberg.write.parquet.row-group-size-bytes | 128MB | Row group size in bytes for Iceberg Parquet write operations. | +| spark.gluten.sql.columnar.backend.velox.iceberg.write.target-file-size-bytes | 512MB | Target file size in bytes for Iceberg write operations. | +| spark.gluten.sql.columnar.backend.velox.loadQuantum | 256MB | Set the load quantum for velox file scan, recommend to use the default value (256MB) for performance consideration. If Velox cache is enabled, it can be 8MB at most. | +| spark.gluten.sql.columnar.backend.velox.maxCoalescedBytes | 64MB | Set the max coalesced bytes for velox file scan | +| spark.gluten.sql.columnar.backend.velox.maxCoalescedDistance | 512KB | Set the max coalesced distance bytes for velox file scan | +| spark.gluten.sql.columnar.backend.velox.maxCompiledRegexes | 100 | Controls maximum number of compiled regular expression patterns per function instance per thread of execution. | +| spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemory | <undefined> | Set the max extended memory of partial aggregation in bytes. When this option is set to a value greater than 0, it will override spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemoryRatio. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemoryRatio | 0.15 | Set the max extended memory of partial aggregation as maxExtendedPartialAggregationMemoryRatio of offheap size. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. 
| +| spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemory | <undefined> | Set the max memory of partial aggregation in bytes. When this option is set to a value greater than 0, it will override spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemoryRatio. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemoryRatio | 0.1 | Set the max memory of partial aggregation as maxPartialAggregationMemoryRatio of offheap size. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.maxPartitionsPerWritersSession | 10000 | Maximum number of partitions per a single table writer instance. | +| spark.gluten.sql.columnar.backend.velox.maxSpillBytes | 100G | The maximum file size of a query | +| spark.gluten.sql.columnar.backend.velox.maxSpillFileSize | 1GB | The maximum size of a single spill file created | +| spark.gluten.sql.columnar.backend.velox.maxSpillLevel | 4 | The max allowed spilling level with zero being the initial spilling level | +| spark.gluten.sql.columnar.backend.velox.maxSpillRunRows | 3M | The maximum row size of a single spill run | +| spark.gluten.sql.columnar.backend.velox.memCacheSize | 1GB | The memory cache size | +| spark.gluten.sql.columnar.backend.velox.memInitCapacity | 8MB | The initial memory capacity to reserve for a newly created Velox query memory pool. | +| spark.gluten.sql.columnar.backend.velox.memoryPoolCapacityTransferAcrossTasks | true | Whether to allow memory capacity transfer between memory pools from different tasks. | +| spark.gluten.sql.columnar.backend.velox.memoryUseHugePages | false | Use explicit huge pages for Velox memory allocation. 
| +| spark.gluten.sql.columnar.backend.velox.orc.scan.enabled | true | Enable velox orc scan. If disabled, vanilla spark orc scan will be used. | +| spark.gluten.sql.columnar.backend.velox.orcUseColumnNames | true | Maps table field names to file field names using names, not indices for ORC files. | +| spark.gluten.sql.columnar.backend.velox.parquetUseColumnNames | true | Maps table field names to file field names using names, not indices for Parquet files. | +| spark.gluten.sql.columnar.backend.velox.prefetchRowGroups | 1 | Set the prefetch row groups for velox file scan | +| spark.gluten.sql.columnar.backend.velox.queryTraceEnabled | false | Enable query tracing flag. | +| spark.gluten.sql.columnar.backend.velox.reclaimMaxWaitMs | 3600000ms | The max time in ms to wait for memory reclaim. | +| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput | true | If true, combine small columnar batches together before sending to shuffle. The default minimum output batch size is equal to 0.25 * spark.gluten.sql.columnar.maxBatchSize | +| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput.minSize | <undefined> | The minimum batch size for shuffle. If size of an input batch is smaller than the value, it will be combined with other batches before sending to shuffle. Only functions when spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput is set to true. Default value: 0.25 * | +| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInputOuptut.minSize | <undefined> | The minimum batch size for shuffle input and output. If size of an input batch is smaller than the value, it will be combined with other batches before sending to shuffle. The same applies for batches output by shuffle read. Only functions when spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput or spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleOutput is set to true. 
Default value: 0.25 * | +| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleOutput | false | If true, combine small columnar batches together right after shuffle read. The default minimum output batch size is equal to 0.25 * spark.gluten.sql.columnar.maxBatchSize | +| spark.gluten.sql.columnar.backend.velox.showTaskMetricsWhenFinished | false | Show velox full task metrics when finished. | +| spark.gluten.sql.columnar.backend.velox.spillFileSystem | local | The filesystem used to store spill data. local: The local file system. heap-over-local: Write file to JVM heap if having extra heap space. Otherwise write to local file system. | +| spark.gluten.sql.columnar.backend.velox.spillStrategy | auto | none: Disable spill on Velox backend; auto: Let Spark memory manager manage Velox's spilling | +| spark.gluten.sql.columnar.backend.velox.ssdCacheIOThreads | 1 | The IO threads for cache promoting | +| spark.gluten.sql.columnar.backend.velox.ssdCachePath | /tmp | The folder to store the cache files, better on SSD | +| spark.gluten.sql.columnar.backend.velox.ssdCacheShards | 1 | The cache shards | +| spark.gluten.sql.columnar.backend.velox.ssdCacheSize | 1GB | The SSD cache size, will do memory caching only if this value = 0 | +| spark.gluten.sql.columnar.backend.velox.ssdCheckpointIntervalBytes | 0 | Checkpoint after every 'checkpointIntervalBytes' for SSD cache. 0 means no checkpointing. | +| spark.gluten.sql.columnar.backend.velox.ssdChecksumEnabled | false | If true, checksum write to SSD is enabled. | +| spark.gluten.sql.columnar.backend.velox.ssdChecksumReadVerificationEnabled | false | If true, checksum read verification from SSD is enabled. | +| spark.gluten.sql.columnar.backend.velox.ssdDisableFileCow | false | True if copy on write should be disabled. 
| +| spark.gluten.sql.columnar.backend.velox.ssdODirect | false | The O_DIRECT flag for cache writing | +| spark.gluten.sql.columnar.backend.velox.valueStream.dynamicFilter.enabled | false | Whether to apply dynamic filters pushed down from hash probe in the ValueStream (shuffle reader) operator to filter rows before they reach the hash join. | +| spark.gluten.sql.enable.enhancedFeatures | true | Enable some features including iceberg native write and other features. | +| spark.gluten.sql.rewrite.castArrayToString | true | When true, rewrite `cast(array as String)` to `concat('[', array_join(array, ', ', null), ']')` to allow offloading to Velox. | +| spark.gluten.velox.castFromVarcharAddTrimNode | false | If true, will add a trim node which has the same semantic as vanilla Spark to CAST-from-varchar. Otherwise, do nothing. | +| spark.gluten.velox.fs.s3a.connect.timeout | 200s | Timeout for AWS s3 connection. | ## Gluten Velox backend *experimental* configurations