diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 447ccd326f6d0..a6229bb476e61 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -1538,7 +1538,7 @@ object SQLConf {
       .createWithDefault(false)
 
   val V2_BUCKETING_ALLOW_COMPATIBLE_TRANSFORMS =
-    buildConf("spark.sql.sources.v2.bucketing.allow.enabled")
+    buildConf("spark.sql.sources.v2.bucketing.allowCompatibleTransforms.enabled")
       .doc("Whether to allow storage-partition join in the case where the partition transforms" +
         "are compatible but not identical. This config requires both " +
         s"${V2_BUCKETING_ENABLED.key} and ${V2_BUCKETING_PUSH_PART_VALUES_ENABLED.key} to be " +
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/BatchScanExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/BatchScanExec.scala
index 3772d1f9f8847..6bf2f542d53d6 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/BatchScanExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/BatchScanExec.scala
@@ -237,6 +237,7 @@ case class BatchScanExec(
 
         case _ => filteredPartitions
       }
+
       new DataSourceRDD(
         sparkContext, finalPartitions, readerFactory, supportsColumnar, customMetrics)
     }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/KeyGroupedPartitioningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/KeyGroupedPartitioningSuite.scala
index 2a49679719d92..692a898730c8c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/connector/KeyGroupedPartitioningSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/KeyGroupedPartitioningSuite.scala
@@ -1549,7 +1549,7 @@ class KeyGroupedPartitioningSuite extends DistributionAndOrderingSuiteBase {
       Row(3, 4, 2, 2, "ad", "04"),
       Row(4, 3, 3, 3, "ae", "05"),
       Row(5, 2, 4, 4, "af", "06"),
-      Row(6, 1, 5, 5, "ag", "07"),
+      Row(6, 1, 5, 5, "ag", "07")
     ))
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/catalog/functions/transformFunctions.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/catalog/functions/transformFunctions.scala
index 67da85480ef92..823177cf466a7 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/connector/catalog/functions/transformFunctions.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/catalog/functions/transformFunctions.scala
@@ -91,7 +91,7 @@ object BucketFunction extends ReducibleFunction[Int, Int] {
       otherNumBuckets: Option[_]): Option[Reducer[Int]] = {
     (thisNumBuckets, otherNumBuckets) match {
       case (Some(thisNumBucketsVal: Int), Some(otherNumBucketsVal: Int))
-        if func.isInstanceOf[ReducibleFunction[_, _]] &&
+        if func == BucketFunction &&
           ((thisNumBucketsVal > otherNumBucketsVal) &&
             (thisNumBucketsVal % otherNumBucketsVal == 0)) =>
         Some(BucketReducer(thisNumBucketsVal, otherNumBucketsVal))
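
For context, the renamed flag only takes effect together with the other two SPJ flags named in the `.doc` string above. Below is a minimal usage sketch, assuming hypothetical v2 tables `cat.db.orders` (partitioned by `bucket(8, id)`) and `cat.db.customers` (partitioned by `bucket(4, id)`); the three config keys are the real `SQLConf` entries, while the table names, catalog, and query are illustrative only:

```scala
import org.apache.spark.sql.SparkSession

object SpjCompatibleTransformsExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("spj-compatible-transforms")
      .getOrCreate()

    // Storage-partitioned join (SPJ) prerequisites, per the .doc text above.
    spark.conf.set("spark.sql.sources.v2.bucketing.enabled", "true")
    spark.conf.set("spark.sql.sources.v2.bucketing.pushPartValues.enabled", "true")
    // The key renamed by this patch.
    spark.conf.set(
      "spark.sql.sources.v2.bucketing.allowCompatibleTransforms.enabled", "true")

    // With 8 and 4 buckets, BucketFunction.reducer fires because
    // 8 > 4 and 8 % 4 == 0: BucketReducer maps each 8-bucket partition
    // value v to v % 4, so both sides line up without a shuffle.
    spark.sql(
      """SELECT o.id, o.amount, c.name
        |FROM cat.db.orders o
        |JOIN cat.db.customers c
        |ON o.id = c.id""".stripMargin).show()
  }
}
```

The tightened guard in `transformFunctions.scala` supports this: `func == BucketFunction` offers a reducer only when both sides bucket with this same function, whereas the old `isInstanceOf[ReducibleFunction[_, _]]` check would have accepted any reducible function on the other side.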