@@ -398,28 +398,31 @@ index c4fb4fa943c..a04b23870a8 100644
  assert(exchanges.size == 2)
  }
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
-index f33432ddb6f..7122af0d414 100644
+index f33432ddb6f..8ec7285ed84 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
 @@ -22,6 +22,7 @@ import org.scalatest.GivenWhenThen
  import org.apache.spark.sql.catalyst.expressions.{DynamicPruningExpression, Expression}
  import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode._
  import org.apache.spark.sql.catalyst.plans.ExistenceJoin
-+import org.apache.spark.sql.comet.CometScanExec
++import org.apache.spark.sql.comet.{CometNativeScanExec, CometScanExec}
  import org.apache.spark.sql.connector.catalog.{InMemoryTableCatalog, InMemoryTableWithV2FilterCatalog}
  import org.apache.spark.sql.execution._
  import org.apache.spark.sql.execution.adaptive._
-@@ -262,6 +263,9 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -262,6 +263,12 @@ abstract class DynamicPartitionPruningSuiteBase
  case s: BatchScanExec => s.runtimeFilters.collect {
  case d: DynamicPruningExpression => d.child
  }
 + case s: CometScanExec => s.partitionFilters.collect {
 + case d: DynamicPruningExpression => d.child
++ }
++ case s: CometNativeScanExec => s.partitionFilters.collect {
++ case d: DynamicPruningExpression => d.child
 + }
  case _ => Nil
  }
  }
-@@ -1027,7 +1031,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1027,7 +1034,8 @@ abstract class DynamicPartitionPruningSuiteBase
  }
  }

@@ -429,7 +432,7 @@ index f33432ddb6f..7122af0d414 100644
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
  SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
  withTable("large", "dimTwo", "dimThree") {
-@@ -1215,7 +1220,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1215,7 +1223,8 @@ abstract class DynamicPartitionPruningSuiteBase
  }

  test("SPARK-32509: Unused Dynamic Pruning filter shouldn't affect " +
@@ -439,7 +442,7 @@ index f33432ddb6f..7122af0d414 100644
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
  withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
  val df = sql(
-@@ -1423,7 +1429,8 @@ abstract class DynamicPartitionPruningSuiteBase
+@@ -1423,7 +1432,8 @@ abstract class DynamicPartitionPruningSuiteBase
  }
  }

@@ -449,7 +452,7 @@ index f33432ddb6f..7122af0d414 100644
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "true") {
  val df = sql(
  """ WITH v as (
-@@ -1698,7 +1705,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
+@@ -1698,7 +1708,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
  * Check the static scan metrics with and without DPP
  */
  test("static scan metrics",
@@ -459,7 +462,7 @@ index f33432ddb6f..7122af0d414 100644
  withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
  SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
  SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
-@@ -1729,6 +1737,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
+@@ -1729,6 +1740,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
  case s: BatchScanExec =>
  // we use f1 col for v2 tables due to schema pruning
  s.output.exists(_.exists(_.argString(maxFields = 100).contains("f1")))
@@ -928,7 +931,7 @@ index c26757c9cff..d55775f09d7 100644
  protected val baseResourcePath = {
  // use the same way as `SQLQueryTestSuite` to get the resource path
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
-index 3cf2bfd17ab..49728c35c42 100644
+index 3cf2bfd17ab..b1c1e41e6a9 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
 @@ -1521,7 +1521,8 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
@@ -1245,16 +1248,17 @@ index de24b8c82b0..1f835481290 100644

  setupTestData()
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantProjectsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantProjectsSuite.scala
-index 9e9d717db3b..73de2b84938 100644
+index 9e9d717db3b..cdd1042a880 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantProjectsSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/RemoveRedundantProjectsSuite.scala
 @@ -17,7 +17,10 @@

  package org.apache.spark.sql.execution

+-import org.apache.spark.sql.{DataFrame, QueryTest, Row}
 +import org.apache.comet.CometConf
 +
- import org.apache.spark.sql.{DataFrame, QueryTest, Row}
++import org.apache.spark.sql.{DataFrame, IgnoreCometNativeDataFusion, QueryTest, Row}
 +import org.apache.spark.sql.comet.CometProjectExec
  import org.apache.spark.sql.connector.SimpleWritableDataSource
  import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecutionSuite, EnableAdaptiveExecutionSuite}
@@ -1271,7 +1275,17 @@ index 9e9d717db3b..73de2b84938 100644
  assert(actual == expected)
  }
  }
-@@ -134,12 +140,26 @@ abstract class RemoveRedundantProjectsSuiteBase
+@@ -112,7 +118,8 @@ abstract class RemoveRedundantProjectsSuiteBase
+ assertProjectExec(query, 1, 3)
+ }
+
+- test("join with ordering requirement") {
++ test("join with ordering requirement",
++ IgnoreCometNativeDataFusion("https://github.com/apache/datafusion-comet/issues/4014")) {
+ val query = "select * from (select key, a, c, b from testView) as t1 join " +
+ "(select key, a, b, c from testView) as t2 on t1.key = t2.key where t2.a > 50"
+ assertProjectExec(query, 2, 2)
+@@ -134,12 +141,26 @@ abstract class RemoveRedundantProjectsSuiteBase
  val df = data.selectExpr("a", "b", "key", "explode(array(key, a, b)) as d").filter("d > 0")
  df.collect()
  val plan = df.queryExecution.executedPlan
@@ -1300,7 +1314,7 @@ index 9e9d717db3b..73de2b84938 100644
  case g @ GenerateExec(_, requiredChildOutput, _, _, child) =>
  g.copy(requiredChildOutput = requiredChildOutput.reverse,
  child = ProjectExec(requiredChildOutput.reverse, child))
-@@ -151,6 +171,7 @@ abstract class RemoveRedundantProjectsSuiteBase
+@@ -151,6 +172,7 @@ abstract class RemoveRedundantProjectsSuiteBase
  // The manually added ProjectExec node shouldn't be removed.
  assert(collectWithSubqueries(newExecutedPlan) {
  case p: ProjectExec => p
@@ -1420,7 +1434,7 @@ index 5a413c77754..207b66e1d7b 100644

  import testImplicits._
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala
-index 2f8e401e743..a4f94417dcc 100644
+index 2f8e401e743..dbcf3171946 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala
 @@ -27,9 +27,11 @@ import org.scalatest.time.SpanSugar._
@@ -1782,15 +1796,15 @@ index 2f8e401e743..a4f94417dcc 100644
  CostEvaluator.instantiate(
  classOf[SimpleShuffleSortCostEvaluator].getCanonicalName, spark.sparkContext.getConf)
  intercept[IllegalArgumentException] {
-@@ -2452,6 +2493,7 @@ class AdaptiveQueryExecSuite
+@@ -2452,6 +2492,7 @@ class AdaptiveQueryExecSuite
  val (_, adaptive) = runAdaptiveAndVerifyResult(query)
  assert(adaptive.collect {
  case sort: SortExec => sort
 + case sort: CometSortExec => sort
  }.size == 1)
  val read = collect(adaptive) {
  case read: AQEShuffleReadExec => read
-@@ -2469,7 +2511,8 @@ class AdaptiveQueryExecSuite
+@@ -2469,7 +2510,8 @@ class AdaptiveQueryExecSuite
  }
  }

@@ -1800,7 +1814,7 @@ index 2f8e401e743..a4f94417dcc 100644
  withTempView("v") {
  withSQLConf(
  SQLConf.ADAPTIVE_OPTIMIZE_SKEWS_IN_REBALANCE_PARTITIONS_ENABLED.key -> "true",
-@@ -2581,7 +2624,7 @@ class AdaptiveQueryExecSuite
+@@ -2581,7 +2623,7 @@ class AdaptiveQueryExecSuite
  runAdaptiveAndVerifyResult("SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2 " +
  "JOIN skewData3 ON value2 = value3")
  val shuffles1 = collect(adaptive1) {
@@ -1809,7 +1823,7 @@ index 2f8e401e743..a4f94417dcc 100644
  }
  assert(shuffles1.size == 4)
  val smj1 = findTopLevelSortMergeJoin(adaptive1)
-@@ -2592,7 +2635,7 @@ class AdaptiveQueryExecSuite
+@@ -2592,7 +2634,7 @@ class AdaptiveQueryExecSuite
  runAdaptiveAndVerifyResult("SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2 " +
  "JOIN skewData3 ON value1 = value3")
  val shuffles2 = collect(adaptive2) {
@@ -1818,15 +1832,15 @@ index 2f8e401e743..a4f94417dcc 100644
  }
  assert(shuffles2.size == 4)
  val smj2 = findTopLevelSortMergeJoin(adaptive2)
-@@ -2850,6 +2893,7 @@ class AdaptiveQueryExecSuite
+@@ -2850,6 +2892,7 @@ class AdaptiveQueryExecSuite
  }.size == (if (firstAccess) 1 else 0))
  assert(collect(initialExecutedPlan) {
  case s: SortExec => s
 + case s: CometSortExec => s
  }.size == (if (firstAccess) 2 else 0))
  assert(collect(initialExecutedPlan) {
  case i: InMemoryTableScanLike => i
-@@ -2980,7 +3024,9 @@ class AdaptiveQueryExecSuite
+@@ -2980,7 +3023,9 @@ class AdaptiveQueryExecSuite

  val plan = df.queryExecution.executedPlan.asInstanceOf[AdaptiveSparkPlanExec]
  assert(plan.inputPlan.isInstanceOf[TakeOrderedAndProjectExec])
@@ -2973,7 +2987,7 @@ index c63c748953f..7edca9c93a6 100644
  implicit val formats = new DefaultFormats {
  override def dateFormatter = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss")
 diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/DynamicPartitionPruningHiveScanSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/DynamicPartitionPruningHiveScanSuite.scala
-index 52abd248f3a..7a199931a08 100644
+index 52abd248f3a..b4e096cae24 100644
 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/DynamicPartitionPruningHiveScanSuite.scala
 +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/DynamicPartitionPruningHiveScanSuite.scala
 @@ -19,6 +19,7 @@ package org.apache.spark.sql.hive
@@ -2984,12 +2998,15 @@ index 52abd248f3a..7a199931a08 100644
  import org.apache.spark.sql.execution._
  import org.apache.spark.sql.execution.adaptive.{DisableAdaptiveExecutionSuite, EnableAdaptiveExecutionSuite}
  import org.apache.spark.sql.hive.execution.HiveTableScanExec
-@@ -35,6 +36,9 @@ abstract class DynamicPartitionPruningHiveScanSuiteBase
+@@ -35,6 +36,12 @@ abstract class DynamicPartitionPruningHiveScanSuiteBase
  case s: FileSourceScanExec => s.partitionFilters.collect {
  case d: DynamicPruningExpression => d.child
  }
 + case s: CometScanExec => s.partitionFilters.collect {
 + case d: DynamicPruningExpression => d.child
++ }
++ case s: CometNativeScanExec => s.partitionFilters.collect {
++ case d: DynamicPruningExpression => d.child
 + }
  case h: HiveTableScanExec => h.partitionPruningPred.collect {
  case d: DynamicPruningExpression => d.child