Skip to content

Commit f55095a

Browse files
committed
test: unignore DynamicPartitionPruning static scan metrics test
DPP support for native_datafusion scan was added in apache#4011. Update the Spark SQL test diffs so the test runs under native_datafusion by removing the IgnoreCometNativeDataFusion tag and adding a CometNativeScanExec case to getFactScan.
1 parent bb79752 commit f55095a

3 files changed

Lines changed: 12 additions & 36 deletions

File tree

dev/diffs/3.4.3.diff

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -417,7 +417,7 @@ index daef11ae4d6..9f3cc9181f2 100644
417417
assert(exchanges.size == 2)
418418
}
419419
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
420-
index f33432ddb6f..99729e465e4 100644
420+
index f33432ddb6f..b7a5fd72f7d 100644
421421
--- a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
422422
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
423423
@@ -22,6 +22,7 @@ import org.scalatest.GivenWhenThen
@@ -515,21 +515,13 @@ index f33432ddb6f..99729e465e4 100644
515515
}
516516
assert(subqueryBroadcastExecs.size === 1)
517517
subqueryBroadcastExecs.foreach { subqueryBroadcastExec =>
518-
@@ -1698,7 +1718,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
519-
* Check the static scan metrics with and without DPP
520-
*/
521-
test("static scan metrics",
522-
- DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
523-
+ DisableAdaptiveExecution("DPP in AQE must reuse broadcast"),
524-
+ IgnoreCometNativeDataFusion("https://github.com/apache/datafusion-comet/issues/3442")) {
525-
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
526-
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
527-
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
528-
@@ -1729,6 +1750,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
518+
@@ -1729,6 +1749,10 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
529519
case s: BatchScanExec =>
530520
// we use f1 col for v2 tables due to schema pruning
531521
s.output.exists(_.exists(_.argString(maxFields = 100).contains("f1")))
532522
+ case s: CometScanExec =>
523+
+ s.output.exists(_.exists(_.argString(maxFields = 100).contains("fid")))
524+
+ case s: CometNativeScanExec =>
533525
+ s.output.exists(_.exists(_.argString(maxFields = 100).contains("fid")))
534526
case _ => false
535527
}

dev/diffs/3.5.8.diff

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -398,7 +398,7 @@ index c4fb4fa943c..a04b23870a8 100644
398398
assert(exchanges.size == 2)
399399
}
400400
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
401-
index f33432ddb6f..99729e465e4 100644
401+
index f33432ddb6f..b7a5fd72f7d 100644
402402
--- a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
403403
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
404404
@@ -22,6 +22,7 @@ import org.scalatest.GivenWhenThen
@@ -496,21 +496,13 @@ index f33432ddb6f..99729e465e4 100644
496496
}
497497
assert(subqueryBroadcastExecs.size === 1)
498498
subqueryBroadcastExecs.foreach { subqueryBroadcastExec =>
499-
@@ -1698,7 +1718,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
500-
* Check the static scan metrics with and without DPP
501-
*/
502-
test("static scan metrics",
503-
- DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
504-
+ DisableAdaptiveExecution("DPP in AQE must reuse broadcast"),
505-
+ IgnoreCometNativeDataFusion("https://github.com/apache/datafusion-comet/issues/3442")) {
506-
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
507-
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
508-
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
509-
@@ -1729,6 +1750,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
499+
@@ -1729,6 +1749,10 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
510500
case s: BatchScanExec =>
511501
// we use f1 col for v2 tables due to schema pruning
512502
s.output.exists(_.exists(_.argString(maxFields = 100).contains("f1")))
513503
+ case s: CometScanExec =>
504+
+ s.output.exists(_.exists(_.argString(maxFields = 100).contains("fid")))
505+
+ case s: CometNativeScanExec =>
514506
+ s.output.exists(_.exists(_.argString(maxFields = 100).contains("fid")))
515507
case _ => false
516508
}

dev/diffs/4.0.1.diff

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -589,7 +589,7 @@ index 81713c777bc..b5f92ed9742 100644
589589
assert(exchanges.size == 2)
590590
}
591591
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
592-
index 2c24cc7d570..e68e44be724 100644
592+
index 2c24cc7d570..63047ef482e 100644
593593
--- a/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
594594
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DynamicPartitionPruningSuite.scala
595595
@@ -22,6 +22,7 @@ import org.scalatest.GivenWhenThen
@@ -705,21 +705,13 @@ index 2c24cc7d570..e68e44be724 100644
705705
}
706706
assert(subqueryBroadcastExecs.size === 1)
707707
subqueryBroadcastExecs.foreach { subqueryBroadcastExec =>
708-
@@ -1699,7 +1721,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
709-
* Check the static scan metrics with and without DPP
710-
*/
711-
test("static scan metrics",
712-
- DisableAdaptiveExecution("DPP in AQE must reuse broadcast")) {
713-
+ DisableAdaptiveExecution("DPP in AQE must reuse broadcast"),
714-
+ IgnoreCometNativeDataFusion("https://github.com/apache/datafusion-comet/issues/3442")) {
715-
withSQLConf(SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
716-
SQLConf.DYNAMIC_PARTITION_PRUNING_REUSE_BROADCAST_ONLY.key -> "false",
717-
SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
718-
@@ -1730,6 +1753,8 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
708+
@@ -1730,6 +1752,10 @@ abstract class DynamicPartitionPruningV1Suite extends DynamicPartitionPruningDat
719709
case s: BatchScanExec =>
720710
// we use f1 col for v2 tables due to schema pruning
721711
s.output.exists(_.exists(_.argString(maxFields = 100).contains("f1")))
722712
+ case s: CometScanExec =>
713+
+ s.output.exists(_.exists(_.argString(maxFields = 100).contains("fid")))
714+
+ case s: CometNativeScanExec =>
723715
+ s.output.exists(_.exists(_.argString(maxFields = 100).contains("fid")))
724716
case _ => false
725717
}

0 commit comments

Comments (0)