Commit f3480b5

andygrove and claude committed
Remove BatchReader-only test (partition column types)
This test directly instantiated BatchReader, which is now deprecated.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent b198adc · commit f3480b5

1 file changed: 0 additions & 71 deletions

spark/src/test/scala/org/apache/comet/parquet/ParquetReadSuite.scala
@@ -36,7 +36,6 @@ import org.apache.parquet.example.data.simple.SimpleGroup
 import org.apache.parquet.schema.MessageTypeParser
 import org.apache.spark.SparkException
 import org.apache.spark.sql.{CometTestBase, DataFrame, Row}
-import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
 import org.apache.spark.sql.catalyst.util.DateTimeUtils
 import org.apache.spark.sql.comet.{CometBatchScanExec, CometNativeScanExec, CometScanExec}
 import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
@@ -702,76 +701,6 @@ abstract class ParquetReadSuite extends CometTestBase {
     }
   }
 
-  test("partition column types") {
-    withTempPath { dir =>
-      Seq(1).toDF().repartition(1).write.parquet(dir.getCanonicalPath)
-
-      val dataTypes =
-        Seq(
-          StringType,
-          BooleanType,
-          ByteType,
-          BinaryType,
-          ShortType,
-          IntegerType,
-          LongType,
-          FloatType,
-          DoubleType,
-          DecimalType(25, 5),
-          DateType,
-          TimestampType)
-
-      // TODO: support `NullType` here, after we add the support in `ColumnarBatchRow`
-      val constantValues =
-        Seq(
-          UTF8String.fromString("a string"),
-          true,
-          1.toByte,
-          "Spark SQL".getBytes,
-          2.toShort,
-          3,
-          Long.MaxValue,
-          0.25.toFloat,
-          0.75d,
-          Decimal("1234.23456"),
-          DateTimeUtils.fromJavaDate(java.sql.Date.valueOf("2015-01-01")),
-          DateTimeUtils.fromJavaTimestamp(java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123")))
-
-      dataTypes.zip(constantValues).foreach { case (dt, v) =>
-        val schema = StructType(StructField("pcol", dt) :: Nil)
-        val conf = SQLConf.get
-        val partitionValues = new GenericInternalRow(Array(v))
-        val file = dir
-          .listFiles(new FileFilter {
-            override def accept(pathname: File): Boolean =
-              pathname.isFile && pathname.toString.endsWith("parquet")
-          })
-          .head
-        val reader = new BatchReader(
-          file.toString,
-          CometConf.COMET_BATCH_SIZE.get(conf),
-          schema,
-          partitionValues)
-        reader.init()
-
-        try {
-          reader.nextBatch()
-          val batch = reader.currentBatch()
-          val actual = batch.getRow(0).get(1, dt)
-          val expected = v
-          if (dt.isInstanceOf[BinaryType]) {
-            assert(
-              actual.asInstanceOf[Array[Byte]] sameElements expected.asInstanceOf[Array[Byte]])
-          } else {
-            assert(actual == expected)
-          }
-        } finally {
-          reader.close()
-        }
-      }
-    }
-  }
-
   test("partition columns - multiple batch") {
     withSQLConf(
       CometConf.COMET_BATCH_SIZE.key -> 7.toString,
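Note on equivalent coverage: the removed test drove the deprecated BatchReader directly to verify that partition values of many Spark types are materialized correctly. A minimal sketch of how the same behavior is typically exercised through the public DataFrame read path is shown below. This code is not part of the commit; the object name, temp-directory handling, and chosen values are illustrative assumptions.

// Hypothetical sketch (not from this commit): exercising partition-column
// reconstruction through spark.read instead of instantiating BatchReader.
import org.apache.spark.sql.SparkSession

object PartitionColumnSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[1]")
      .appName("pcol-sketch")
      .getOrCreate()
    import spark.implicits._

    // partitionBy encodes "pcol" in the directory layout (pcol=a/part-*.parquet),
    // so reading the directory back forces the scan to reconstruct the
    // partition value alongside the data columns, as BatchReader once did.
    val dir = java.nio.file.Files.createTempDirectory("pcol-sketch").toString
    Seq((1, "a"), (2, "b")).toDF("value", "pcol")
      .write.partitionBy("pcol").parquet(dir)

    // Filter on the partition column and check the surviving data column.
    val rows = spark.read.parquet(dir)
      .where($"pcol" === "a")
      .select("value")
      .collect()
    assert(rows.map(_.getInt(0)).sameElements(Array(1)))

    spark.stop()
  }
}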
