Spark: Test metadata tables with format-version=3 #12135
base: main
TestMetadataTables.java
@@ -31,11 +31,14 @@ | |
import java.util.stream.Stream; | ||
import org.apache.avro.generic.GenericData.Record; | ||
import org.apache.commons.collections.ListUtils; | ||
import org.apache.iceberg.CatalogProperties; | ||
import org.apache.iceberg.FileContent; | ||
import org.apache.iceberg.HasTableOperations; | ||
import org.apache.iceberg.HistoryEntry; | ||
import org.apache.iceberg.ManifestFile; | ||
import org.apache.iceberg.Parameter; | ||
import org.apache.iceberg.ParameterizedTestExtension; | ||
import org.apache.iceberg.Parameters; | ||
import org.apache.iceberg.Schema; | ||
import org.apache.iceberg.Snapshot; | ||
import org.apache.iceberg.Table; | ||
|
@@ -44,8 +47,10 @@ | |
import org.apache.iceberg.io.CloseableIterable; | ||
import org.apache.iceberg.io.InputFile; | ||
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; | ||
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; | ||
import org.apache.iceberg.relocated.com.google.common.collect.Lists; | ||
import org.apache.iceberg.spark.Spark3Util; | ||
import org.apache.iceberg.spark.SparkCatalogConfig; | ||
import org.apache.iceberg.spark.SparkSchemaUtil; | ||
import org.apache.iceberg.spark.data.TestHelpers; | ||
import org.apache.iceberg.spark.source.SimpleRecord; | ||
|
@@ -62,6 +67,44 @@ | |
|
||
@ExtendWith(ParameterizedTestExtension.class) | ||
public class TestMetadataTables extends ExtensionsTestBase { | ||
@Parameter(index = 3) | ||
private int formatVersion; | ||
|
||
@Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}, formatVersion = {3}") | ||
protected static Object[][] parameters() { | ||
return new Object[][] { | ||
{ | ||
SparkCatalogConfig.SPARK.catalogName(), | ||
SparkCatalogConfig.SPARK.implementation(), | ||
SparkCatalogConfig.SPARK.properties(), | ||
2 | ||
}, | ||
{ | ||
SparkCatalogConfig.SPARK.catalogName(), | ||
SparkCatalogConfig.SPARK.implementation(), | ||
SparkCatalogConfig.SPARK.properties(), | ||
3 | ||
}, | ||
{ | ||
SparkCatalogConfig.REST.catalogName(), | ||
SparkCatalogConfig.REST.implementation(), | ||
ImmutableMap.builder() | ||
.putAll(SparkCatalogConfig.REST.properties()) | ||
.put(CatalogProperties.URI, restCatalog.properties().get(CatalogProperties.URI)) | ||
.build(), | ||
2 | ||
}, | ||
{ | ||
SparkCatalogConfig.REST.catalogName(), | ||
SparkCatalogConfig.REST.implementation(), | ||
ImmutableMap.builder() | ||
.putAll(SparkCatalogConfig.REST.properties()) | ||
.put(CatalogProperties.URI, restCatalog.properties().get(CatalogProperties.URI)) | ||
.build(), | ||
3 | ||
} | ||
}; | ||
} | ||
|
||
@AfterEach | ||
public void removeTables() { | ||
|
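For readers unfamiliar with Iceberg's ParameterizedTestExtension, here is a minimal, self-contained sketch of how `@Parameter(index = ...)` fields line up with the rows returned by `parameters()`. The class name, catalog names, and values below are illustrative only and are not part of this diff.

```java
import java.util.Map;
import org.apache.iceberg.Parameter;
import org.apache.iceberg.ParameterizedTestExtension;
import org.apache.iceberg.Parameters;
import org.junit.jupiter.api.TestTemplate;
import org.junit.jupiter.api.extension.ExtendWith;

@ExtendWith(ParameterizedTestExtension.class)
class FormatVersionParameterizationExample {

  // Each field is injected by position from the Object[] rows returned by parameters().
  @Parameter(index = 0)
  private String catalogName;

  @Parameter(index = 1)
  private String implementation;

  @Parameter(index = 2)
  private Map<String, String> config;

  @Parameter(index = 3)
  private int formatVersion;

  @Parameters(name = "catalogName = {0}, implementation = {1}, config = {2}, formatVersion = {3}")
  protected static Object[][] parameters() {
    return new Object[][] {
      {"demo_catalog", "org.example.DemoCatalog", Map.of(), 2},
      {"demo_catalog", "org.example.DemoCatalog", Map.of(), 3},
    };
  }

  @TestTemplate
  void runsOncePerRow() {
    // formatVersion is 2 on the first invocation and 3 on the second,
    // so table DDL can interpolate it into 'format-version'='%s'.
  }
}
```

In the actual change, the first three parameters are declared by the base classes (ExtensionsTestBase / CatalogTestBase), which is why only the new formatVersion field appears in this diff.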
@@ -72,8 +115,8 @@ public void removeTables() { | |
public void testUnpartitionedTable() throws Exception { | ||
sql( | ||
"CREATE TABLE %s (id bigint, data string) USING iceberg TBLPROPERTIES" | ||
+ "('format-version'='2', 'write.delete.mode'='merge-on-read')", | ||
tableName); | ||
+ "('format-version'='%s', 'write.delete.mode'='merge-on-read')", | ||
tableName, formatVersion); | ||
|
||
List<SimpleRecord> records = | ||
Lists.newArrayList( | ||
|
@@ -142,15 +185,43 @@ public void testUnpartitionedTable() throws Exception { | |
TestHelpers.nonDerivedSchema(actualFilesDs), expectedFiles.get(1), actualFiles.get(1)); | ||
} | ||
|
||
@TestTemplate | ||
public void testPositionDeletesTable() throws Exception { | ||
sql( | ||
"CREATE TABLE %s (id bigint, data string) USING iceberg TBLPROPERTIES" | ||
+ "('format-version'='%s', 'write.delete.mode'='merge-on-read')", | ||
tableName, formatVersion); | ||
|
||
List<SimpleRecord> records = | ||
Lists.newArrayList( | ||
new SimpleRecord(1, "a"), | ||
new SimpleRecord(2, "b"), | ||
new SimpleRecord(3, "c"), | ||
new SimpleRecord(4, "d")); | ||
spark | ||
.createDataset(records, Encoders.bean(SimpleRecord.class)) | ||
.coalesce(1) | ||
.writeTo(tableName) | ||
.append(); | ||
|
||
sql("DELETE FROM %s WHERE id=1 OR id=3", tableName); | ||
|
||
// check delete files table | ||
assertThat(sql("SELECT * FROM %s.delete_files", tableName)).hasSize(1); | ||
|
||
// check position_deletes table | ||
assertThat(sql("SELECT * FROM %s.position_deletes", tableName)).hasSize(2); | ||
Comment on lines +210 to +213: Should we assert the contents of the rows? I can see how it's a bit of a pain between v2/v3, but it feels like a stronger assertion. |
||
} | ||
|
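Picking up the review comment above about asserting row contents: a hypothetical sketch of what a stronger assertion inside testPositionDeletesTable could look like, reusing the surrounding fixture's `sql(...)`, `tableName`, and AssertJ helpers. The expected positions rely on the four records being coalesced into a single data file; none of this is part of the actual diff.

```java
// Hypothetical content-level check (not part of this diff). With the four records
// written to a single data file, deleting id=1 and id=3 should produce position
// deletes at file positions 0 and 2 in both format version 2 and 3.
List<Object[]> positionDeletes =
    sql("SELECT file_path, pos FROM %s.position_deletes ORDER BY pos", tableName);
assertThat(positionDeletes).hasSize(2);
assertThat(positionDeletes.get(0)[1]).isEqualTo(0L);
assertThat(positionDeletes.get(1)[1]).isEqualTo(2L);
// Both deletes should reference the same (single) data file.
assertThat(positionDeletes.get(0)[0]).isEqualTo(positionDeletes.get(1)[0]);
```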
||
@TestTemplate | ||
public void testPartitionedTable() throws Exception { | ||
sql( | ||
"CREATE TABLE %s (id bigint, data string) " | ||
+ "USING iceberg " | ||
+ "PARTITIONED BY (data) " | ||
+ "TBLPROPERTIES" | ||
+ "('format-version'='2', 'write.delete.mode'='merge-on-read')", | ||
tableName); | ||
+ "('format-version'='%s', 'write.delete.mode'='merge-on-read')", | ||
tableName, formatVersion); | ||
|
||
List<SimpleRecord> recordsA = | ||
Lists.newArrayList(new SimpleRecord(1, "a"), new SimpleRecord(2, "a")); | ||
|
@@ -241,8 +312,8 @@ public void testPartitionedTable() throws Exception { | |
public void testAllFilesUnpartitioned() throws Exception { | ||
sql( | ||
"CREATE TABLE %s (id bigint, data string) USING iceberg TBLPROPERTIES" | ||
+ "('format-version'='2', 'write.delete.mode'='merge-on-read')", | ||
tableName); | ||
+ "('format-version'='%s', 'write.delete.mode'='merge-on-read')", | ||
tableName, formatVersion); | ||
|
||
List<SimpleRecord> records = | ||
Lists.newArrayList( | ||
|
@@ -319,8 +390,8 @@ public void testAllFilesPartitioned() throws Exception { | |
+ "USING iceberg " | ||
+ "PARTITIONED BY (data) " | ||
+ "TBLPROPERTIES" | ||
+ "('format-version'='2', 'write.delete.mode'='merge-on-read')", | ||
tableName); | ||
+ "('format-version'='%s', 'write.delete.mode'='merge-on-read')", | ||
tableName, formatVersion); | ||
|
||
List<SimpleRecord> recordsA = | ||
Lists.newArrayList(new SimpleRecord(1, "a"), new SimpleRecord(2, "a")); | ||
|
@@ -409,8 +480,8 @@ public void testMetadataLogEntries() throws Exception { | |
+ "USING iceberg " | ||
+ "PARTITIONED BY (data) " | ||
+ "TBLPROPERTIES " | ||
+ "('format-version'='2')", | ||
tableName); | ||
+ "('format-version'='%s')", | ||
tableName, formatVersion); | ||
|
||
List<SimpleRecord> recordsA = | ||
Lists.newArrayList(new SimpleRecord(1, "a"), new SimpleRecord(2, "a")); | ||
|
@@ -498,8 +569,8 @@ public void testFilesTableTimeTravelWithSchemaEvolution() throws Exception { | |
+ "USING iceberg " | ||
+ "PARTITIONED BY (data) " | ||
+ "TBLPROPERTIES" | ||
+ "('format-version'='2', 'write.delete.mode'='merge-on-read')", | ||
tableName); | ||
+ "('format-version'='%s', 'write.delete.mode'='merge-on-read')", | ||
tableName, formatVersion); | ||
|
||
List<SimpleRecord> recordsA = | ||
Lists.newArrayList(new SimpleRecord(1, "a"), new SimpleRecord(2, "a")); | ||
|
@@ -561,8 +632,8 @@ public void testSnapshotReferencesMetatable() throws Exception { | |
+ "USING iceberg " | ||
+ "PARTITIONED BY (data) " | ||
+ "TBLPROPERTIES" | ||
+ "('format-version'='2', 'write.delete.mode'='merge-on-read')", | ||
tableName); | ||
+ "('format-version'='%s', 'write.delete.mode'='merge-on-read')", | ||
tableName, formatVersion); | ||
|
||
List<SimpleRecord> recordsA = | ||
Lists.newArrayList(new SimpleRecord(1, "a"), new SimpleRecord(2, "a")); | ||
|
@@ -753,8 +824,8 @@ public void metadataLogEntriesAfterReplacingTable() throws Exception { | |
+ "USING iceberg " | ||
+ "PARTITIONED BY (data) " | ||
+ "TBLPROPERTIES " | ||
+ "('format-version'='2')", | ||
tableName); | ||
+ "('format-version'='%s')", | ||
tableName, formatVersion); | ||
|
||
Table table = Spark3Util.loadIcebergTable(spark, tableName); | ||
TableMetadata tableMetadata = ((HasTableOperations) table).operations().current(); | ||
|
@@ -813,8 +884,8 @@ public void metadataLogEntriesAfterReplacingTable() throws Exception { | |
+ "USING iceberg " | ||
+ "PARTITIONED BY (data) " | ||
+ "TBLPROPERTIES " | ||
+ "('format-version'='2')", | ||
tableName); | ||
+ "('format-version'='%s')", | ||
tableName, formatVersion); | ||
|
||
tableMetadata = ((HasTableOperations) table).operations().refresh(); | ||
assertThat(tableMetadata.snapshots()).hasSize(2); | ||
|
Second file in the diff:
@@ -84,6 +84,9 @@ public InternalRow next() { | |||
rowValues.add(deleteFile.contentOffset()); | ||||
} else if (fieldId == MetadataColumns.CONTENT_SIZE_IN_BYTES_COLUMN_ID) { | ||||
rowValues.add(ScanTaskUtil.contentSizeInBytes(deleteFile)); | ||||
} else if (fieldId == MetadataColumns.DELETE_FILE_ROW_FIELD_ID) { | ||||
// DVs don't track the row that was deleted | ||||
rowValues.add(null); | ||||
Comment: this fixes an issue when reading from the position_deletes metadata table.
|
||||
} | ||||
} | ||||
|
||||
|
Comment: As defined in CatalogTestBase, the default catalogs that were previously tested were Hadoop/Hive/Spark/REST. I'm limiting this here to Spark + REST, since we're adding testing for format version 2 + 3 here.

Comment: Does this mean we drop the coverage for the Hadoop/Hive catalogs for format version 2 while adding format version 3 for the Spark and REST catalogs?

Comment: Yes, but the important piece here is rather to test V2 and V3. I don't think we want to test all possible combinations of catalogs and V2 + V3, as that would be a large test matrix.

Comment: I think we can just add the following lines to keep the original v2 coverage and then selectively enable v3 for the catalogs that are ready. Since many Iceberg users are still in the process of adopting a REST-based catalog, and v2 is our default spec at the moment, I think keeping the current coverage in place to test metadata table changes against it is essential.
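A hypothetical sketch of what such additional rows could look like; the reviewer's actual lines are not included in this extract, and the SparkCatalogConfig.HIVE / SparkCatalogConfig.HADOOP entries are assumed to exist as they do in other Iceberg Spark test configs.

```java
// Hypothetical sketch (not the reviewer's actual lines): extra rows that would restore
// the original v2 coverage for the Hive and Hadoop catalogs. Assumes the same imports
// as the test class, i.e. org.apache.iceberg.spark.SparkCatalogConfig.
protected static Object[][] additionalV2CatalogParameters() {
  return new Object[][] {
    {
      SparkCatalogConfig.HIVE.catalogName(),
      SparkCatalogConfig.HIVE.implementation(),
      SparkCatalogConfig.HIVE.properties(),
      2
    },
    {
      SparkCatalogConfig.HADOOP.catalogName(),
      SparkCatalogConfig.HADOOP.implementation(),
      SparkCatalogConfig.HADOOP.properties(),
      2
    }
  };
}
```

These rows would then be appended to the Object[][] returned by parameters(), alongside the Spark and REST entries shown in the diff.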