Commit

apply correct code style
twuebi committed Feb 26, 2025
1 parent d377ecc commit 1ed80de
Showing 3 changed files with 112 additions and 83 deletions.
@@ -227,15 +227,13 @@ public static void main(String[] args)
}
}

public static final class IcebergLakekeeperQueryRunnerMain {
private IcebergLakekeeperQueryRunnerMain() {
}
public static final class IcebergLakekeeperQueryRunnerMain
{
private IcebergLakekeeperQueryRunnerMain() {}

public static void main(String[] args)
throws Exception {


@SuppressWarnings("resource")
throws Exception
{
TestingLakekeeperCatalog lakekeeperCatalog = new TestingLakekeeperCatalog();
Logger log = Logger.get(IcebergLakekeeperQueryRunnerMain.class);
try {
@@ -245,21 +243,22 @@ public static void main(String[] args)
.addIcebergProperty("iceberg.rest-catalog.uri", lakekeeperCatalog.restUri() + "/catalog")
.addIcebergProperty("iceberg.rest-catalog.warehouse", TestingLakekeeperCatalog.DEFAULT_WAREHOUSE)
.addIcebergProperty("iceberg.rest-catalog.vended-credentials-enabled", "true")
.addIcebergProperty("s3.endpoint", "http://localhost:" + lakekeeperCatalog.minioPort())
.addIcebergProperty("s3.endpoint", lakekeeperCatalog.externalMinioAddress())
.addIcebergProperty("fs.native-s3.enabled", "true")
.addIcebergProperty("s3.region", "dummy")
.addIcebergProperty("s3.path-style-access", "true")
.setInitialTables(TpchTable.getTables())
.build();
log.info("======== SERVER STARTED ========");
log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl());
} catch (Exception e) {
}
catch (Exception e) {
log.error("Error starting server: \n%s", lakekeeperCatalog.getLogs());
throw e;
} finally {
}
finally {
lakekeeperCatalog.close();
}

}
}

plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergLakekeeperCatalogConnectorSmokeTest.java
@@ -38,29 +38,32 @@
import java.io.IOException;
import java.io.UncheckedIOException;

import static io.trino.plugin.iceberg.IcebergTestUtils.*;
import static io.trino.plugin.iceberg.IcebergTestUtils.checkOrcFileSorting;
import static io.trino.plugin.iceberg.IcebergTestUtils.checkParquetFileSorting;
import static io.trino.testing.TestingNames.randomNameSuffix;
import static io.trino.testing.containers.Minio.MINIO_SECRET_KEY;
import static io.trino.testing.containers.Minio.MINIO_ACCESS_KEY;
import static org.apache.iceberg.FileFormat.PARQUET;

import static io.trino.testing.containers.Minio.MINIO_SECRET_KEY;
import static java.lang.String.format;
import static org.apache.iceberg.FileFormat.PARQUET;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat;
import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS;

@Isolated // TODO remove
@TestInstance(PER_CLASS)
final class TestIcebergLakekeeperCatalogConnectorSmokeTest
extends BaseIcebergConnectorSmokeTest {
extends BaseIcebergConnectorSmokeTest
{
private TestingLakekeeperCatalog lakekeeperCatalog;

public TestIcebergLakekeeperCatalogConnectorSmokeTest() {
public TestIcebergLakekeeperCatalogConnectorSmokeTest()
{
super(new IcebergConfig().getFileFormat().toIceberg());
}

@Override
protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) {
protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior)
{
return switch (connectorBehavior) {
case SUPPORTS_CREATE_MATERIALIZED_VIEW,
SUPPORTS_RENAME_SCHEMA -> false;
@@ -70,7 +73,8 @@ protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) {

@Override
protected QueryRunner createQueryRunner()
throws Exception {
throws Exception
{
lakekeeperCatalog = closeAfterClass(new TestingLakekeeperCatalog("125ms"));

Check failure on line 78 in plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestIcebergLakekeeperCatalogConnectorSmokeTest.java (GitHub Actions / test (plugin/trino-iceberg)): Container startup failed for image minio/minio:RELEASE.2024-12-18T13-15-44Z

return IcebergQueryRunner.builder()
@@ -96,19 +100,20 @@ protected QueryRunner createQueryRunner()
}

@Override
protected void dropTableFromMetastore(String tableName) {
protected void dropTableFromMetastore(String tableName)
{
lakekeeperCatalog.dropWithoutPurge(getSession().getSchema().orElseThrow(), tableName);
}

@Override
protected String getMetadataLocation(String tableName) {
protected String getMetadataLocation(String tableName)
{
TrinoCatalogFactory catalogFactory = ((IcebergConnector) getQueryRunner().getCoordinator().getConnector("iceberg")).getInjector().getInstance(TrinoCatalogFactory.class);
TrinoCatalog trinoCatalog = catalogFactory.create(getSession().getIdentity().toConnectorIdentity());
BaseTable table = trinoCatalog.loadTable(getSession().toConnectorSession(), new SchemaTableName(getSession().getSchema().orElseThrow(), tableName));
return table.operations().current().metadataFileLocation();
}


@Override
@Test
@Disabled("Disabled since unregister -> register is currently unsupported by lakekeeper due to defaulting to purgeRequested = true.")
@@ -124,40 +129,47 @@ public void testRepeatUnregisterTable()
}

@Override
protected String schemaPath() {
protected String schemaPath()
{
return format("s3://%s/%s", TestingLakekeeperCatalog.DEFAULT_BUCKET, getSession().getSchema().orElseThrow());
}

@Override
protected boolean locationExists(String location) {
protected boolean locationExists(String location)
{
try {
// we're using s3 which doesn't have the notation of a directory,
// so we just check if there are any files
return fileSystem.listFiles(Location.of(location)).hasNext();
} catch (IOException e) {
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}

@Override
protected boolean isFileSorted(Location path, String sortColumnName) {
protected boolean isFileSorted(Location path, String sortColumnName)
{
if (format == PARQUET) {
return checkParquetFileSorting(fileSystem.newInputFile(path), sortColumnName);
}
return checkOrcFileSorting(fileSystem, path, sortColumnName);
}

@Override
protected void deleteDirectory(String location) {
protected void deleteDirectory(String location)
{
try {
fileSystem.deleteDirectory(Location.of(location));
} catch (IOException e) {
}
catch (IOException e) {
throw new UncheckedIOException(e);
}
}

@Test
void testNestedNamespace() {
void testNestedNamespace()
{
String parentNamespace = "level1_" + randomNameSuffix();
String nestedNamespace = parentNamespace + ".level2_" + randomNameSuffix();

@@ -172,35 +184,38 @@ void testNestedNamespace() {

@Test
@Override
public void testMaterializedView() {
public void testMaterializedView()
{
assertThatThrownBy(super::testMaterializedView)
.hasMessageContaining("createMaterializedView is not supported for Iceberg REST catalog");
}

@Test
@Override
public void testRenameSchema() {
public void testRenameSchema()
{
assertThatThrownBy(super::testRenameSchema)
.hasMessageContaining("renameNamespace is not supported for Iceberg REST catalog");
}


@Test
@Override
public void testCreateTableWithTrailingSpaceInLocation() {
public void testCreateTableWithTrailingSpaceInLocation()
{
assertThatThrownBy(super::testCreateTableWithTrailingSpaceInLocation).hasStackTraceContaining("Trailing whitespaces are forbidden");
}

@Test
@Override
public void testRegisterTableWithTrailingSpaceInLocation() {
public void testRegisterTableWithTrailingSpaceInLocation()
{
assertThatThrownBy(super::testCreateTableWithTrailingSpaceInLocation).hasStackTraceContaining("Trailing whitespaces are forbidden");
}


@Test
@Override
public void testShowCreateTable() {
public void testShowCreateTable()
{
String schemaName = getSession().getSchema().orElseThrow();
Assertions.assertThat((String) computeScalar("SHOW CREATE TABLE region"))
.matches("CREATE TABLE iceberg." + schemaName + ".region \\(\n" +
@@ -221,7 +236,8 @@ public void testShowCreateTable() {
@Override
@Test
public void testDropTableWithMissingMetadataFile()
throws Exception {
throws Exception
{
String tableName = "test_drop_table_with_missing_metadata_file_" + randomNameSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT 1 x, 'INDIA' y", 1);

@@ -241,7 +257,8 @@ public void testDropTableWithMissingMetadataFile()
// gotta wait for the task queue to run the cleanup job
try {
Thread.sleep(1100);
} catch (InterruptedException e) {
}
catch (InterruptedException e) {
throw new RuntimeException(e);
}
Assertions.assertThat(fileSystem.listFiles(tableLocation).hasNext())
Expand All @@ -253,7 +270,8 @@ public void testDropTableWithMissingMetadataFile()
@Override
@Test
public void testDropTableWithMissingManifestListFile()
throws Exception {
throws Exception
{
String tableName = "test_drop_table_with_missing_manifest_list_file_" + randomNameSuffix();
assertUpdate("CREATE TABLE " + tableName + " AS SELECT 1 x, 'INDIA' y", 1);

@@ -276,7 +294,8 @@ public void testDropTableWithMissingManifestListFile()
// gotta wait for the task queue to run the cleanup job
try {
Thread.sleep(1100);
} catch (InterruptedException e) {
}
catch (InterruptedException e) {
throw new RuntimeException(e);
}
Assertions.assertThat(fileSystem.listFiles(tableLocation).hasNext())
@@ -313,15 +332,15 @@ public void testDropTableWithMissingDataFile()
// gotta wait for the task queue to run the cleanup job
try {
Thread.sleep(1100);
} catch (InterruptedException e) {
}
catch (InterruptedException e) {
throw new RuntimeException(e);
}
Assertions.assertThat(fileSystem.listFiles(tableLocation).hasNext())
.describedAs(String.format("Table location: %s should not exist", tableLocation))
.isFalse();
}


// Lakekeeper does async drops, so we need to wait for the drop to complete
@Test
@Override
@@ -347,15 +366,15 @@ public void testDropTableWithMissingSnapshotFile()
Assertions.assertThat(getQueryRunner().tableExists(getSession(), tableName)).isFalse();
try {
Thread.sleep(1100);
} catch (InterruptedException e) {
}
catch (InterruptedException e) {
throw new RuntimeException(e);
}
Assertions.assertThat(fileSystem.listFiles(tableLocation).hasNext())
.describedAs(String.format("Table location: %s should not exist", tableLocation))
.isFalse();
}


@Test
@Override
public void testRegisterTableWithDroppedTable()
@@ -372,7 +391,8 @@ public void testRegisterTableWithDroppedTable()

try {
Thread.sleep(1100);
} catch (InterruptedException e) {
}
catch (InterruptedException e) {
throw new RuntimeException(e);
}

@@ -383,14 +403,14 @@ public void testRegisterTableWithDroppedTable()
@Test
@Override
@Disabled("Disabled due to https://github.com/trinodb/trino/issues/23941")
public void testDeleteRowsConcurrently() {

public void testDeleteRowsConcurrently()
{
}

@Test
@Override
public void testUnregisterTableNotExistingTable() {
public void testUnregisterTableNotExistingTable()
{
assertThatThrownBy(super::testUnregisterTableNotExistingTable).hasStackTraceContaining("Forbidden: Table action can_drop forbidden for Anonymous");
}

}
