6 | 6 |
7 | 7 | import static io.airbyte.cdk.db.jdbc.DateTimeConverter.putJavaSQLTime;
8 | 8 | import static io.airbyte.integrations.destination.redshift.operations.RedshiftSqlOperations.escapeStringLiteral;
| 9 | +import static org.jooq.impl.DSL.createView;
| 10 | +import static org.jooq.impl.DSL.quotedName;
| 11 | +import static org.jooq.impl.DSL.select;
| 12 | +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
9 | 13 | import static org.junit.jupiter.api.Assertions.assertEquals;
10 | 14 | import static org.junit.jupiter.api.Assertions.assertFalse;
| 15 | +import static org.junit.jupiter.api.Assertions.assertThrowsExactly;
11 | 16 | import static org.junit.jupiter.api.Assertions.assertTrue;
12 | 17 |
13 | 18 | import com.fasterxml.jackson.databind.JsonNode;
19 | 24 | import io.airbyte.cdk.db.jdbc.JdbcUtils;
20 | 25 | import io.airbyte.cdk.integrations.destination.jdbc.typing_deduping.JdbcSqlGenerator;
21 | 26 | import io.airbyte.cdk.integrations.standardtest.destination.typing_deduping.JdbcSqlGeneratorIntegrationTest;
| 27 | +import io.airbyte.commons.exceptions.ConfigErrorException;
22 | 28 | import io.airbyte.commons.json.Jsons;
23 | 29 | import io.airbyte.integrations.base.destination.typing_deduping.DestinationHandler;
24 | 30 | import io.airbyte.integrations.base.destination.typing_deduping.DestinationInitialStatus;
39 | 45 | import org.jooq.DataType;
40 | 46 | import org.jooq.Field;
41 | 47 | import org.jooq.SQLDialect;
| 48 | +import org.jooq.conf.ParamType;
42 | 49 | import org.jooq.conf.Settings;
43 | 50 | import org.jooq.impl.DSL;
44 | 51 | import org.jooq.impl.DefaultDataType;
@@ -143,7 +150,7 @@ public static void teardownRedshift() throws Exception {
143 | 150 |
144 | 151 | @Override
145 | 152 | protected JdbcSqlGenerator getSqlGenerator() {
146 | | - return new RedshiftSqlGenerator(new RedshiftSQLNameTransformer()) {
| 153 | + return new RedshiftSqlGenerator(new RedshiftSQLNameTransformer(), false) {
147 | 154 |
148 | 155 | // Override only for tests to print formatted SQL. The actual implementation should use unformatted
149 | 156 | // to save bytes.
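
The only change in this hunk is the new boolean constructor argument, presumably the cascadeDrop flag exercised by the tests added below. A minimal sketch of the effect such a flag would have on the generated DROP statement; the class and method names are illustrative assumptions, not the actual RedshiftSqlGenerator code (which assembles its SQL through jOOQ):

// Illustrative sketch only, not the real generator.
final class CascadeDropSketch {

  static String dropTableSql(final String schema, final String table, final boolean cascadeDrop) {
    // Redshift refuses to drop a table that other objects (e.g. views) depend on
    // unless the statement ends with CASCADE, which also drops those dependents.
    final String base = "DROP TABLE IF EXISTS \"" + schema + "\".\"" + table + "\"";
    return cascadeDrop ? base + " CASCADE" : base;
  }
}
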
@@ -193,4 +200,41 @@ public void testCreateTableIncremental() throws Exception {
193 | 200 | // TODO assert on table clustering, etc.
194 | 201 | }
195 | 202 |
| 203 | + /**
| 204 | + * Verify that we correctly DROP...CASCADE the final table when cascadeDrop is enabled.
| 205 | + */
| 206 | + @Test
| 207 | + public void testCascadeDropEnabled() throws Exception {
| 208 | + // Explicitly create a sqlgenerator with cascadeDrop=true
| 209 | + final RedshiftSqlGenerator generator = new RedshiftSqlGenerator(new RedshiftSQLNameTransformer(), true);
| 210 | + // Create a table, then create a view referencing it
| 211 | + getDestinationHandler().execute(generator.createTable(getIncrementalAppendStream(), "", false));
| 212 | + database.execute(createView(quotedName(getIncrementalAppendStream().getId().getFinalNamespace(), "example_view"))
| 213 | + .as(select().from(quotedName(getIncrementalAppendStream().getId().getFinalNamespace(), getIncrementalAppendStream().getId().getFinalName())))
| 214 | + .getSQL(ParamType.INLINED));
| 215 | + // Create a "soft reset" table
| 216 | + getDestinationHandler().execute(generator.createTable(getIncrementalDedupStream(), "_soft_reset", false));
| 217 | +
| 218 | + // Overwriting the first table with the second table should succeed.
| 219 | + assertDoesNotThrow(() -> getDestinationHandler().execute(generator.overwriteFinalTable(getIncrementalDedupStream().getId(), "_soft_reset")));
| 220 | + }
| 221 | +
| 222 | + @Test
| 223 | + public void testCascadeDropDisabled() throws Exception {
| 224 | + // Explicitly create a sqlgenerator with cascadeDrop=false
| 225 | + final RedshiftSqlGenerator generator = new RedshiftSqlGenerator(new RedshiftSQLNameTransformer(), false);
| 226 | + // Create a table, then create a view referencing it
| 227 | + getDestinationHandler().execute(generator.createTable(getIncrementalAppendStream(), "", false));
| 228 | + database.execute(createView(quotedName(getIncrementalAppendStream().getId().getFinalNamespace(), "example_view"))
| 229 | + .as(select().from(quotedName(getIncrementalAppendStream().getId().getFinalNamespace(), getIncrementalAppendStream().getId().getFinalName())))
| 230 | + .getSQL(ParamType.INLINED));
| 231 | + // Create a "soft reset" table
| 232 | + getDestinationHandler().execute(generator.createTable(getIncrementalDedupStream(), "_soft_reset", false));
| 233 | +
| 234 | + // Overwriting the first table with the second table should fail with a ConfigErrorException.
| 235 | + Throwable t = assertThrowsExactly(ConfigErrorException.class, |
| 236 | + () -> getDestinationHandler().execute(generator.overwriteFinalTable(getIncrementalDedupStream().getId(), "_soft_reset"))); |
| 237 | + assertEquals("Failed to drop table without the CASCADE option. Consider changing the drop_cascade configuration parameter", t.getMessage());
| 238 | + }
| 239 | +
196 | 240 | }
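
For context on the message asserted in testCascadeDropDisabled: it implies that the execution path intercepts Redshift's dependency error and rewraps it as a ConfigErrorException. A rough sketch of that mapping, assuming a plain JDBC execution path and the standard Redshift/Postgres "other objects depend on it" error text; this is not the actual destination handler code:

import io.airbyte.commons.exceptions.ConfigErrorException;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

final class DropErrorMappingSketch {

  // Executes a single statement and translates a dependency failure into the
  // user-facing error the test asserts on. The detection heuristic is an assumption.
  static void execute(final Connection connection, final String sql) throws SQLException {
    try (Statement statement = connection.createStatement()) {
      statement.execute(sql);
    } catch (final SQLException e) {
      final String message = e.getMessage();
      if (message != null && message.contains("because other objects depend on it")) {
        throw new ConfigErrorException(
            "Failed to drop table without the CASCADE option. Consider changing the drop_cascade configuration parameter", e);
      }
      throw e;
    }
  }
}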