Skip to content

Commit 30cca6d

Browse files
committed
Make explain plan queries honor query context from SET statements.
- EXPLAIN PLAN queries were ignoring the query context parameters from the SET statement. - This was because the planner config was eagerly built before the SET statement list from the SQL query was processed and the query context extracted from it. - This patch now overrides the planner config in processStatementList. To make this override possible, the field is made non-final (similar to other non-final mutable members in PlannerContext).
1 parent a23170e commit 30cca6d

File tree

4 files changed

+132
-3
lines changed

4 files changed

+132
-3
lines changed

sql/src/main/java/org/apache/druid/sql/calcite/planner/DruidPlanner.java

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -297,8 +297,9 @@ public void close()
297297
* If an {@link SqlNode} is a {@link SqlNodeList}, it must consist of 0 or more {@link SqlSetOption} followed by a
298298
* single {@link SqlNode} which is NOT a {@link SqlSetOption}. All {@link SqlSetOption} will be converted into a
299299
* context parameters {@link Map} and added to the {@link PlannerContext} with
300-
* {@link PlannerContext#addAllToQueryContext(Map)}. The final {@link SqlNode} of the {@link SqlNodeList} is returned
301-
* by this method as the {@link SqlNode} which should actually be validated and executed, and will have access to the
300+
* {@link PlannerContext#addAllToQueryContext(Map)} and {@link PlannerContext#addAllToPlannerConfig(Map)}.
301+
* The final {@link SqlNode} of the {@link SqlNodeList} is returned by this method as the {@link SqlNode} which
302+
* should actually be validated and executed, and will have access to the
302303
* modified query context through the {@link PlannerContext}. {@link SqlSetOption} override any existing query
303304
* context parameter values.
304305
*/
@@ -349,6 +350,7 @@ private SqlNode processStatementList(SqlNode root)
349350
throw InvalidSqlInput.exception("Statement list is missing a non-SET statement to execute");
350351
}
351352
plannerContext.addAllToQueryContext(contextMap);
353+
plannerContext.addAllToPlannerConfig(contextMap);
352354
}
353355
return root;
354356
}

sql/src/main/java/org/apache/druid/sql/calcite/planner/PlannerContext.java

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -128,13 +128,13 @@ public class PlannerContext
128128
private final PlannerToolbox plannerToolbox;
129129
private final ExpressionParser expressionParser;
130130
private final String sql;
131-
private final PlannerConfig plannerConfig;
132131
private final SqlEngine engine;
133132
private final Map<String, Object> queryContext;
134133
private final CopyOnWriteArrayList<String> nativeQueryIds = new CopyOnWriteArrayList<>();
135134
private final PlannerHook hook;
136135
private final Set<String> lookupsToLoad = new HashSet<>();
137136

137+
private PlannerConfig plannerConfig;
138138
private String sqlQueryId;
139139
private boolean stringifyArrays;
140140
private boolean useBoundsAndSelectors;
@@ -559,6 +559,14 @@ public void addAllToQueryContext(Map<String, Object> toAdd)
559559
initializeContextFields();
560560
}
561561

562+
/**
563+
* Add additional query context parameters to planner config, overriding any existing values.
564+
*/
565+
public void addAllToPlannerConfig(Map<String, Object> toAdd)
566+
{
567+
this.plannerConfig = this.plannerConfig.withOverrides(toAdd);
568+
}
569+
562570
public SqlEngine getEngine()
563571
{
564572
return engine;

sql/src/test/java/org/apache/druid/sql/calcite/CalciteExplainQueryTest.java

Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
package org.apache.druid.sql.calcite;
2121

2222
import com.google.common.collect.ImmutableList;
23+
import com.google.common.collect.ImmutableMap;
2324
import org.apache.druid.sql.calcite.planner.PlannerConfig;
2425
import org.apache.druid.sql.calcite.util.CalciteTests;
2526
import org.junit.jupiter.api.Test;
@@ -113,6 +114,80 @@ public void testExplainExactCountDistinctOfSemiJoinResult()
113114
);
114115
}
115116

117+
@Test
118+
public void testSetStatementWithExplainSanity()
119+
{
120+
// Skip vectorization since otherwise the "context" will change for each subtest.
121+
skipVectorize();
122+
123+
final String query = "SET plannerStrategy = 'DECOUPLED';\n"
124+
+ " EXPLAIN PLAN FOR SELECT COUNT(*)\n"
125+
+ "FROM (\n"
126+
+ " SELECT DISTINCT dim2\n"
127+
+ " FROM druid.foo\n"
128+
+ " WHERE SUBSTRING(dim2, 1, 1) IN (\n"
129+
+ " SELECT SUBSTRING(dim1, 1, 1) FROM druid.foo WHERE dim1 IS NOT NULL\n"
130+
+ " )\n"
131+
+ ")";
132+
133+
final String explanation = "DruidAggregate(group=[{}], EXPR$0=[COUNT()], druid=[logical])\n" +
134+
" DruidAggregate(group=[{0}], druid=[logical])\n" +
135+
" DruidJoin(condition=[=($1, $2)], joinType=[inner])\n" +
136+
" DruidProject(dim2=[$2], $f1=[SUBSTRING($2, 1, 1)], druid=[logical])\n" +
137+
" DruidTableScan(table=[[druid, foo]], druid=[logical])\n" +
138+
" DruidAggregate(group=[{0}], druid=[logical])\n" +
139+
" DruidProject(EXPR$0=[SUBSTRING($1, 1, 1)], druid=[logical])\n" +
140+
" DruidFilter(condition=[IS NOT NULL($1)])\n" +
141+
" DruidTableScan(table=[[druid, foo]], druid=[logical])\n";
142+
final String resources = "[{\"name\":\"foo\",\"type\":\"DATASOURCE\"}]";
143+
final String attributes = "{\"statementType\":\"SELECT\"}";
144+
145+
testQuery(
146+
query,
147+
ImmutableList.of(),
148+
ImmutableList.of(new Object[]{explanation, resources, attributes})
149+
);
150+
}
151+
152+
@Test
153+
public void testMultiStatementSetsContextOverridesQueryContext()
154+
{
155+
// Skip vectorization since otherwise the "context" will change for each subtest.
156+
skipVectorize();
157+
158+
final String query = "SET plannerStrategy = 'DECOUPLED';\n"
159+
+ " SET timeout = 90000;\n"
160+
+ " EXPLAIN PLAN FOR \n"
161+
+ "SELECT COUNT(*)\n"
162+
+ "FROM (\n"
163+
+ " SELECT DISTINCT dim2\n"
164+
+ " FROM druid.foo\n"
165+
+ " WHERE SUBSTRING(dim2, 1, 1) IN (\n"
166+
+ " SELECT SUBSTRING(dim1, 1, 1) FROM druid.foo WHERE dim1 IS NOT NULL\n"
167+
+ " )\n"
168+
+ ")";
169+
170+
final String explanation = "DruidAggregate(group=[{}], EXPR$0=[COUNT()], druid=[logical])\n" +
171+
" DruidAggregate(group=[{0}], druid=[logical])\n" +
172+
" DruidJoin(condition=[=($1, $2)], joinType=[inner])\n" +
173+
" DruidProject(dim2=[$2], $f1=[SUBSTRING($2, 1, 1)], druid=[logical])\n" +
174+
" DruidTableScan(table=[[druid, foo]], druid=[logical])\n" +
175+
" DruidAggregate(group=[{0}], druid=[logical])\n" +
176+
" DruidProject(EXPR$0=[SUBSTRING($1, 1, 1)], druid=[logical])\n" +
177+
" DruidFilter(condition=[IS NOT NULL($1)])\n" +
178+
" DruidTableScan(table=[[druid, foo]], druid=[logical])\n";
179+
180+
final String resources = "[{\"name\":\"foo\",\"type\":\"DATASOURCE\"}]";
181+
final String attributes = "{\"statementType\":\"SELECT\"}";
182+
183+
testQuery(
184+
query,
185+
ImmutableMap.of("plannerStrategy", "COUPLED"),
186+
ImmutableList.of(),
187+
ImmutableList.of(new Object[]{explanation, resources, attributes})
188+
);
189+
}
190+
116191
@Test
117192
public void testExplainSelectStar()
118193
{

sql/src/test/java/org/apache/druid/sql/calcite/CalciteInsertDmlTest.java

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -901,6 +901,50 @@ public void testExplainPlanInsertJoinQuery()
901901
didTest = true;
902902
}
903903

904+
@Test
905+
public void testSetStatementWithExplainPlanInsertJoinQuery()
906+
{
907+
skipVectorize();
908+
909+
final String resources = "[{\"name\":\"dst\",\"type\":\"DATASOURCE\"},{\"name\":\"foo\",\"type\":\"DATASOURCE\"}]";
910+
final String attributes = "{\"statementType\":\"INSERT\",\"targetDataSource\":\"dst\",\"partitionedBy\":\"DAY\",\"clusteredBy\":[\"floor_m1\",\"dim1\",\"CEIL(\\\"m2\\\")\"]}";
911+
912+
final String sql = "SET plannerStrategy = 'DECOUPLED';\n"
913+
+ " SET timeout = 9000;\n"
914+
+ "EXPLAIN PLAN FOR \n"
915+
+ "INSERT INTO druid.dst \n"
916+
+ "SELECT __time, FLOOR(m1) as floor_m1, dim1, CEIL(m2) as ceil_m2 FROM foo \n"
917+
+ "PARTITIONED BY FLOOR(__time TO DAY) CLUSTERED BY 2, dim1, CEIL(m2)";
918+
919+
// Test correctness of the query when only the CLUSTERED BY clause is present
920+
final String explanation =
921+
"DruidSort(sort0=[$1], sort1=[$2], sort2=[$3], dir0=[ASC], dir1=[ASC], dir2=[ASC], druid=[logical])\n" +
922+
" DruidProject(__time=[$0], floor_m1=[FLOOR($5)], dim1=[$1], ceil_m2=[CEIL($6)], druid=[logical])\n" +
923+
" DruidTableScan(table=[[druid, foo]], druid=[logical])\n";
924+
925+
testQuery(
926+
PLANNER_CONFIG_NATIVE_QUERY_EXPLAIN,
927+
ImmutableMap.of("sqlQueryId", "dummy"),
928+
Collections.emptyList(),
929+
sql,
930+
CalciteTests.SUPER_USER_AUTH_RESULT,
931+
ImmutableList.of(),
932+
new DefaultResultsVerifier(
933+
ImmutableList.of(
934+
new Object[]{
935+
explanation,
936+
resources,
937+
attributes
938+
}
939+
),
940+
null
941+
)
942+
);
943+
944+
// Not using testIngestionQuery, so must set didTest manually to satisfy the check in tearDown.
945+
didTest = true;
946+
}
947+
904948
@Test
905949
public void testExplainPlanInsertWithClusteredByDescThrowsException()
906950
{

0 commit comments

Comments (0)