diff --git a/baremaps-calcite/pom.xml b/baremaps-calcite/pom.xml index e0ec8d319..b54f87e80 100644 --- a/baremaps-calcite/pom.xml +++ b/baremaps-calcite/pom.xml @@ -27,8 +27,6 @@ limitations under the License. Apache Baremaps Calcite - 21 - 21 UTF-8 @@ -85,10 +83,94 @@ limitations under the License. org.apache.calcite calcite-core - - org.apache.calcite - calcite-server - + + + + org.apache.maven.plugins + maven-deploy-plugin + 2.8.2 + + false + + + + maven-resources-plugin + + + copy-fmpp-resources + + copy-resources + + initialize + + ${project.build.directory}/codegen + + + src/main/codegen + false + + + + + + + + org.apache.maven.plugins + maven-antrun-plugin + + + net.sourceforge.fmpp + fmpp + 0.9.16 + + + + + generate-fmpp-sources + + run + + initialize + + + + + + + + + + + org.codehaus.mojo + javacc-maven-plugin + 3.1.1 + + + javacc + + javacc + + + ${project.build.directory}/generated-sources/fmpp + ${project.build.directory}/generated-sources/calcite + + **/Parser.jj + + 2 + false + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + + org.apache.baremaps.calcite.sql + + + + diff --git a/baremaps-calcite/src/main/codegen/config.fmpp b/baremaps-calcite/src/main/codegen/config.fmpp new file mode 100644 index 000000000..ffa062bc4 --- /dev/null +++ b/baremaps-calcite/src/main/codegen/config.fmpp @@ -0,0 +1,108 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +data: { + # Data declarations for this parser. + # + # Default declarations are in default_config.fmpp; if you do not include a + # declaration ('imports' or 'nonReservedKeywords', for example) in this file, + # FMPP will use the declaration from default_config.fmpp. + parser: { + # Generated parser implementation class package and name + package: "org.apache.baremaps.calcite.sql", + class: "BaremapsSqlDdlParser", + + # List of import statements. + imports: [ + "org.apache.calcite.schema.ColumnStrategy" + "org.apache.calcite.sql.SqlBasicCall" + "org.apache.calcite.sql.SqlCreate" + "org.apache.calcite.sql.SqlDrop" + "org.apache.calcite.sql.SqlTruncate" + "org.apache.calcite.sql.ddl.SqlCreateTableLike" + "org.apache.baremaps.calcite.ddl.SqlDdlNodes" + ] + + # List of new keywords. Example: "DATABASES", "TABLES". If the keyword is + # not a reserved keyword, add it to the 'nonReservedKeywords' section. + keywords: [ + "IF" + "MATERIALIZED" + "STORED" + "VIRTUAL" + "JAR" + "FILE" + "ARCHIVE" + ] + + # List of non-reserved keywords to add; + # items in this list become non-reserved + nonReservedKeywordsToAdd: [ + # not in core, added in server + "IF" + "MATERIALIZED" + "STORED" + "VIRTUAL" + "JAR" + "FILE" + "ARCHIVE" + ] + + # List of methods for parsing extensions to "CREATE [OR REPLACE]" calls. + # Each must accept arguments "(SqlParserPos pos, boolean replace)". + # Example: "SqlCreateForeignSchema". 
+ createStatementParserMethods: [ + "SqlCreateForeignSchema" + "SqlCreateMaterializedView" + "SqlCreateSchema" + "SqlCreateTable" + "SqlCreateType" + "SqlCreateView" + "SqlCreateFunction" + ] + + # List of methods for parsing extensions to "DROP" calls. + # Each must accept arguments "(SqlParserPos pos)". + # Example: "SqlDropSchema". + dropStatementParserMethods: [ + "SqlDropMaterializedView" + "SqlDropSchema" + "SqlDropTable" + "SqlDropType" + "SqlDropView" + "SqlDropFunction" + ] + + # List of methods for parsing extensions to "TRUNCATE" calls. + # Each must accept arguments "(SqlParserPos pos)". + # Example: "SqlTruncateTable". + truncateStatementParserMethods: [ + "SqlTruncateTable" + ] + + # List of files in @includes directory that have parser method + # implementations for parsing custom SQL statements, literals or types + # given as part of "statementParserMethods", "literalParserMethods" or + # "dataTypeParserMethods". + # Example: "parserImpls.ftl". + implementationFiles: [ + "parserImpls.ftl" + ] + } +} + +freemarkerLinks: { + includes: includes/ +} \ No newline at end of file diff --git a/baremaps-calcite/src/main/codegen/default_config.fmpp b/baremaps-calcite/src/main/codegen/default_config.fmpp new file mode 100644 index 000000000..f3c048bd5 --- /dev/null +++ b/baremaps-calcite/src/main/codegen/default_config.fmpp @@ -0,0 +1,462 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to you under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default data declarations for parsers. +# Each of these may be overridden in a parser's config.fmpp file. +# In addition, each parser must define "package" and "class". +parser: { + # List of additional classes and packages to import. + # Example: "org.apache.calcite.sql.*", "java.util.List". + imports: [ + ] + + # List of new keywords. Example: "DATABASES", "TABLES". If the keyword is + # not a reserved keyword, add it to the 'nonReservedKeywords' section. + keywords: [ + ] + + # List of keywords from "keywords" section that are not reserved. + nonReservedKeywords: [ + "A" + "ABSENT" + "ABSOLUTE" + "ACTION" + "ADA" + "ADD" + "ADMIN" + "AFTER" + "ALWAYS" + "APPLY" + "ARRAY_AGG" + "ARRAY_CONCAT_AGG" + "ASC" + "ASSERTION" + "ASSIGNMENT" + "ATTRIBUTE" + "ATTRIBUTES" + "BEFORE" + "BERNOULLI" + "BREADTH" + "C" + "CASCADE" + "CATALOG" + "CATALOG_NAME" + "CENTURY" + "CHAIN" + "CHARACTERISTICS" + "CHARACTERS" + "CHARACTER_SET_CATALOG" + "CHARACTER_SET_NAME" + "CHARACTER_SET_SCHEMA" + "CLASS_ORIGIN" + "COBOL" + "COLLATION" + "COLLATION_CATALOG" + "COLLATION_NAME" + "COLLATION_SCHEMA" + "COLUMN_NAME" + "COMMAND_FUNCTION" + "COMMAND_FUNCTION_CODE" + "COMMITTED" + "CONDITIONAL" + "CONDITION_NUMBER" + "CONNECTION" + "CONNECTION_NAME" + "CONSTRAINT_CATALOG" + "CONSTRAINT_NAME" + "CONSTRAINTS" + "CONSTRAINT_SCHEMA" + "CONSTRUCTOR" + "CONTAINS_SUBSTR" + "CONTINUE" + "CURSOR_NAME" + "DATA" + "DATABASE" + "DATE_DIFF" + "DATE_TRUNC" + "DATETIME_DIFF" + "DATETIME_INTERVAL_CODE" + "DATETIME_INTERVAL_PRECISION" + "DATETIME_TRUNC" + "DAYOFWEEK" + "DAYOFYEAR" + "DAYS" + "DECADE" 
+ "DEFAULTS" + "DEFERRABLE" + "DEFERRED" + "DEFINED" + "DEFINER" + "DEGREE" + "DEPTH" + "DERIVED" + "DESC" + "DESCRIPTION" + "DESCRIPTOR" + "DIAGNOSTICS" + "DISPATCH" + "DOMAIN" + "DOW" + "DOY" + "DOT_FORMAT" + "DYNAMIC_FUNCTION" + "DYNAMIC_FUNCTION_CODE" + "ENCODING" + "EPOCH" + "ERROR" + "EXCEPTION" + "EXCLUDE" + "EXCLUDING" + "FINAL" + "FIRST" + "FOLLOWING" + "FORMAT" + "FORTRAN" + "FOUND" + "FRAC_SECOND" + "G" + "GENERAL" + "GENERATED" + "GEOMETRY" + "GO" + "GOTO" + "GRANTED" + "GROUP_CONCAT" + "HIERARCHY" + "HOP" + "HOURS" + "IGNORE" + "ILIKE" + "IMMEDIATE" + "IMMEDIATELY" + "IMPLEMENTATION" + "INCLUDE" + "INCLUDING" + "INCREMENT" + "INITIALLY" + "INPUT" + "INSTANCE" + "INSTANTIABLE" + "INVOKER" + "ISODOW" + "ISOLATION" + "ISOYEAR" + "JAVA" + "JSON" + "K" + "KEY" + "KEY_MEMBER" + "KEY_TYPE" + "LABEL" + "LAST" + "LENGTH" + "LEVEL" + "LIBRARY" + "LOCATOR" + "M" + "MAP" + "MATCHED" + "MAXVALUE" + "MESSAGE_LENGTH" + "MESSAGE_OCTET_LENGTH" + "MESSAGE_TEXT" + "MICROSECOND" + "MILLENNIUM" + "MILLISECOND" + "MINUTES" + "MINVALUE" + "MONTHS" + "MORE_" + "MUMPS" + "NAME" + "NAMES" + "NANOSECOND" + "NESTING" + "NORMALIZED" + "NULLABLE" + "NULLS" + "NUMBER" + "OBJECT" + "OCTETS" + "OPTION" + "OPTIONS" + "ORDERING" + "ORDINALITY" + "OTHERS" + "OUTPUT" + "OVERRIDING" + "PAD" + "PARAMETER_MODE" + "PARAMETER_NAME" + "PARAMETER_ORDINAL_POSITION" + "PARAMETER_SPECIFIC_CATALOG" + "PARAMETER_SPECIFIC_NAME" + "PARAMETER_SPECIFIC_SCHEMA" + "PARTIAL" + "PASCAL" + "PASSING" + "PASSTHROUGH" + "PAST" + "PATH" + "PIVOT" + "PLACING" + "PLAN" + "PLI" + "PRECEDING" + "PRESERVE" + "PRIOR" + "PRIVILEGES" + "PUBLIC" + "QUARTER" + "QUARTERS" + "READ" + "RELATIVE" + "REPEATABLE" + "REPLACE" + "RESPECT" + "RESTART" + "RESTRICT" + "RETURNED_CARDINALITY" + "RETURNED_LENGTH" + "RETURNED_OCTET_LENGTH" + "RETURNED_SQLSTATE" + "RETURNING" + "RLIKE" + "ROLE" + "ROUTINE" + "ROUTINE_CATALOG" + "ROUTINE_NAME" + "ROUTINE_SCHEMA" + "ROW_COUNT" + "SCALAR" + "SCALE" + "SCHEMA" + "SCHEMA_NAME" + 
"SCOPE_CATALOGS" + "SCOPE_NAME" + "SCOPE_SCHEMA" + "SECONDS" + "SECTION" + "SECURITY" + "SELF" + "SEPARATOR" + "SEQUENCE" + "SERIALIZABLE" + "SERVER" + "SERVER_NAME" + "SESSION" + "SETS" + "SIMPLE" + "SIZE" + "SOURCE" + "SPACE" + "SPECIFIC_NAME" + "SQL_BIGINT" + "SQL_BINARY" + "SQL_BIT" + "SQL_BLOB" + "SQL_BOOLEAN" + "SQL_CHAR" + "SQL_CLOB" + "SQL_DATE" + "SQL_DECIMAL" + "SQL_DOUBLE" + "SQL_FLOAT" + "SQL_INTEGER" + "SQL_INTERVAL_DAY" + "SQL_INTERVAL_DAY_TO_HOUR" + "SQL_INTERVAL_DAY_TO_MINUTE" + "SQL_INTERVAL_DAY_TO_SECOND" + "SQL_INTERVAL_HOUR" + "SQL_INTERVAL_HOUR_TO_MINUTE" + "SQL_INTERVAL_HOUR_TO_SECOND" + "SQL_INTERVAL_MINUTE" + "SQL_INTERVAL_MINUTE_TO_SECOND" + "SQL_INTERVAL_MONTH" + "SQL_INTERVAL_SECOND" + "SQL_INTERVAL_YEAR" + "SQL_INTERVAL_YEAR_TO_MONTH" + "SQL_LONGVARBINARY" + "SQL_LONGVARCHAR" + "SQL_LONGVARNCHAR" + "SQL_NCHAR" + "SQL_NCLOB" + "SQL_NUMERIC" + "SQL_NVARCHAR" + "SQL_REAL" + "SQL_SMALLINT" + "SQL_TIME" + "SQL_TIMESTAMP" + "SQL_TINYINT" + "SQL_TSI_DAY" + "SQL_TSI_FRAC_SECOND" + "SQL_TSI_HOUR" + "SQL_TSI_MICROSECOND" + "SQL_TSI_MINUTE" + "SQL_TSI_MONTH" + "SQL_TSI_QUARTER" + "SQL_TSI_SECOND" + "SQL_TSI_WEEK" + "SQL_TSI_YEAR" + "SQL_VARBINARY" + "SQL_VARCHAR" + "STATE" + "STATEMENT" + "STRING_AGG" + "STRUCTURE" + "STYLE" + "SUBCLASS_ORIGIN" + "SUBSTITUTE" + "TABLE_NAME" + "TEMPORARY" + "TIES" + "TIME_DIFF" + "TIME_TRUNC" + "TIMESTAMPADD" + "TIMESTAMPDIFF" + "TIMESTAMP_DIFF" + "TIMESTAMP_TRUNC" + "TOP_LEVEL_COUNT" + "TRANSACTION" + "TRANSACTIONS_ACTIVE" + "TRANSACTIONS_COMMITTED" + "TRANSACTIONS_ROLLED_BACK" + "TRANSFORM" + "TRANSFORMS" + "TRIGGER_CATALOG" + "TRIGGER_NAME" + "TRIGGER_SCHEMA" + "TUMBLE" + "TYPE" + "UNBOUNDED" + "UNCOMMITTED" + "UNCONDITIONAL" + "UNDER" + "UNPIVOT" + "UNNAMED" + "USAGE" + "USER_DEFINED_TYPE_CATALOG" + "USER_DEFINED_TYPE_CODE" + "USER_DEFINED_TYPE_NAME" + "USER_DEFINED_TYPE_SCHEMA" + "UTF16" + "UTF32" + "UTF8" + "VERSION" + "VIEW" + "WEEK" + "WEEKS" + "WORK" + "WRAPPER" + "WRITE" + "XML" + "YEARS" + "ZONE" + ] + + 
# List of non-reserved keywords to add; + # items in this list become non-reserved. + nonReservedKeywordsToAdd: [ + ] + + # List of non-reserved keywords to remove; + # items in this list become reserved. + nonReservedKeywordsToRemove: [ + ] + + # List of additional join types. Each is a method with no arguments. + # Example: "LeftSemiJoin". + joinTypes: [ + ] + + # List of methods for parsing custom SQL statements. + # Return type of method implementation should be 'SqlNode'. + # Example: "SqlShowDatabases()", "SqlShowTables()". + statementParserMethods: [ + ] + + # List of methods for parsing custom literals. + # Return type of method implementation should be "SqlNode". + # Example: ParseJsonLiteral(). + literalParserMethods: [ + ] + + # List of methods for parsing custom data types. + # Return type of method implementation should be "SqlTypeNameSpec". + # Example: SqlParseTimeStampZ(). + dataTypeParserMethods: [ + ] + + # List of methods for parsing builtin function calls. + # Return type of method implementation should be "SqlNode". + # Example: "DateTimeConstructorCall()". + builtinFunctionCallMethods: [ + ] + + # List of methods for parsing extensions to "ALTER " calls. + # Each must accept arguments "(SqlParserPos pos, String scope)". + # Example: "SqlAlterTable". + alterStatementParserMethods: [ + ] + + # List of methods for parsing extensions to "CREATE [OR REPLACE]" calls. + # Each must accept arguments "(SqlParserPos pos, boolean replace)". + # Example: "SqlCreateForeignSchema". + createStatementParserMethods: [ + ] + + # List of methods for parsing extensions to "DROP" calls. + # Each must accept arguments "(SqlParserPos pos)". + # Example: "SqlDropSchema". + dropStatementParserMethods: [ + ] + + # List of methods for parsing extensions to "TRUNCATE" calls. + # Each must accept arguments "(SqlParserPos pos)". + # Example: "SqlTruncate". + truncateStatementParserMethods: [ + ] + + # Binary operators tokens. + # Example: "< INFIX_CAST: \"::\" >". 
+ binaryOperatorsTokens: [ + ] + + # Binary operators initialization. + # Example: "InfixCast". + extraBinaryExpressions: [ + ] + + # List of files in @includes directory that have parser method + # implementations for parsing custom SQL statements, literals or types + # given as part of "statementParserMethods", "literalParserMethods" or + # "dataTypeParserMethods". + # Example: "parserImpls.ftl". + implementationFiles: [ + ] + + # Custom identifier token. + # Example: "< IDENTIFIER: (|)+ >". + customIdentifierToken: "" + + # Method for parsing "SET [OR RESET]" calls. + setOptionParserMethod: "SqlSetOption" + + includePosixOperators: false + includeCompoundIdentifier: true + includeBraces: true + includeAdditionalDeclarations: false + includeParsingStringLiteralAsArrayLiteral: false +} diff --git a/baremaps-calcite/src/main/codegen/includes/parserImpls.ftl b/baremaps-calcite/src/main/codegen/includes/parserImpls.ftl new file mode 100644 index 000000000..6cfe06d0f --- /dev/null +++ b/baremaps-calcite/src/main/codegen/includes/parserImpls.ftl @@ -0,0 +1,526 @@ +<#-- +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to you under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+--> + +boolean IfNotExistsOpt() : +{ +} +{ + { return true; } +| + { return false; } +} + +boolean IfExistsOpt() : +{ +} +{ + { return true; } +| + { return false; } +} + +SqlCreate SqlCreateSchema(Span s, boolean replace) : +{ + final boolean ifNotExists; + final SqlIdentifier id; +} +{ + ifNotExists = IfNotExistsOpt() id = CompoundIdentifier() + { + return SqlDdlNodes.createSchema(s.end(this), replace, ifNotExists, id); + } +} + +SqlCreate SqlCreateForeignSchema(Span s, boolean replace) : +{ + final boolean ifNotExists; + final SqlIdentifier id; + SqlNode type = null; + SqlNode library = null; + SqlNodeList optionList = null; +} +{ + ifNotExists = IfNotExistsOpt() id = CompoundIdentifier() + ( + type = StringLiteral() + | + library = StringLiteral() + ) + [ optionList = Options() ] + { + return SqlDdlNodes.createForeignSchema(s.end(this), replace, + ifNotExists, id, type, library, optionList); + } +} + +SqlNodeList Options() : +{ + final Span s; + final List list = new ArrayList(); +} +{ + { s = span(); } + [ + Option(list) + ( + + Option(list) + )* + ] + { + return new SqlNodeList(list, s.end(this)); + } +} + +void Option(List list) : +{ + final SqlIdentifier id; + final SqlNode value; +} +{ + id = SimpleIdentifier() + value = Literal() { + list.add(id); + list.add(value); + } +} + +SqlNodeList TableElementList() : +{ + final Span s; + final List list = new ArrayList(); +} +{ + { s = span(); } + TableElement(list) + ( + TableElement(list) + )* + { + return new SqlNodeList(list, s.end(this)); + } +} + +void TableElement(List list) : +{ + final SqlIdentifier id; + final SqlDataTypeSpec type; + final boolean nullable; + final SqlNode e; + SqlIdentifier name = null; + final SqlNodeList columnList; + final Span s = Span.of(); + final ColumnStrategy strategy; +} +{ + LOOKAHEAD(2) id = SimpleIdentifier() + ( + type = DataType() + nullable = NullableOptDefaultTrue() + ( + [ ] + e = Expression(ExprContext.ACCEPT_SUB_QUERY) + ( + { strategy = ColumnStrategy.VIRTUAL; } + 
| + { strategy = ColumnStrategy.STORED; } + | + { strategy = ColumnStrategy.VIRTUAL; } + ) + | + e = Expression(ExprContext.ACCEPT_SUB_QUERY) { + strategy = ColumnStrategy.DEFAULT; + } + | + { + e = null; + strategy = nullable ? ColumnStrategy.NULLABLE + : ColumnStrategy.NOT_NULLABLE; + } + ) + { + list.add( + SqlDdlNodes.column(s.add(id).end(this), id, + type.withNullable(nullable), e, strategy)); + } + | + { list.add(id); } + ) +| + id = SimpleIdentifier() { + list.add(id); + } +| + [ { s.add(this); } name = SimpleIdentifier() ] + ( + { s.add(this); } + e = Expression(ExprContext.ACCEPT_SUB_QUERY) { + list.add(SqlDdlNodes.check(s.end(this), name, e)); + } + | + { s.add(this); } + columnList = ParenthesizedSimpleIdentifierList() { + list.add(SqlDdlNodes.unique(s.end(columnList), name, columnList)); + } + | + { s.add(this); } + columnList = ParenthesizedSimpleIdentifierList() { + list.add(SqlDdlNodes.primary(s.end(columnList), name, columnList)); + } + ) +} + +SqlNodeList AttributeDefList() : +{ + final Span s; + final List list = new ArrayList(); +} +{ + { s = span(); } + AttributeDef(list) + ( + AttributeDef(list) + )* + { + return new SqlNodeList(list, s.end(this)); + } +} + +void AttributeDef(List list) : +{ + final SqlIdentifier id; + final SqlDataTypeSpec type; + final boolean nullable; + SqlNode e = null; + final Span s = Span.of(); +} +{ + id = SimpleIdentifier() + ( + type = DataType() + nullable = NullableOptDefaultTrue() + ) + [ e = Expression(ExprContext.ACCEPT_SUB_QUERY) ] + { + list.add(SqlDdlNodes.attribute(s.add(id).end(this), id, + type.withNullable(nullable), e, null)); + } +} + +SqlCreate SqlCreateType(Span s, boolean replace) : +{ + final SqlIdentifier id; + SqlNodeList attributeDefList = null; + SqlDataTypeSpec type = null; +} +{ + + id = CompoundIdentifier() + + ( + attributeDefList = AttributeDefList() + | + type = DataType() + ) + { + return SqlDdlNodes.createType(s.end(this), replace, id, attributeDefList, type); + } +} + +SqlNodeList 
WithOptions() : +{ + SqlNodeList list; +} +{ + list = OptionList() { return list; } +} + +SqlNodeList OptionList() : +{ + List options = new ArrayList<>(); + SqlNode option; +} +{ + option = OptionAssignment() { options.add(option); } + ( option = OptionAssignment() { options.add(option); } )* + { + return new SqlNodeList(options, + Span.of(options.get(0), options.get(options.size() - 1)).pos()); + } +} + +SqlNode OptionAssignment() : +{ + SqlIdentifier key; + SqlNode value; +} +{ + key = CompoundIdentifier() value = Literal() { + return new SqlBasicCall( + SqlStdOperatorTable.EQUALS, + new SqlNode[] { key, value }, + Span.of(key, value).pos()); + } +} + +SqlCreate SqlCreateTable(Span s, boolean replace) : +{ + final boolean ifNotExists; + final SqlIdentifier id; + SqlNodeList tableElementList = null; + SqlNode query = null; + SqlNodeList withOptions = null; + SqlCreate createTableLike = null; +} +{ + ifNotExists = IfNotExistsOpt() id = CompoundIdentifier() + ( + createTableLike = SqlCreateTableLike(s, replace, ifNotExists, id) { + return createTableLike; + } + | + [ tableElementList = TableElementList() ] + [ query = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) ] + [ withOptions = WithOptions() ] + { + return SqlDdlNodes.createTable(s.end(this), replace, ifNotExists, id, tableElementList, query, withOptions); + } + ) +} + +SqlCreate SqlCreateTableLike(Span s, boolean replace, boolean ifNotExists, SqlIdentifier id) : +{ + final SqlIdentifier sourceTable; + final boolean likeOptions; + final SqlNodeList including = new SqlNodeList(getPos()); + final SqlNodeList excluding = new SqlNodeList(getPos()); +} +{ + sourceTable = CompoundIdentifier() + [ LikeOptions(including, excluding) ] + { + return SqlDdlNodes.createTableLike(s.end(this), replace, ifNotExists, id, sourceTable, including, excluding); + } +} + +void LikeOptions(SqlNodeList including, SqlNodeList excluding) : +{ +} +{ + LikeOption(including, excluding) + ( + LikeOption(including, excluding) + )* +} + +void 
LikeOption(SqlNodeList includingOptions, SqlNodeList excludingOptions) : +{ + boolean including = false; + SqlCreateTableLike.LikeOption option; +} +{ + ( + { including = true; } + | + { including = false; } + ) + ( + { option = SqlCreateTableLike.LikeOption.ALL; } + | + { option = SqlCreateTableLike.LikeOption.DEFAULTS; } + | + { option = SqlCreateTableLike.LikeOption.GENERATED; } + ) + { + if (including) { + includingOptions.add(option.symbol(getPos())); + } else { + excludingOptions.add(option.symbol(getPos())); + } + } +} + +SqlCreate SqlCreateView(Span s, boolean replace) : +{ + final SqlIdentifier id; + SqlNodeList columnList = null; + final SqlNode query; +} +{ + id = CompoundIdentifier() + [ columnList = ParenthesizedSimpleIdentifierList() ] + query = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) { + return SqlDdlNodes.createView(s.end(this), replace, id, columnList, + query); + } +} + +SqlCreate SqlCreateMaterializedView(Span s, boolean replace) : +{ + final boolean ifNotExists; + final SqlIdentifier id; + SqlNodeList columnList = null; + final SqlNode query; +} +{ + ifNotExists = IfNotExistsOpt() + id = CompoundIdentifier() + [ columnList = ParenthesizedSimpleIdentifierList() ] + query = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) { + return SqlDdlNodes.createMaterializedView(s.end(this), replace, + ifNotExists, id, columnList, query); + } +} + +private void FunctionJarDef(SqlNodeList usingList) : +{ + final SqlDdlNodes.FileType fileType; + final SqlNode uri; +} +{ + ( + { fileType = SqlDdlNodes.FileType.ARCHIVE; } + | + { fileType = SqlDdlNodes.FileType.FILE; } + | + { fileType = SqlDdlNodes.FileType.JAR; } + ) { + usingList.add(SqlLiteral.createSymbol(fileType, getPos())); + } + uri = StringLiteral() { + usingList.add(uri); + } +} + +SqlCreate SqlCreateFunction(Span s, boolean replace) : +{ + final boolean ifNotExists; + final SqlIdentifier id; + final SqlNode className; + SqlNodeList usingList = SqlNodeList.EMPTY; +} +{ + ifNotExists = IfNotExistsOpt() 
+ id = CompoundIdentifier() + + className = StringLiteral() + [ + { + usingList = new SqlNodeList(getPos()); + } + FunctionJarDef(usingList) + ( + + FunctionJarDef(usingList) + )* + ] { + return SqlDdlNodes.createFunction(s.end(this), replace, ifNotExists, + id, className, usingList); + } +} + +SqlDrop SqlDropSchema(Span s, boolean replace) : +{ + final boolean ifExists; + final SqlIdentifier id; + final boolean foreign; +} +{ + ( + { foreign = true; } + | + { foreign = false; } + ) + ifExists = IfExistsOpt() id = CompoundIdentifier() { + return SqlDdlNodes.dropSchema(s.end(this), foreign, ifExists, id); + } +} + +SqlDrop SqlDropType(Span s, boolean replace) : +{ + final boolean ifExists; + final SqlIdentifier id; +} +{ + ifExists = IfExistsOpt() id = CompoundIdentifier() { + return SqlDdlNodes.dropType(s.end(this), ifExists, id); + } +} + +SqlDrop SqlDropTable(Span s, boolean replace) : +{ + final boolean ifExists; + final SqlIdentifier id; +} +{ +
ifExists = IfExistsOpt() id = CompoundIdentifier() { + return SqlDdlNodes.dropTable(s.end(this), ifExists, id); + } +} + +SqlTruncate SqlTruncateTable(Span s) : +{ + final SqlIdentifier id; + final boolean continueIdentity; +} +{ +
id = CompoundIdentifier() + ( + { continueIdentity = true; } + | + { continueIdentity = false; } + | + { continueIdentity = true; } + ) + { + return SqlDdlNodes.truncateTable(s.end(this), id, continueIdentity); + } +} + +SqlDrop SqlDropView(Span s, boolean replace) : +{ + final boolean ifExists; + final SqlIdentifier id; +} +{ + ifExists = IfExistsOpt() id = CompoundIdentifier() { + return SqlDdlNodes.dropView(s.end(this), ifExists, id); + } +} + +SqlDrop SqlDropMaterializedView(Span s, boolean replace) : +{ + final boolean ifExists; + final SqlIdentifier id; +} +{ + ifExists = IfExistsOpt() id = CompoundIdentifier() { + return SqlDdlNodes.dropMaterializedView(s.end(this), ifExists, id); + } +} + +SqlDrop SqlDropFunction(Span s, boolean replace) : +{ + final boolean ifExists; + final SqlIdentifier id; +} +{ + ifExists = IfExistsOpt() + id = CompoundIdentifier() { + return SqlDdlNodes.dropFunction(s.end(this), ifExists, id); + } +} \ No newline at end of file diff --git a/baremaps-calcite/src/main/codegen/templates/Parser.jj b/baremaps-calcite/src/main/codegen/templates/Parser.jj new file mode 100644 index 000000000..64cff9bea --- /dev/null +++ b/baremaps-calcite/src/main/codegen/templates/Parser.jj @@ -0,0 +1,9214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +<@pp.dropOutputFile /> + +<@pp.changeOutputFile name="javacc/Parser.jj" /> + +options { + STATIC = false; + IGNORE_CASE = true; + UNICODE_INPUT = true; +} + + +PARSER_BEGIN(${parser.class}) + +package ${parser.package}; + +<#list (parser.imports!default.parser.imports) as importStr> +import ${importStr}; + + +import org.apache.calcite.avatica.util.Casing; +import org.apache.calcite.avatica.util.TimeUnit; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.runtime.CalciteContextException; +import org.apache.calcite.sql.JoinConditionType; +import org.apache.calcite.sql.JoinType; +import org.apache.calcite.sql.SqlAlter; +import org.apache.calcite.sql.SqlAsofJoin; +import org.apache.calcite.sql.SqlBasicTypeNameSpec; +import org.apache.calcite.sql.SqlBinaryOperator; +import org.apache.calcite.sql.SqlCall; +import org.apache.calcite.sql.SqlCharStringLiteral; +import org.apache.calcite.sql.SqlCollation; +import org.apache.calcite.sql.SqlCollectionTypeNameSpec; +import org.apache.calcite.sql.SqlDataTypeSpec; +import org.apache.calcite.sql.SqlDelete; +import org.apache.calcite.sql.SqlDescribeSchema; +import org.apache.calcite.sql.SqlDescribeTable; +import org.apache.calcite.sql.SqlDynamicParam; +import org.apache.calcite.sql.SqlExplain; +import org.apache.calcite.sql.SqlExplainFormat; +import org.apache.calcite.sql.SqlExplainLevel; +import org.apache.calcite.sql.SqlFunction; +import org.apache.calcite.sql.SqlFunctionCategory; +import org.apache.calcite.sql.SqlHint; +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlInsert; +import org.apache.calcite.sql.SqlInsertKeyword; +import org.apache.calcite.sql.SqlIntervalQualifier; +import org.apache.calcite.sql.SqlJdbcDataTypeName; +import org.apache.calcite.sql.SqlJdbcFunctionCall; +import org.apache.calcite.sql.SqlJoin; +import 
org.apache.calcite.sql.SqlJsonConstructorNullClause; +import org.apache.calcite.sql.SqlJsonEncoding; +import org.apache.calcite.sql.SqlJsonExistsErrorBehavior; +import org.apache.calcite.sql.SqlJsonEmptyOrError; +import org.apache.calcite.sql.SqlJsonQueryEmptyOrErrorBehavior; +import org.apache.calcite.sql.SqlJsonQueryWrapperBehavior; +import org.apache.calcite.sql.SqlJsonValueEmptyOrErrorBehavior; +import org.apache.calcite.sql.SqlJsonValueReturning; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.SqlLambda; +import org.apache.calcite.sql.SqlLiteral; +import org.apache.calcite.sql.SqlMatchRecognize; +import org.apache.calcite.sql.SqlMerge; +import org.apache.calcite.sql.SqlMapTypeNameSpec; +import org.apache.calcite.sql.SqlNode; +import org.apache.calcite.sql.SqlNodeList; +import org.apache.calcite.sql.SqlNumericLiteral; +import org.apache.calcite.sql.SqlOperator; +import org.apache.calcite.sql.SqlOrderBy; +import org.apache.calcite.sql.SqlPivot; +import org.apache.calcite.sql.SqlPostfixOperator; +import org.apache.calcite.sql.SqlPrefixOperator; +import org.apache.calcite.sql.SqlRowTypeNameSpec; +import org.apache.calcite.sql.SqlSampleSpec; +import org.apache.calcite.sql.SqlSelect; +import org.apache.calcite.sql.SqlSelectKeyword; +import org.apache.calcite.sql.SqlSetOption; +import org.apache.calcite.sql.SqlSnapshot; +import org.apache.calcite.sql.SqlTableRef; +import org.apache.calcite.sql.SqlTypeNameSpec; +import org.apache.calcite.sql.SqlUnnestOperator; +import org.apache.calcite.sql.SqlUnpivot; +import org.apache.calcite.sql.SqlUpdate; +import org.apache.calcite.sql.SqlUserDefinedTypeNameSpec; +import org.apache.calcite.sql.SqlUtil; +import org.apache.calcite.sql.SqlWindow; +import org.apache.calcite.sql.SqlWith; +import org.apache.calcite.sql.SqlWithItem; +import org.apache.calcite.sql.fun.SqlCase; +import org.apache.calcite.sql.fun.SqlInternalOperators; +import org.apache.calcite.sql.fun.SqlLibraryOperators; +import 
org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.fun.SqlTrimFunction; +import org.apache.calcite.sql.parser.Span; +import org.apache.calcite.sql.parser.SqlAbstractParserImpl; +import org.apache.calcite.sql.parser.SqlParseException; +import org.apache.calcite.sql.parser.SqlParser; +import org.apache.calcite.sql.parser.SqlParserImplFactory; +import org.apache.calcite.sql.parser.SqlParserPos; +import org.apache.calcite.sql.parser.SqlParserUtil; +import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.sql.validate.SqlConformance; +import org.apache.calcite.sql.validate.SqlConformanceEnum; +import org.apache.calcite.util.Glossary; +import org.apache.calcite.util.Pair; +import org.apache.calcite.util.SourceStringReader; +import org.apache.calcite.util.Util; +import org.apache.calcite.util.trace.CalciteTrace; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.slf4j.Logger; + +import java.io.Reader; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.apache.calcite.util.Static.RESOURCE; + +/** + * SQL parser, generated from Parser.jj by JavaCC. + * + *

The public wrapper for this parser is {@link SqlParser}. + */ +public class ${parser.class} extends SqlAbstractParserImpl +{ + private static final Logger LOGGER = CalciteTrace.getParserTracer(); + + // Can't use quoted literal because of a bug in how JavaCC translates + // backslash-backslash. + private static final char BACKSLASH = 0x5c; + private static final char DOUBLE_QUOTE = 0x22; + private static final String DQ = DOUBLE_QUOTE + ""; + private static final String DQDQ = DQ + DQ; + private static final SqlLiteral LITERAL_ZERO = + SqlLiteral.createExactNumeric("0", SqlParserPos.ZERO); + private static final SqlLiteral LITERAL_ONE = + SqlLiteral.createExactNumeric("1", SqlParserPos.ZERO); + private static final SqlLiteral LITERAL_MINUS_ONE = + SqlLiteral.createExactNumeric("-1", SqlParserPos.ZERO); + private static final BigDecimal ONE_HUNDRED = BigDecimal.valueOf(100L); + + private static Metadata metadata; + + private Casing unquotedCasing; + private Casing quotedCasing; + private int identifierMaxLength; + private SqlConformance conformance; + + /** + * {@link SqlParserImplFactory} implementation for creating parser. 
+ */ + public static final SqlParserImplFactory FACTORY = new SqlParserImplFactory() { + public SqlAbstractParserImpl getParser(Reader reader) { + final ${parser.class} parser = new ${parser.class}(reader); + if (reader instanceof SourceStringReader) { + final String sql = + ((SourceStringReader) reader).getSourceString(); + parser.setOriginalSql(sql); + } + return parser; + } + }; + + public SqlParseException normalizeException(Throwable ex) { + try { + if (ex instanceof ParseException) { + ex = cleanupParseException((ParseException) ex); + } + return convertException(ex); + } catch (ParseException e) { + throw new AssertionError(e); + } + } + + public Metadata getMetadata() { + synchronized (${parser.class}.class) { + if (metadata == null) { + metadata = new MetadataImpl( + new ${parser.class}(new java.io.StringReader(""))); + } + return metadata; + } + } + + public void setTabSize(int tabSize) { + jj_input_stream.setTabSize(tabSize); + } + + public void switchTo(SqlAbstractParserImpl.LexicalState state) { + final int stateOrdinal = + Arrays.asList(${parser.class}TokenManager.lexStateNames) + .indexOf(state.name()); + token_source.SwitchTo(stateOrdinal); + } + + public void setQuotedCasing(Casing quotedCasing) { + this.quotedCasing = quotedCasing; + } + + public void setUnquotedCasing(Casing unquotedCasing) { + this.unquotedCasing = unquotedCasing; + } + + public void setIdentifierMaxLength(int identifierMaxLength) { + this.identifierMaxLength = identifierMaxLength; + } + + public void setConformance(SqlConformance conformance) { + this.conformance = conformance; + } + + public SqlNode parseSqlExpressionEof() throws Exception { + return SqlExpressionEof(); + } + + public SqlNode parseSqlStmtEof() throws Exception { + return SqlStmtEof(); + } + + public SqlNodeList parseSqlStmtList() throws Exception { + return SqlStmtList(); + } + + public SqlNode parseArray() throws SqlParseException { + switchTo(LexicalState.BQID); + try { + return ArrayLiteral(); + } catch 
(ParseException ex) { + throw normalizeException(ex); + } catch (TokenMgrError ex) { + throw normalizeException(ex); + } + } + + private SqlNode extend(SqlNode table, SqlNodeList extendList) { + return SqlStdOperatorTable.EXTEND.createCall( + Span.of(table, extendList).pos(), table, extendList); + } + + /** Adds a warning that a token such as "HOURS" was used, + * whereas the SQL standard only allows "HOUR". + * + *

Currently, we silently add an exception to a list of warnings. In + * future, we may have better compliance checking, for example a strict + * compliance mode that throws if any non-standard features are used. */ + private TimeUnit warn(TimeUnit timeUnit) throws ParseException { + final String token = getToken(0).image.toUpperCase(Locale.ROOT); + warnings.add( + SqlUtil.newContextException(getPos(), + RESOURCE.nonStandardFeatureUsed(token))); + return timeUnit; + } +} + +PARSER_END(${parser.class}) + + +/*************************************** + * Utility Codes for Semantic Analysis * + ***************************************/ + +/* For Debug */ +JAVACODE +void debug_message1() { + LOGGER.info("{} , {}", getToken(0).image, getToken(1).image); +} + +JAVACODE String unquotedIdentifier() { + return SqlParserUtil.toCase(getToken(0).image, unquotedCasing); +} + +/** + * Allows parser to be extended with new types of table references. The + * default implementation of this production is empty. + */ +SqlNode ExtendedTableRef() : +{ +} +{ + UnusedExtension() + { + return null; + } +} + +/** + * Allows an OVER clause following a table expression as an extension to + * standard SQL syntax. The default implementation of this production is empty. + */ +SqlNode TableOverOpt() : +{ +} +{ + { + return null; + } +} + +/* + * Parses dialect-specific keywords immediately following the SELECT keyword. + */ +void SqlSelectKeywords(List keywords) : +{} +{ + E() +} + +/* + * Parses dialect-specific keywords immediately following the INSERT keyword. + */ +void SqlInsertKeywords(List keywords) : +{} +{ + E() +} + +/* +* Parse Floor/Ceil function parameters +*/ +SqlNode FloorCeilOptions(Span s, boolean floorFlag) : +{ + SqlNode node; +} +{ + node = StandardFloorCeilOptions(s, floorFlag) { + return node; + } +} + +/* +// This file contains the heart of a parser for SQL SELECT statements. 
+// code can be shared between various parsers (for example, a DDL parser and a +// DML parser) but is not a standalone JavaCC file. You need to prepend a +// parser declaration (such as that in Parser.jj). +*/ + +/* Epsilon */ +JAVACODE +void E() {} + +/** @Deprecated */ +JAVACODE List startList(Object o) +{ + List list = new ArrayList(); + list.add(o); + return list; +} + +/* + * NOTE jvs 6-Feb-2004: The straightforward way to implement the SQL grammar is + * to keep query expressions (SELECT, UNION, etc) separate from row expressions + * (+, LIKE, etc). However, this is not possible with an LL(k) parser, because + * both kinds of expressions allow parenthesization, so no fixed amount of left + * context is ever good enough. A sub-query can be a leaf in a row expression, + * and can include operators like UNION, so it's not even possible to use a + * syntactic lookahead rule like "look past an indefinite number of parentheses + * until you see SELECT, VALUES, or TABLE" (since at that point we still + * don't know whether we're parsing a sub-query like ((select ...) + x) + * vs. (select ... union select ...). + * + * The somewhat messy solution is to unify the two kinds of expression, + * and to enforce syntax rules using parameterized context. This + * is the purpose of the ExprContext parameter. It is passed to + * most expression productions, which check the expressions encountered + * against the context for correctness. When a query + * element like SELECT is encountered, the production calls + * checkQueryExpression, which will throw an exception if + * a row expression was expected instead. When a row expression like + * IN is encountered, the production calls checkNonQueryExpression + * instead. It is very important to understand how this works + * when modifying the grammar. + * + * The commingling of expressions results in some bogus ambiguities which are + * resolved with LOOKAHEAD hints. The worst example is comma. 
SQL allows both + * (WHERE x IN (1,2)) and (WHERE x IN (select ...)). This means when we parse + * the right-hand-side of an IN, we have to allow any kind of expression inside + * the parentheses. Now consider the expression "WHERE x IN(SELECT a FROM b + * GROUP BY c,d)". When the parser gets to "c,d" it doesn't know whether the + * comma indicates the end of the GROUP BY or the end of one item in an IN + * list. Luckily, we know that select and comma-list are mutually exclusive + * within IN, so we use maximal munch for the GROUP BY comma. However, this + * usage of hints could easily mask unintended ambiguities resulting from + * future changes to the grammar, making it very brittle. + */ + +JAVACODE protected SqlParserPos getPos() +{ + return new SqlParserPos( + token.beginLine, + token.beginColumn, + token.endLine, + token.endColumn); +} + +/** Starts a span at the current position. */ +JAVACODE Span span() +{ + return Span.of(getPos()); +} + +JAVACODE void checkQueryExpression(ExprContext exprContext) +{ + switch (exprContext) { + case ACCEPT_NON_QUERY: + case ACCEPT_SUB_QUERY: + case ACCEPT_CURSOR: + throw SqlUtil.newContextException(getPos(), + RESOURCE.illegalQueryExpression()); + } +} + +JAVACODE void checkNonQueryExpression(ExprContext exprContext) +{ + switch (exprContext) { + case ACCEPT_QUERY: + throw SqlUtil.newContextException(getPos(), + RESOURCE.illegalNonQueryExpression()); + } +} + +JAVACODE SqlNode checkNotJoin(SqlNode e) +{ + if (e instanceof SqlJoin) { + throw SqlUtil.newContextException(e.getParserPosition(), + RESOURCE.illegalJoinExpression()); + } + return e; +} + +/** + * Converts a ParseException (local to this particular instantiation + * of the parser) into a SqlParseException (common to all parsers). 
+ */ +JAVACODE SqlParseException convertException(Throwable ex) +{ + if (ex instanceof SqlParseException) { + return (SqlParseException) ex; + } + SqlParserPos pos = null; + int[][] expectedTokenSequences = null; + String[] tokenImage = null; + if (ex instanceof ParseException) { + ParseException pex = (ParseException) ex; + expectedTokenSequences = pex.expectedTokenSequences; + tokenImage = pex.tokenImage; + if (pex.currentToken != null) { + final Token token = pex.currentToken.next; + // Checks token.image.equals("1") to avoid recursive call. + // The SqlAbstractParserImpl#MetadataImpl constructor uses constant "1" to + // throw intentionally to collect the expected tokens. + if (!token.image.equals("1") + && getMetadata().isKeyword(token.image) + && SqlParserUtil.allowsIdentifier(tokenImage, expectedTokenSequences)) { + // If the next token is a keyword, reformat the error message as: + + // Incorrect syntax near the keyword '{keyword}' at line {line_number}, + // column {column_number}. + final String expecting = ex.getMessage() + .substring(ex.getMessage().indexOf("Was expecting")); + final String errorMsg = String.format("Incorrect syntax near the keyword '%s' " + + "at line %d, column %d.\n%s", + token.image, + token.beginLine, + token.beginColumn, + expecting); + // Replace the ParseException with explicit error message. + ex = new ParseException(errorMsg); + } + pos = new SqlParserPos( + token.beginLine, + token.beginColumn, + token.endLine, + token.endColumn); + } + } else if (ex instanceof TokenMgrError) { + expectedTokenSequences = null; + tokenImage = null; + // Example: + // Lexical error at line 3, column 24. Encountered "#" after "a". 
+ final java.util.regex.Pattern pattern = java.util.regex.Pattern.compile( + "(?s)Lexical error at line ([0-9]+), column ([0-9]+).*"); + java.util.regex.Matcher matcher = pattern.matcher(ex.getMessage()); + if (matcher.matches()) { + int line = Integer.parseInt(matcher.group(1)); + int column = Integer.parseInt(matcher.group(2)); + pos = new SqlParserPos(line, column, line, column); + } + } else if (ex instanceof CalciteContextException) { + // CalciteContextException is the standard wrapper for exceptions + // produced by the validator, but in the parser, the standard is + // SqlParseException; so, strip it away. In case you were wondering, + // the CalciteContextException appears because the parser + // occasionally calls into validator-style code such as + // SqlSpecialOperator.reduceExpr. + CalciteContextException ece = + (CalciteContextException) ex; + pos = new SqlParserPos( + ece.getPosLine(), + ece.getPosColumn(), + ece.getEndPosLine(), + ece.getEndPosColumn()); + ex = ece.getCause(); + } + + return new SqlParseException( + ex.getMessage(), pos, expectedTokenSequences, tokenImage, ex); +} + +/** + * Removes or transforms misleading information from a parse exception. + * + * @param e dirty excn + * + * @return clean excn + */ +JAVACODE ParseException cleanupParseException(ParseException ex) +{ + if (ex.expectedTokenSequences == null) { + return ex; + } + int iIdentifier = Arrays.asList(ex.tokenImage).indexOf(""); + + // Find all sequences in the error which contain identifier. 
For + // example, + // {} + // {A} + // {B, C} + // {D, } + // {D, A} + // {D, B} + // + // would yield + // {} + // {D} + final List prefixList = new ArrayList(); + for (int i = 0; i < ex.expectedTokenSequences.length; ++i) { + int[] seq = ex.expectedTokenSequences[i]; + int j = seq.length - 1; + int i1 = seq[j]; + if (i1 == iIdentifier) { + int[] prefix = new int[j]; + System.arraycopy(seq, 0, prefix, 0, j); + prefixList.add(prefix); + } + } + + if (prefixList.isEmpty()) { + return ex; + } + + int[][] prefixes = (int[][]) + prefixList.toArray(new int[prefixList.size()][]); + + // Since was one of the possible productions, + // we know that the parser will also have included all + // of the non-reserved keywords (which are treated as + // identifiers in non-keyword contexts). So, now we need + // to clean those out, since they're totally irrelevant. + + final List list = new ArrayList(); + Metadata metadata = getMetadata(); + for (int i = 0; i < ex.expectedTokenSequences.length; ++i) { + int [] seq = ex.expectedTokenSequences[i]; + String tokenImage = ex.tokenImage[seq[seq.length - 1]]; + String token = SqlParserUtil.getTokenVal(tokenImage); + if (token == null || !metadata.isNonReservedKeyword(token)) { + list.add(seq); + continue; + } + boolean match = matchesPrefix(seq, prefixes); + if (!match) { + list.add(seq); + } + } + + ex.expectedTokenSequences = + (int [][]) list.toArray(new int [list.size()][]); + return ex; +} + +JAVACODE boolean matchesPrefix(int[] seq, int[][] prefixes) +{ + nextPrefix: + for (int[] prefix : prefixes) { + if (seq.length == prefix.length + 1) { + for (int k = 0; k < prefix.length; k++) { + if (prefix[k] != seq[k]) { + continue nextPrefix; + } + } + return true; + } + } + return false; +} + +/***************************************** + * Syntactical Descriptions * + *****************************************/ + +SqlNode ExprOrJoinOrOrderedQuery(ExprContext exprContext) : +{ + SqlNode e; + final List list = new ArrayList(); +} +{ + // 
Lookhead to distinguish between "TABLE emp" (which will be + // matched by ExplicitTable() via Query()) + // and "TABLE fun(args)" (which will be matched by TableRef()) + ( + LOOKAHEAD(2) + e = Query(exprContext) + e = OrderByLimitOpt(e) + { return e; } + | + e = TableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) + ( e = JoinTable(e) )* + { list.add(e); } + ( AddSetOpQuery(list, exprContext) )* + { return SqlParserUtil.toTree(list); } + ) +} + +/** + * Parses either a row expression or a query expression with an optional + * ORDER BY. + * + *

Postgres syntax for limit: + * + *

+ *    [ LIMIT { count | ALL } ]
+ *    [ OFFSET start ]
+ *
+ * + *

Trino syntax for limit: + * + *

+ *    [ OFFSET start ]
+ *    [ LIMIT { count | ALL } ]
+ *
+ * + *

MySQL syntax for limit: + * + *

+ *    [ LIMIT { count | start, count } ]
+ *
+ * + *

SQL:2008 syntax for limit: + * + *

+ *    [ OFFSET start { ROW | ROWS } ]
+ *    [ FETCH { FIRST | NEXT } [ count ] { ROW | ROWS } ONLY ]
+ *
+ */ +SqlNode OrderedQueryOrExpr(ExprContext exprContext) : +{ + SqlNode e; +} +{ + e = QueryOrExpr(exprContext) + e = OrderByLimitOpt(e) + { return e; } +} + +/** Reads optional "ORDER BY", "LIMIT", "OFFSET", "FETCH" following a query, + * {@code e}. If any of them are present, adds them to the query; + * otherwise returns the query unchanged. + * Throws if they are present and {@code e} is not a query. */ +SqlNode OrderByLimitOpt(SqlNode e) : +{ + final SqlNodeList orderBy; + final Span s = Span.of(); + SqlNode[] offsetFetch = {null, null}; +} +{ + ( + // use the syntactic type of the expression we just parsed + // to decide whether ORDER BY makes sense + orderBy = OrderBy(e.isA(SqlKind.QUERY)) + | { orderBy = null; } + ) + [ + LimitClause(s, offsetFetch) + [ OffsetClause(s, offsetFetch) ] + | + OffsetClause(s, offsetFetch) + [ + LimitClause(s, offsetFetch) { + if (!this.conformance.isOffsetLimitAllowed()) { + throw SqlUtil.newContextException(s.end(this), + RESOURCE.offsetLimitNotAllowed()); + } + } + | + FetchClause(offsetFetch) + ] + | + FetchClause(offsetFetch) + ] + { + if (orderBy != null || offsetFetch[0] != null || offsetFetch[1] != null) { + return new SqlOrderBy(getPos(), e, + Util.first(orderBy, SqlNodeList.EMPTY), + offsetFetch[0], offsetFetch[1]); + } + return e; + } +} + +/** + * Parses an OFFSET clause in an ORDER BY expression. + */ +void OffsetClause(Span s, SqlNode[] offsetFetch) : +{ +} +{ + // ROW or ROWS is required in SQL:2008 but we make it optional + // because it is not present in Postgres-style syntax. + { s.add(this); } + offsetFetch[0] = UnsignedNumericLiteralOrParam() + [ | ] +} + +/** + * Parses a FETCH clause in an ORDER BY expression. + */ +void FetchClause(SqlNode[] offsetFetch) : +{ +} +{ + // SQL:2008-style syntax. "OFFSET ... FETCH ...". + // If you specify both LIMIT and FETCH, FETCH wins. + ( | ) offsetFetch[1] = UnsignedNumericLiteralOrParam() + ( | ) +} + +/** + * Parses a LIMIT clause in an ORDER BY expression. 
+ */ +void LimitClause(Span s, SqlNode[] offsetFetch) : +{ + final String error; +} +{ + // Postgres-style syntax. "LIMIT ... OFFSET ..." + { s.add(this); } + ( + // MySQL-style syntax. "LIMIT start, count" or "LIMIT start, ALL" + LOOKAHEAD(2) + offsetFetch[0] = UnsignedNumericLiteralOrParam() + + ( + offsetFetch[1] = UnsignedNumericLiteralOrParam() { + error = "count"; + } + | + { + error = "ALL"; + } + ) { + if (!this.conformance.isLimitStartCountAllowed()) { + throw SqlUtil.newContextException(s.end(this), + RESOURCE.limitStartCountOrAllNotAllowed(error)); + } + } + | + offsetFetch[1] = UnsignedNumericLiteralOrParam() + | + + ) +} + +/** + * Parses a leaf in a query expression (SELECT, VALUES or TABLE). + */ +SqlNode LeafQuery(ExprContext exprContext) : +{ + SqlNode e; +} +{ + { + // ensure a query is legal in this context + checkQueryExpression(exprContext); + } + e = SqlSelect() { return e; } +| + e = TableConstructor() { return e; } +| + e = ExplicitTable(getPos()) { return e; } +} + +/** + * Parses a parenthesized query or single row expression. + * Depending on {@code exprContext}, may also accept a join. + */ +SqlNode ParenthesizedExpression(ExprContext exprContext) : +{ + SqlNode e; +} +{ + + { + // we've now seen left paren, so queries inside should + // be allowed as sub-queries + switch (exprContext) { + case ACCEPT_SUB_QUERY: + exprContext = ExprContext.ACCEPT_NONCURSOR; + break; + case ACCEPT_CURSOR: + exprContext = ExprContext.ACCEPT_ALL; + break; + } + } + e = ExprOrJoinOrOrderedQuery(exprContext) + + { + exprContext.throwIfNotCompatible(e); + return e; + } +} + +/** + * Parses a parenthesized query or comma-list of row expressions. + * + *

REVIEW jvs 8-Feb-2004: There's a small hole in this production. It can be + * used to construct something like + * + *

+ * WHERE x IN (select count(*) from t where c=d,5)
+ *
+ * + *

which should be illegal. The above is interpreted as equivalent to + * + *

+ * WHERE x IN ((select count(*) from t where c=d),5)
+ *
+ * + *

which is a legal use of a sub-query. The only way to fix the hole is to + * be able to remember whether a subexpression was parenthesized or not, which + * means preserving parentheses in the SqlNode tree. This is probably + * desirable anyway for use in purely syntactic parsing applications (e.g. SQL + * pretty-printer). However, if this is done, it's important to also make + * isA() on the paren node call down to its operand so that we can + * always correctly discriminate a query from a row expression. + */ +SqlNodeList ParenthesizedQueryOrCommaList( + ExprContext exprContext) : +{ + SqlNode e; + final List list = new ArrayList(); + ExprContext firstExprContext = exprContext; + final Span s; +} +{ + + { + // we've now seen left paren, so a query by itself should + // be interpreted as a sub-query + s = span(); + switch (exprContext) { + case ACCEPT_SUB_QUERY: + firstExprContext = ExprContext.ACCEPT_NONCURSOR; + break; + case ACCEPT_CURSOR: + firstExprContext = ExprContext.ACCEPT_ALL; + break; + } + } + e = OrderedQueryOrExpr(firstExprContext) { list.add(e); } + ( + + { + // a comma-list can't appear where only a query is expected + checkNonQueryExpression(exprContext); + } + AddExpression(list, exprContext) + )* + + { + return new SqlNodeList(list, s.end(this)); + } +} + +/** As ParenthesizedQueryOrCommaList, but allows DEFAULT + * in place of any of the expressions. For example, + * {@code (x, DEFAULT, null, DEFAULT)}. 
*/ +SqlNodeList ParenthesizedQueryOrCommaListWithDefault( + ExprContext exprContext) : +{ + SqlNode e; + final List list = new ArrayList(); + ExprContext firstExprContext = exprContext; + final Span s; +} +{ + + { + // we've now seen left paren, so a query by itself should + // be interpreted as a sub-query + s = span(); + switch (exprContext) { + case ACCEPT_SUB_QUERY: + firstExprContext = ExprContext.ACCEPT_NONCURSOR; + break; + case ACCEPT_CURSOR: + firstExprContext = ExprContext.ACCEPT_ALL; + break; + } + } + ( + e = OrderedQueryOrExpr(firstExprContext) { list.add(e); } + | + e = Default() { list.add(e); } + ) + ( + + { + // a comma-list can't appear where only a query is expected + checkNonQueryExpression(exprContext); + } + ( + e = Expression(exprContext) { list.add(e); } + | + e = Default() { list.add(e); } + ) + )* + + { + return new SqlNodeList(list, s.end(this)); + } +} + +/** + * Parses function parameter lists. + * If the list starts with DISTINCT or ALL, it is discarded. + */ +List UnquantifiedFunctionParameterList(ExprContext exprContext) : +{ + final List args; +} +{ + args = FunctionParameterList(exprContext) { + args.remove(0); // remove DISTINCT or ALL, if present + return args; + } +} + +/** + * Parses function parameter lists including DISTINCT keyword recognition, + * DEFAULT, and named argument assignment. 
+ */ +List FunctionParameterList(ExprContext exprContext) : +{ + final SqlLiteral qualifier; + final List list = new ArrayList(); +} +{ + + ( + qualifier = AllOrDistinct() { list.add(qualifier); } + | + { list.add(null); } + ) + AddArg0(list, exprContext) + ( + { + // a comma-list can't appear where only a query is expected + checkNonQueryExpression(exprContext); + } + AddArg(list, exprContext) + )* + + { + return list; + } +} + +SqlLiteral AllOrDistinct() : +{ +} +{ + { return SqlSelectKeyword.DISTINCT.symbol(getPos()); } +| + { return SqlSelectKeyword.ALL.symbol(getPos()); } +} + +void AddArg0(List list, ExprContext exprContext) : +{ + final SqlIdentifier name; + SqlNode e; + final ExprContext firstExprContext; + { + // we've now seen left paren, so queries inside should + // be allowed as sub-queries + switch (exprContext) { + case ACCEPT_SUB_QUERY: + firstExprContext = ExprContext.ACCEPT_NONCURSOR; + break; + case ACCEPT_CURSOR: + firstExprContext = ExprContext.ACCEPT_ALL; + break; + default: + firstExprContext = exprContext; + break; + } + } +} +{ + ( + LOOKAHEAD(2) name = SimpleIdentifier() + | { name = null; } + ) + ( + e = Default() + | + LOOKAHEAD((SimpleIdentifierOrList() | ) ) + e = LambdaExpression() + | + LOOKAHEAD(3) + e = TableParam() + | + e = PartitionedQueryOrQueryOrExpr(firstExprContext) + ) + { + if (name != null) { + e = SqlStdOperatorTable.ARGUMENT_ASSIGNMENT.createCall( + Span.of(name, e).pos(), e, name); + } + list.add(e); + } +} + +void AddArg(List list, ExprContext exprContext) : +{ + final SqlIdentifier name; + SqlNode e; +} +{ + ( + LOOKAHEAD(2) name = SimpleIdentifier() + | { name = null; } + ) + ( + e = Default() + | + LOOKAHEAD((SimpleIdentifierOrList() | ) ) + e = LambdaExpression() + | + e = Expression(exprContext) + | + e = TableParam() + ) + { + if (name != null) { + e = SqlStdOperatorTable.ARGUMENT_ASSIGNMENT.createCall( + Span.of(name, e).pos(), e, name); + } + list.add(e); + } +} + +SqlNode Default() : {} +{ + { + return 
SqlStdOperatorTable.DEFAULT.createCall(getPos()); + } +} + +/** + * Parses a query (SELECT, UNION, INTERSECT, EXCEPT, VALUES, TABLE) followed by + * the end-of-file symbol. + */ +SqlNode SqlQueryEof() : +{ + SqlNode query; +} +{ + query = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) + + { return query; } +} + +/** + * Parses a list of SQL statements separated by semicolon. + * The semicolon is required between statements, but is + * optional at the end. + */ +SqlNodeList SqlStmtList() : +{ + final List stmtList = new ArrayList(); + SqlNode stmt; +} +{ + stmt = SqlStmt() { + stmtList.add(stmt); + } + ( + + [ + stmt = SqlStmt() { + stmtList.add(stmt); + } + ] + )* + + { + return new SqlNodeList(stmtList, Span.of(stmtList).pos()); + } +} + +/** + * Parses an SQL statement. + */ +SqlNode SqlStmt() : +{ + SqlNode stmt; +} +{ + ( +<#-- Add methods to parse additional statements here --> +<#list (parser.statementParserMethods!default.parser.statementParserMethods) as method> + LOOKAHEAD(2) stmt = ${method} + | + + stmt = ${parser.setOptionParserMethod!default.parser.setOptionParserMethod}(Span.of(), null) + | + stmt = SqlAlter() + | +<#if (parser.createStatementParserMethods!default.parser.createStatementParserMethods)?size != 0> + stmt = SqlCreate() + | + +<#if (parser.dropStatementParserMethods!default.parser.dropStatementParserMethods)?size != 0> + stmt = SqlDrop() + | + +<#if (parser.truncateStatementParserMethods!default.parser.truncateStatementParserMethods)?size != 0> + LOOKAHEAD(2) + stmt = SqlTruncate() + | + + stmt = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) + | + stmt = SqlExplain() + | + stmt = SqlDescribe() + | + stmt = SqlInsert() + | + stmt = SqlDelete() + | + stmt = SqlUpdate() + | + stmt = SqlMerge() + | + stmt = SqlProcedureCall() + ) + { + return stmt; + } +} + +/** + * Parses an SQL statement followed by the end-of-file symbol. 
+ */ +SqlNode SqlStmtEof() : +{ + SqlNode stmt; +} +{ + stmt = SqlStmt() + { + return stmt; + } +} + +<#-- Add implementations of additional parser statement calls here --> +<#list (parser.implementationFiles!default.parser.implementationFiles) as file> + <#include "/@includes/"+file /> + + +SqlNodeList ParenthesizedKeyValueOptionCommaList() : +{ + final Span s; + final List list = new ArrayList(); +} +{ + { s = span(); } + + AddKeyValueOption(list) + ( + + AddKeyValueOption(list) + )* + { + return new SqlNodeList(list, s.end(this)); + } +} + +/** +* Parses an option with format key=val whose key is a simple identifier or string literal +* and value is a string literal. +*/ +void AddKeyValueOption(List list) : +{ + final SqlNode key; + final SqlNode value; +} +{ + ( + key = SimpleIdentifier() + | + key = StringLiteral() + ) + + value = StringLiteral() { + list.add(key); + list.add(value); + } +} + +/** Parses an option value (either a string or a numeric) and adds to a list. */ +void AddOptionValue(List list) : +{ + final SqlNode value; +} +{ + ( + value = NumericLiteral() { list.add(value); } + | + value = StringLiteral() { list.add(value); } + ) +} + +/** + * Parses a literal list separated by comma. The literal is either a string or a numeric. 
+ */ +SqlNodeList ParenthesizedLiteralOptionCommaList() : +{ + final Span s; + final List list = new ArrayList(); +} +{ + { s = span(); } + + AddOptionValue(list) ( AddOptionValue(list) )* + { + return new SqlNodeList(list, s.end(this)); + } +} + +void AddHint(List hints) : +{ + final SqlIdentifier hintName; + final SqlNodeList hintOptions; + final SqlHint.HintOptionFormat optionFormat; +} +{ + hintName = SimpleIdentifier() + ( + LOOKAHEAD(5) + hintOptions = ParenthesizedKeyValueOptionCommaList() { + optionFormat = SqlHint.HintOptionFormat.KV_LIST; + } + | + LOOKAHEAD(3) + hintOptions = ParenthesizedSimpleIdentifierList() { + optionFormat = SqlHint.HintOptionFormat.ID_LIST; + } + | + LOOKAHEAD(3) + hintOptions = ParenthesizedLiteralOptionCommaList() { + optionFormat = SqlHint.HintOptionFormat.LITERAL_LIST; + } + | + LOOKAHEAD(2) + [ ] + { + hintOptions = SqlNodeList.EMPTY; + optionFormat = SqlHint.HintOptionFormat.EMPTY; + } + ) + { + hints.add( + new SqlHint(Span.of(hintOptions).end(this), hintName, hintOptions, + optionFormat)); + } +} + +/** Parses hints following a table reference, + * and returns the wrapped table reference. */ +SqlNode TableHints(SqlIdentifier tableName) : +{ + final List hints = new ArrayList(); +} +{ + AddHint(hints) ( AddHint(hints) )* { + final SqlParserPos pos = Span.of(tableName).addAll(hints).end(this); + final SqlNodeList hintList = new SqlNodeList(hints, pos); + return new SqlTableRef(pos, tableName, hintList); + } +} + +/** + * Parses a leaf SELECT expression without ORDER BY. + */ +SqlSelect SqlSelect() : +{ + final List keywords = new ArrayList(); + final SqlLiteral keyword; + final SqlNodeList keywordList; + final List selectList = new ArrayList(); + final SqlNode fromClause; + final SqlNode where; + final SqlNodeList groupBy; + final SqlNode having; + final SqlNodeList windowDecls; + final SqlNode qualify; + final List hints = new ArrayList(); + final Span s; +} +{ +

+ | + | + | + | + | + | ) + (
)? + table = CompoundIdentifier() + ( column = SimpleIdentifier() | { column = null; } ) + { + return new SqlDescribeTable(s.add(table).addIf(column).pos(), + table, column); + } + | + (LOOKAHEAD(1) )? + stmt = SqlQueryOrDml() { + // DESCRIBE STATEMENT currently does the same as EXPLAIN. See + // [CALCITE-1221] Implement DESCRIBE DATABASE, CATALOG, STATEMENT + final SqlExplainLevel detailLevel = SqlExplainLevel.EXPPLAN_ATTRIBUTES; + final SqlExplain.Depth depth = SqlExplain.Depth.PHYSICAL; + final SqlExplainFormat format = SqlExplainFormat.TEXT; + return new SqlExplain(s.end(stmt), + stmt, + detailLevel.symbol(SqlParserPos.ZERO), + depth.symbol(SqlParserPos.ZERO), + format.symbol(SqlParserPos.ZERO), + nDynamicParams); + } + ) +} + +/** + * Parses a CALL statement. + */ +SqlNode SqlProcedureCall() : +{ + final Span s; + SqlNode routineCall; +} +{ + { + s = span(); + } + routineCall = NamedRoutineCall( + SqlFunctionCategory.USER_DEFINED_PROCEDURE, + ExprContext.ACCEPT_SUB_QUERY) + { + return SqlStdOperatorTable.PROCEDURE_CALL.createCall( + s.end(routineCall), routineCall); + } +} + +SqlNode NamedRoutineCall( + SqlFunctionCategory routineType, + ExprContext exprContext) : +{ + final SqlIdentifier name; + final List list = new ArrayList(); + final Span s; +} +{ + name = CompoundIdentifier() { + s = span(); + } + + [ + AddArg0(list, exprContext) + ( + { + // a comma-list can't appear where only a query is expected + checkNonQueryExpression(exprContext); + } + AddArg(list, exprContext) + )* + ] + + { + return createCall(name, s.end(this), routineType, null, list); + } +} + +/** + * Table parameter of a table function. + * The input table with set semantics may be partitioned/ordered on one or more columns. 
+ */ +SqlNode TableParam() : +{ + final Span s; + final SqlNodeList partitionList; + final SqlNodeList orderList; + SqlNode tableRef; +} +{ + { s = span(); } + tableRef = ExplicitTable(getPos()) + ( + + partitionList = SimpleIdentifierOrList() + | { partitionList = SqlNodeList.EMPTY; } + ) + ( + orderList = OrderByOfSetSemanticsTable() + | { orderList = SqlNodeList.EMPTY; } + ) + { return CreateSetSemanticsTableIfNeeded(s, tableRef, partitionList, orderList); } +} + +SqlNode PartitionedQueryOrQueryOrExpr(ExprContext exprContext) : +{ + SqlNode e; +} +{ + e = OrderedQueryOrExpr(exprContext) + e = PartitionedByAndOrderBy(e) + + { return e; } +} + +SqlNode PartitionedByAndOrderBy(SqlNode e) : +{ + final Span s; + final SqlNodeList partitionList; + final SqlNodeList orderList; +} +{ + { s = span(); } + ( + + partitionList = SimpleIdentifierOrList() + | { partitionList = SqlNodeList.EMPTY; } + ) + ( + orderList = OrderByOfSetSemanticsTable() + | { orderList = SqlNodeList.EMPTY; } + ) + { return CreateSetSemanticsTableIfNeeded(s, e, partitionList, orderList); } +} + +SqlNodeList OrderByOfSetSemanticsTable() : +{ + final List list = new ArrayList(); + final Span s; +} +{ + + { s = span(); } + + ( + LOOKAHEAD(2) + AddOrderItem(list) + ( + // NOTE jvs 6-Feb-2004: See comments at top of file for why + // hint is necessary here. + LOOKAHEAD(2) AddOrderItem(list) + )* + { + return new SqlNodeList(list, s.addAll(list).pos()); + } + | + AddOrderItem(list) + { + return new SqlNodeList(list, s.addAll(list).pos()); + } + ) +} + +SqlNode CreateSetSemanticsTableIfNeeded( + final Span s, + final SqlNode e, + final SqlNodeList partitionList, + final SqlNodeList orderList) : +{ + +} +{ + + { + if (partitionList.isEmpty() && orderList.isEmpty()) { + return e; + } else { + return SqlStdOperatorTable.SET_SEMANTICS_TABLE.createCall( + s.pos(), e, partitionList, orderList); + } + } +} + +/** + * Parses an INSERT statement. 
+ */ +SqlNode SqlInsert() : +{ + final List keywords = new ArrayList(); + final SqlNodeList keywordList; + final SqlIdentifier tableName; + SqlNode tableRef; + SqlNode source; + final SqlNodeList columnList; + final Span s; + final Pair p; +} +{ + ( + + | + { keywords.add(SqlInsertKeyword.UPSERT.symbol(getPos())); } + ) + { s = span(); } + SqlInsertKeywords(keywords) { + keywordList = new SqlNodeList(keywords, s.addAll(keywords).pos()); + } + tableName = CompoundTableIdentifier() + ( tableRef = TableHints(tableName) | { tableRef = tableName; } ) + [ LOOKAHEAD(5) tableRef = ExtendTable(tableRef) ] + ( + LOOKAHEAD(2) + p = ParenthesizedCompoundIdentifierList() { + if (!p.right.isEmpty()) { + tableRef = extend(tableRef, p.right); + } + if (!p.left.isEmpty()) { + columnList = p.left; + } else { + columnList = null; + } + } + | { columnList = null; } + ) + source = OrderedQueryOrExpr(ExprContext.ACCEPT_QUERY) { + return new SqlInsert(s.end(source), keywordList, tableRef, source, + columnList); + } +} + +/* + * Abstract production: + * + * void SqlInsertKeywords(List keywords) + * + * Parses dialect-specific keywords immediately following the INSERT keyword. + */ + +/** + * Parses a DELETE statement. + */ +SqlNode SqlDelete() : +{ + final SqlIdentifier tableName; + SqlNode tableRef; + final SqlIdentifier alias; + final SqlNode where; + final Span s; +} +{ + { + s = span(); + } + tableName = CompoundTableIdentifier() + ( tableRef = TableHints(tableName) | { tableRef = tableName; } ) + [ tableRef = ExtendTable(tableRef) ] + ( [ ] alias = SimpleIdentifier() | { alias = null; } ) + ( where = Where() | { where = null; } ) + { + return new SqlDelete(s.add(tableRef).addIf(alias).addIf(where).pos(), + tableRef, where, null, alias); + } +} + +/** + * Parses an UPDATE statement. 
+ */ +SqlNode SqlUpdate() : +{ + final SqlIdentifier tableName; + SqlNode tableRef; + final SqlIdentifier alias; + final SqlNode where; + final SqlNodeList sourceExpressionList; + final SqlNodeList targetColumnList; + SqlIdentifier id; + final Span s; +} +{ + { + s = span(); + targetColumnList = new SqlNodeList(s.pos()); + sourceExpressionList = new SqlNodeList(s.pos()); + } + tableName = CompoundTableIdentifier() + ( tableRef = TableHints(tableName) | { tableRef = tableName; } ) + [ tableRef = ExtendTable(tableRef) ] + ( [ ] alias = SimpleIdentifier() | { alias = null; } ) + id = CompoundIdentifier() { + targetColumnList.add(id); + } + // TODO: support DEFAULT also + AddExpression(sourceExpressionList, ExprContext.ACCEPT_SUB_QUERY) + ( + + id = CompoundIdentifier() { targetColumnList.add(id); } + AddExpression(sourceExpressionList, ExprContext.ACCEPT_SUB_QUERY) + )* + ( where = Where() | { where = null; } ) + { + final SqlParserPos pos = s.addAll(targetColumnList) + .addAll(sourceExpressionList).addIf(where).pos(); + return new SqlUpdate(pos, tableRef, targetColumnList, + sourceExpressionList, where, null, alias); + } +} + +/** + * Parses a MERGE statement. 
+ */ +SqlNode SqlMerge() : +{ + final SqlIdentifier tableName; + SqlNode tableRef; + final SqlIdentifier alias; + final SqlNode sourceTableRef; + final SqlNode condition; + final SqlUpdate updateCall; + final SqlInsert insertCall; + final Span s; +} +{ + { s = span(); } tableName = CompoundTableIdentifier() + ( tableRef = TableHints(tableName) | { tableRef = tableName; } ) + [ tableRef = ExtendTable(tableRef) ] + ( [ ] alias = SimpleIdentifier() | { alias = null; } ) + sourceTableRef = TableRef() + condition = Expression(ExprContext.ACCEPT_SUB_QUERY) + ( + LOOKAHEAD(2) + updateCall = WhenMatchedClause(tableRef, alias) + ( insertCall = WhenNotMatchedClause(tableRef) | { insertCall = null; } ) + | + { updateCall = null; } + insertCall = WhenNotMatchedClause(tableRef) + ) + { + final SqlParserPos pos = s.addIf(updateCall).addIf(insertCall).pos(); + return new SqlMerge(pos, tableRef, condition, sourceTableRef, + updateCall, insertCall, null, alias); + } +} + +SqlUpdate WhenMatchedClause(SqlNode table, SqlIdentifier alias) : +{ + SqlIdentifier id; + final Span s; + final SqlNodeList updateColumnList = new SqlNodeList(SqlParserPos.ZERO); + final SqlNodeList updateExprList = new SqlNodeList(SqlParserPos.ZERO); +} +{ + { s = span(); } + id = CompoundIdentifier() { + updateColumnList.add(id); + } + AddExpression(updateExprList, ExprContext.ACCEPT_SUB_QUERY) + ( + + id = CompoundIdentifier() { + updateColumnList.add(id); + } + AddExpression(updateExprList, ExprContext.ACCEPT_SUB_QUERY) + )* + { + return new SqlUpdate(s.addAll(updateExprList).pos(), table, + updateColumnList, updateExprList, null, null, alias); + } +} + +SqlInsert WhenNotMatchedClause(SqlNode table) : +{ + final Span insertSpan, valuesSpan; + final List keywords = new ArrayList(); + final SqlNodeList keywordList; + final SqlNodeList insertColumnList; + SqlNode rowConstructor; + SqlNode insertValues; +} +{ + { + insertSpan = span(); + } + SqlInsertKeywords(keywords) { + keywordList = new SqlNodeList(keywords, 
insertSpan.end(this)); + } + ( + LOOKAHEAD(2) + insertColumnList = ParenthesizedSimpleIdentifierList() + | { insertColumnList = null; } + ) + ( + + { valuesSpan = span(); } rowConstructor = RowConstructor() + + | + { valuesSpan = span(); } rowConstructor = RowConstructor() + ) + { + // TODO zfong 5/26/06: note that extra parentheses are accepted above + // around the VALUES clause as a hack for unparse, but this is + // actually invalid SQL; should fix unparse + insertValues = SqlStdOperatorTable.VALUES.createCall( + valuesSpan.end(this), rowConstructor); + return new SqlInsert(insertSpan.end(this), keywordList, + table, insertValues, insertColumnList); + } +} + +/** + * Parses one item in a select list. + */ +void AddSelectItem(List list) : +{ + SqlNode e; + final SqlIdentifier id; +} +{ + e = SelectExpression() + ( + [ + [ { + e = SqlInternalOperators.MEASURE.createCall( + e.getParserPosition(), e); + } + ] + ] + ( + id = SimpleIdentifier() + | + // Mute the warning about ambiguity between alias and continued + // string literal. + LOOKAHEAD(1) + id = SimpleIdentifierFromStringLiteral() + ) + { list.add(SqlStdOperatorTable.AS.createCall(span().end(e), e, id)); } + | { list.add(e); } + ) +} + +/** + * Parses one unaliased expression in a select list. 
+ */ +SqlNode SelectExpression() : +{ + SqlNode e; +} +{ + { + return SqlIdentifier.star(getPos()); + } +| + e = Expression(ExprContext.ACCEPT_SUB_QUERY) { + return e; + } +} + +SqlLiteral Natural() : +{ +} +{ + { return SqlLiteral.createBoolean(true, getPos()); } +| + { return SqlLiteral.createBoolean(false, getPos()); } +} + +SqlLiteral JoinType() : +{ + JoinType joinType; + boolean asof = false; +} +{ + ( +<#list (parser.joinTypes!default.parser.joinTypes) as method> + LOOKAHEAD(3) // required for "LEFT SEMI JOIN" and "LEFT ANTI JOIN" in Babel + joinType = ${method}() + | + + { joinType = JoinType.INNER; } + | + { joinType = JoinType.INNER; } + | + { joinType = JoinType.ASOF; } + | + [ | { asof = true; } ] { joinType = asof ? JoinType.LEFT_ASOF : JoinType.LEFT; } + | + [ ] { joinType = JoinType.RIGHT; } + | + [ ] { joinType = JoinType.FULL; } + | + { joinType = JoinType.CROSS; } + ) + { + return joinType.symbol(getPos()); + } +} + +/** + * Parses the FROM clause for a SELECT. + * + *

FROM is mandatory in standard SQL, optional in dialects such as MySQL, + * PostgreSQL. The parser allows SELECT without FROM, but the validator fails + * if conformance is, say, STRICT_2003. + */ +SqlNode FromClause() : +{ + SqlNode e, e2; + SqlLiteral joinType; +} +{ + e = TableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) + ( + // Comma joins should only occur at top-level in the FROM clause. + // Valid: + // * FROM a, b + // * FROM (a CROSS JOIN b), c + // Not valid: + // * FROM a CROSS JOIN (b, c) + LOOKAHEAD(1) + e = JoinOrCommaTable(e) + )* + { return e; } +} + +SqlNode JoinOrCommaTable(SqlNode e) : +{ + SqlNode e2; + SqlLiteral joinType; +} +{ + LOOKAHEAD(2) + { joinType = JoinType.COMMA.symbol(getPos()); } + e2 = TableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) { + return new SqlJoin(joinType.getParserPosition(), + e, + SqlLiteral.createBoolean(false, joinType.getParserPosition()), + joinType, + e2, + JoinConditionType.NONE.symbol(SqlParserPos.ZERO), + null); + } +| + e2 = JoinTable(e) { return e2; } +} + +/** Matches "LEFT JOIN t ON ...", "RIGHT JOIN t USING ...", "JOIN t". */ +SqlNode JoinTable(SqlNode e) : +{ + SqlNode e2, condition, matchCondition = null; + final SqlLiteral natural, joinType, on, using; + SqlNodeList list; +} +{ + // LOOKAHEAD(3) is needed here rather than a LOOKAHEAD(2) because JavaCC + // calculates minimum lookahead count incorrectly for choice that contains + // zero size child. For instance, with the generated code, + // "LOOKAHEAD(2, Natural(), JoinType())" + // returns true immediately if it sees a single "" token. Where we + // expect the lookahead succeeds after " ". + // + // For more information about the issue, + // see https://github.com/javacc/javacc/issues/86 + // + // We allow CROSS JOIN (joinType = CROSS_JOIN) to have a join condition, + // even though that is not valid SQL; the validator will catch it. 
+ LOOKAHEAD(4) + natural = Natural() + joinType = JoinType() + e2 = TableRef1(ExprContext.ACCEPT_QUERY_OR_JOIN) + ( + [ matchCondition = Expression(ExprContext.ACCEPT_SUB_QUERY) ] + { on = JoinConditionType.ON.symbol(getPos()); } + condition = Expression(ExprContext.ACCEPT_SUB_QUERY) { + JoinType type = joinType.getValueAs(JoinType.class); + if (matchCondition != null) { + if (type != JoinType.ASOF && type != JoinType.LEFT_ASOF) { + throw SqlUtil.newContextException(getPos(), RESOURCE.matchConditionRequiresAsof()); + } + return new SqlAsofJoin(joinType.getParserPosition(), + e, + natural, + joinType, + e2, + on, + condition, + matchCondition); + } else { + if (type == JoinType.ASOF || type == JoinType.LEFT_ASOF) { + throw SqlUtil.newContextException(getPos(), RESOURCE.asofRequiresMatchCondition()); + } + } + return new SqlJoin(joinType.getParserPosition(), + e, + natural, + joinType, + e2, + on, + condition); + } + | + { using = JoinConditionType.USING.symbol(getPos()); } + list = ParenthesizedSimpleIdentifierList() { + return new SqlJoin(joinType.getParserPosition(), + e, + natural, + joinType, + e2, + using, + new SqlNodeList(list, Span.of(using).end(this))); + } + | + { + return new SqlJoin(joinType.getParserPosition(), + e, + natural, + joinType, + e2, + JoinConditionType.NONE.symbol(joinType.getParserPosition()), + null); + } + ) +| + { joinType = JoinType.CROSS.symbol(getPos()); } + e2 = TableRef2(true) { + if (!this.conformance.isApplyAllowed()) { + throw SqlUtil.newContextException(getPos(), RESOURCE.applyNotAllowed()); + } + return new SqlJoin(joinType.getParserPosition(), + e, + SqlLiteral.createBoolean(false, joinType.getParserPosition()), + joinType, + e2, + JoinConditionType.NONE.symbol(SqlParserPos.ZERO), + null); + } +| + { joinType = JoinType.LEFT.symbol(getPos()); } + e2 = TableRef2(true) { + if (!this.conformance.isApplyAllowed()) { + throw SqlUtil.newContextException(getPos(), RESOURCE.applyNotAllowed()); + } + return new 
SqlJoin(joinType.getParserPosition(), + e, + SqlLiteral.createBoolean(false, joinType.getParserPosition()), + joinType, + e2, + JoinConditionType.ON.symbol(SqlParserPos.ZERO), + SqlLiteral.createBoolean(true, joinType.getParserPosition())); + } +} + +/** + * Parses a table reference in a FROM clause, not lateral unless LATERAL + * is explicitly specified. + */ +SqlNode TableRef() : +{ + final SqlNode e; +} +{ + e = TableRef3(ExprContext.ACCEPT_QUERY, false) { return e; } +} + +SqlNode TableRef1(ExprContext exprContext) : +{ + final SqlNode e; +} +{ + e = TableRef3(exprContext, false) { return e; } +} + +/** + * Parses a table reference in a FROM clause. + */ +SqlNode TableRef2(boolean lateral) : +{ + final SqlNode e; +} +{ + e = TableRef3(ExprContext.ACCEPT_QUERY, lateral) { return e; } +} + +SqlNode TableRef3(ExprContext exprContext, boolean lateral) : +{ + final SqlIdentifier tableName; + SqlNode tableRef; + final SqlIdentifier alias; + final Span s; + SqlNodeList args; + final SqlNodeList columnAliasList; + SqlUnnestOperator unnestOp = SqlStdOperatorTable.UNNEST; +} +{ + ( + LOOKAHEAD(2) + tableName = CompoundTableIdentifier() { s = span(); } + ( + // Table call syntax like FROM a.b() instead of FROM TABLE(a.b()) + // Three tokens needed to disambiguate EXTEND syntax from CALCITE-493. + // Example: "FROM EventLog(lastGCTime TIME)". 
+ LOOKAHEAD(3) + tableRef = ImplicitTableFunctionCallArgs(tableName) + | + ( tableRef = TableHints(tableName) | { tableRef = tableName; } ) + [ tableRef = ExtendTable(tableRef) ] + tableRef = Over(tableRef) + [ tableRef = Snapshot(tableRef) ] + [ tableRef = MatchRecognize(tableRef) ] + ) + | + LOOKAHEAD(2) + [ { lateral = true; } ] + tableRef = ParenthesizedExpression(exprContext) + tableRef = Over(tableRef) + tableRef = addLateral(tableRef, lateral) + [ tableRef = MatchRecognize(tableRef) ] + | + LOOKAHEAD(2) + [ ] // "LATERAL" is implicit with "UNNEST", so ignore + { s = span(); } + args = ParenthesizedQueryOrCommaList(ExprContext.ACCEPT_SUB_QUERY) + [ + { + unnestOp = SqlStdOperatorTable.UNNEST_WITH_ORDINALITY; + } + ] + { + tableRef = unnestOp.createCall(s.end(this), (List) args); + } + | + [ { lateral = true; } ] + tableRef = TableFunctionCall() + tableRef = addLateral(tableRef, lateral) + | + tableRef = ExtendedTableRef() + ) + [ + LOOKAHEAD(2) + tableRef = Pivot(tableRef) + ] + [ + LOOKAHEAD(2) + tableRef = Unpivot(tableRef) + ] + [ + [ ] alias = SimpleIdentifier() + ( + columnAliasList = ParenthesizedSimpleIdentifierList() + | { columnAliasList = null; } + ) + { + // Standard SQL (and Postgres) allow applying "AS alias" to a JOIN, + // e.g. "FROM (a CROSS JOIN b) AS c". The new alias obscures the + // internal aliases, and columns cannot be referenced if they are + // not unique. 
TODO: Support this behavior; see + // [CALCITE-5168] Allow AS after parenthesized JOIN + checkNotJoin(tableRef); + if (columnAliasList == null) { + tableRef = SqlStdOperatorTable.AS.createCall( + Span.of(tableRef).end(this), tableRef, alias); + } else { + List idList = new ArrayList(); + idList.add(tableRef); + idList.add(alias); + idList.addAll(columnAliasList.getList()); + tableRef = SqlStdOperatorTable.AS.createCall( + Span.of(tableRef).end(this), idList); + } + } + ] + [ tableRef = Tablesample(tableRef) ] + { return tableRef; } +} + +SqlNode Tablesample(SqlNode tableRef) : +{ + final Span s; + final SqlNode sample; + final boolean isBernoulli; + final SqlNumericLiteral samplePercentage; + boolean isRepeatable = false; + int repeatableSeed = 0; +} +{ + { s = span(); checkNotJoin(tableRef); } + ( + sample = StringLiteral() + { + String sampleName = + SqlLiteral.unchain(sample).getValueAs(String.class); + SqlSampleSpec sampleSpec = SqlSampleSpec.createNamed(sampleName); + final SqlLiteral sampleLiteral = + SqlLiteral.createSample(sampleSpec, s.end(this)); + return SqlStdOperatorTable.TABLESAMPLE.createCall( + s.add(tableRef).end(this), tableRef, sampleLiteral); + } + | + ( + { isBernoulli = true; } + | + { isBernoulli = false; } + ) + samplePercentage = UnsignedNumericLiteral() + [ + repeatableSeed = IntLiteral() + { + isRepeatable = true; + } + ] + { + BigDecimal rate = + samplePercentage.bigDecimalValue().divide(ONE_HUNDRED); + SqlSampleSpec tableSampleSpec = + isRepeatable + ? SqlSampleSpec.createTableSample(isBernoulli, rate, + repeatableSeed) + : SqlSampleSpec.createTableSample(isBernoulli, rate); + SqlLiteral tableSampleLiteral = + SqlLiteral.createSample(tableSampleSpec, s.end(this)); + return SqlStdOperatorTable.TABLESAMPLE.createCall( + s.end(this), tableRef, tableSampleLiteral); + } + ) +} + +/** Wraps a table reference in a call to EXTEND if an optional "EXTEND" clause + * is present. 
*/ +SqlNode ExtendTable(SqlNode tableRef) : +{ + final SqlNodeList extendList; +} +{ + [ ] + extendList = ExtendList() { + return extend(tableRef, extendList); + } +} + +SqlNodeList ExtendList() : +{ + final Span s; + List list = new ArrayList(); +} +{ + { s = span(); } + AddColumnType(list) + ( + AddColumnType(list) + )* + { + return new SqlNodeList(list, s.end(this)); + } +} + +void AddColumnType(List list) : +{ + final SqlIdentifier name; + final SqlDataTypeSpec type; + final boolean nullable; +} +{ + name = CompoundIdentifier() + type = DataType() + nullable = NotNullOpt() + { + list.add(name); + list.add(type.withNullable(nullable, getPos())); + } +} + +/** + * Parses a compound identifier with optional type. + */ +void AddCompoundIdentifierType(List list, List extendList) : +{ + final SqlIdentifier name; + final SqlDataTypeSpec type; + final boolean nullable; +} +{ + name = CompoundIdentifier() + ( + type = DataType() + nullable = NotNullOpt() + | + { type = null; nullable = true; } + ) + { + if (type != null) { + if (!this.conformance.allowExtend()) { + throw SqlUtil.newContextException(type.getParserPosition(), + RESOURCE.extendNotAllowed()); + } + extendList.add(name); + extendList.add(type.withNullable(nullable, getPos())); + } + list.add(name); + } +} + +SqlNode ImplicitTableFunctionCallArgs(SqlIdentifier name) : +{ + final List tableFuncArgs = new ArrayList(); + final SqlNode call; + final Span s; +} +{ + // Table call syntax like FROM a.b() instead of FROM TABLE(a.b()) + // We've already parsed the name, so we don't use NamedRoutineCall. 
+ { s = span(); } + + [ + AddArg0(tableFuncArgs, ExprContext.ACCEPT_CURSOR) + ( + { + // a comma-list can't appear where only a query is expected + checkNonQueryExpression(ExprContext.ACCEPT_CURSOR); + } + AddArg(tableFuncArgs, ExprContext.ACCEPT_CURSOR) + )* + ] + + { + final SqlParserPos pos = s.end(this); + call = createCall(name, pos, + SqlFunctionCategory.USER_DEFINED_TABLE_FUNCTION, null, + tableFuncArgs); + return SqlStdOperatorTable.COLLECTION_TABLE.createCall(pos, + call); + } +} + +SqlNode TableFunctionCall() : +{ + final Span s; + final SqlNode call; + SqlFunctionCategory funcType = SqlFunctionCategory.USER_DEFINED_TABLE_FUNCTION; +} +{ +

{ s = span(); } + [ + + { + funcType = SqlFunctionCategory.USER_DEFINED_TABLE_SPECIFIC_FUNCTION; + } + ] + call = NamedRoutineCall(funcType, ExprContext.ACCEPT_CURSOR) + + { + return SqlStdOperatorTable.COLLECTION_TABLE.createCall(s.end(this), call); + } +} + +/** + * Abstract production: + * SqlNode ExtendedTableRef() + * + *

Allows parser to be extended with new types of table references. The + * default implementation of this production is empty. + */ + +/* + * Abstract production: + * + * SqlNode TableOverOpt() + * + * Allows an OVER clause following a table expression as an extension to + * standard SQL syntax. The default implementation of this production is empty. + */ + +/** + * Parses an explicit TABLE t reference. + */ +SqlNode ExplicitTable(SqlParserPos pos) : +{ + SqlNode tableRef; +} +{ +

tableRef = CompoundIdentifier() + { + return SqlStdOperatorTable.EXPLICIT_TABLE.createCall(pos, tableRef); + } +} + +/** + * Parses a VALUES leaf query expression. + */ +SqlNode TableConstructor() : +{ + final List list = new ArrayList(); + final Span s; +} +{ + ( + { s = span(); } + | + + { + s = span(); + if (!this.conformance.isValueAllowed()) { + throw SqlUtil.newContextException(getPos(), RESOURCE.valueNotAllowed()); + } + } + ) + AddRowConstructor(list) + ( + LOOKAHEAD(2) + AddRowConstructor(list) + )* + { + return SqlStdOperatorTable.VALUES.createCall(s.end(this), list); + } +} + +/** Parses a row constructor and adds it to a list. */ +void AddRowConstructor(List list) : +{ + SqlNode e; +} +{ + e = RowConstructor() { list.add(e); } +} + +/** + * Parses a row constructor in the context of a VALUES expression. + */ +SqlNode RowConstructor() : +{ + final SqlNodeList valueList; + final SqlNode value; + final Span s; +} +{ + // hints are necessary here due to common LPAREN prefixes + ( + // TODO jvs 8-Feb-2004: extra parentheses are accepted here as a hack + // for unparse, but this is actually invalid SQL; should + // fix unparse + LOOKAHEAD(3) + { s = span(); } + + valueList = ParenthesizedQueryOrCommaListWithDefault(ExprContext.ACCEPT_NONCURSOR) + { s.add(this); } + | + LOOKAHEAD(3) + ( + { s = span(); } + | + { s = Span.of(); } + ) + valueList = ParenthesizedQueryOrCommaListWithDefault(ExprContext.ACCEPT_NONCURSOR) + | + value = Expression(ExprContext.ACCEPT_NONCURSOR) + { + // NOTE: A bare value here is standard SQL syntax, believe it or + // not. Taken together with multi-row table constructors, it leads + // to very easy mistakes if you forget the parentheses on a + // single-row constructor. This is also the reason for the + // LOOKAHEAD in TableConstructor(). It would be so much more + // reasonable to require parentheses. Sigh. 
+ s = Span.of(value); + valueList = new SqlNodeList(ImmutableList.of(value), + value.getParserPosition()); + } + ) + { + // REVIEW jvs 8-Feb-2004: Should we discriminate between scalar + // sub-queries inside of ROW and row sub-queries? The standard does, + // but the distinction seems to be purely syntactic. + return SqlStdOperatorTable.ROW.createCall(s.end(valueList), + (List) valueList); + } +} + +/** Parses a WHERE clause for SELECT, DELETE, and UPDATE. */ +SqlNode Where() : +{ + SqlNode condition; +} +{ + condition = Expression(ExprContext.ACCEPT_SUB_QUERY) { + return condition; + } +} + +/** Parses a GROUP BY clause for SELECT. */ +SqlNodeList GroupBy() : +{ + final List list; + final boolean distinct; + final Span s; +} +{ + { s = span(); } + + ( + { distinct = true; } + | { distinct = false; } + | { distinct = false; } + ) + list = GroupingElementList() { + final SqlParserPos pos = s.end(this); + final List list2 = distinct + ? ImmutableList.of( + SqlInternalOperators.GROUP_BY_DISTINCT.createCall(pos, list)) + : list; + return new SqlNodeList(list2, pos); + } +} + +List GroupingElementList() : +{ + final List list = new ArrayList(); +} +{ + AddGroupingElement(list) + ( LOOKAHEAD(2) AddGroupingElement(list) )* + { return list; } +} + +void AddGroupingElement(List list) : +{ + final List subList; + final SqlNodeList nodes; + final Span s; +} +{ + LOOKAHEAD(2) + { s = span(); } + subList = GroupingElementList() { + list.add( + SqlStdOperatorTable.GROUPING_SETS.createCall(s.end(this), subList)); + } +| { s = span(); } + nodes = ExpressionCommaList(s, ExprContext.ACCEPT_SUB_QUERY) + { + list.add( + SqlStdOperatorTable.ROLLUP.createCall(s.end(this), nodes.getList())); + } +| { s = span(); } + nodes = ExpressionCommaList(s, ExprContext.ACCEPT_SUB_QUERY) + { + list.add( + SqlStdOperatorTable.CUBE.createCall(s.end(this), nodes.getList())); + } +| LOOKAHEAD(3) + { s = span(); } { + list.add(new SqlNodeList(s.end(this))); + } +| AddExpression(list, 
ExprContext.ACCEPT_SUB_QUERY) +} + +/** + * Parses a list of expressions separated by commas. + */ +SqlNodeList ExpressionCommaList( + final Span s, + ExprContext exprContext) : +{ + final List list = new ArrayList(); +} +{ + AddExpressions(list, exprContext) { + return new SqlNodeList(list, s.addAll(list).pos()); + } +} + +/** + * Parses a list of expressions separated by commas, + * appending expressions to a given list. + */ +void AddExpressions(List list, ExprContext exprContext) : +{ +} +{ + AddExpression(list, exprContext) + ( + // NOTE jvs 6-Feb-2004: See comments at top of file for why + // hint is necessary here. + LOOKAHEAD(2) + AddExpression(list, ExprContext.ACCEPT_SUB_QUERY) + )* +} + +/** Parses a HAVING clause for SELECT. */ +SqlNode Having() : +{ + SqlNode e; +} +{ + e = Expression(ExprContext.ACCEPT_SUB_QUERY) { return e; } +} + +/** Parses a WINDOW clause for SELECT. */ +SqlNodeList Window() : +{ + final List list = new ArrayList(); + final Span s; +} +{ + { s = span(); } + AddWindowSpec(list) + ( + LOOKAHEAD(2) + AddWindowSpec(list) + )* + { + return new SqlNodeList(list, s.addAll(list).pos()); + } +} + +void AddWindowSpec(List list) : +{ + final SqlIdentifier id; + final SqlWindow e; +} +{ + id = SimpleIdentifier() e = WindowSpecification() { + e.setDeclName(id); + list.add(e); + } +} + +/** + * Parses a window specification. 
+ */ +SqlWindow WindowSpecification() : +{ + final SqlIdentifier id; + final SqlNodeList partitionList; + final SqlNodeList orderList; + final SqlLiteral isRows; + final SqlNode lowerBound, upperBound; + final SqlLiteral exclude; + final Span s, s1, s2; + final SqlLiteral allowPartial; +} +{ + { s = span(); } + ( + id = SimpleIdentifier() + | { id = null; } + ) + ( + { s1 = span(); } + + partitionList = ExpressionCommaList(s1, ExprContext.ACCEPT_NON_QUERY) + | { partitionList = SqlNodeList.EMPTY; } + ) + ( + orderList = OrderBy(true) + | { orderList = SqlNodeList.EMPTY; } + ) + ( + ( + { isRows = SqlLiteral.createBoolean(true, getPos()); } + | + { isRows = SqlLiteral.createBoolean(false, getPos()); } + ) + ( + lowerBound = WindowRange() + upperBound = WindowRange() + | + lowerBound = WindowRange() + { upperBound = null; } + ) + exclude = WindowExclusion() + | + { + isRows = SqlLiteral.createBoolean(false, SqlParserPos.ZERO); + exclude = SqlWindow.createExcludeNoOthers(getPos()); + lowerBound = upperBound = null; + } + ) + ( + { s2 = span(); } { + allowPartial = SqlLiteral.createBoolean(true, s2.end(this)); + } + | + { s2 = span(); } { + allowPartial = SqlLiteral.createBoolean(false, s2.end(this)); + } + | { allowPartial = null; } + ) + + { + return SqlWindow.create(null, id, partitionList, orderList, + isRows, lowerBound, upperBound, allowPartial, exclude, s.end(this)); + } +} + +SqlNode WindowRange() : +{ + final SqlNode e; + final Span s; +} +{ + LOOKAHEAD(2) + { s = span(); } { + return SqlWindow.createCurrentRow(s.end(this)); + } +| + LOOKAHEAD(2) + { s = span(); } + ( + { + return SqlWindow.createUnboundedPreceding(s.end(this)); + } + | + { + return SqlWindow.createUnboundedFollowing(s.end(this)); + } + ) +| + e = Expression(ExprContext.ACCEPT_NON_QUERY) + ( + { + return SqlWindow.createPreceding(e, getPos()); + } + | + { + return SqlWindow.createFollowing(e, getPos()); + } + ) +} + +/** Parses an exclusion clause for WINDOW FRAME. 
*/ +SqlLiteral WindowExclusion() : +{ +} +{ + ( + + ( + { return SqlWindow.createExcludeCurrentRow(getPos()); } + | + { return SqlWindow.createExcludeNoOthers(getPos()); } + | + { return SqlWindow.createExcludeGroup(getPos()); } + | + { return SqlWindow.createExcludeTies(getPos()); } + ) + | + { return SqlWindow.createExcludeNoOthers(SqlParserPos.ZERO); } + ) +} + +/** Parses a QUALIFY clause for SELECT. */ +SqlNode Qualify() : +{ + SqlNode e; +} +{ + e = Expression(ExprContext.ACCEPT_SUB_QUERY) { return e; } +} + +/** + * Parses an ORDER BY clause. + */ +SqlNodeList OrderBy(boolean accept) : +{ + final List list = new ArrayList(); + final Span s; +} +{ + { + s = span(); + if (!accept) { + // Someone told us ORDER BY wasn't allowed here. So why + // did they bother calling us? To get the correct + // parser position for error reporting. + throw SqlUtil.newContextException(s.pos(), RESOURCE.illegalOrderBy()); + } + } + AddOrderItem(list) + ( + // NOTE jvs 6-Feb-2004: See comments at top of file for why + // hint is necessary here. + LOOKAHEAD(2) AddOrderItem(list) + )* + { + return new SqlNodeList(list, s.addAll(list).pos()); + } +} + +/** + * Parses one item in an ORDER BY clause, and adds it to a list. + */ +void AddOrderItem(List list) : +{ + SqlNode e; +} +{ + e = Expression(ExprContext.ACCEPT_SUB_QUERY) + ( + + | { + e = SqlStdOperatorTable.DESC.createCall(getPos(), e); + } + )? + ( + LOOKAHEAD(2) + { + e = SqlStdOperatorTable.NULLS_FIRST.createCall(getPos(), e); + } + | + { + e = SqlStdOperatorTable.NULLS_LAST.createCall(getPos(), e); + } + )? + { + list.add(e); + } +} + +/** Wraps a table reference in a call to OVER if an optional "OVER" clause + * is present (if the dialect supports OVER for table expressions). 
*/ +SqlNode Over(SqlNode tableRef) : +{ + final SqlNode over; +} +{ + over = TableOverOpt() { + if (over != null) { + return SqlStdOperatorTable.OVER.createCall( + getPos(), checkNotJoin(tableRef), over); + } else { + return tableRef; + } + } +} + +/** Wraps a table reference in a call to LATERAL if {@code lateral} is true. */ +JAVACODE SqlNode addLateral(SqlNode tableRef, boolean lateral) { + return lateral + ? SqlStdOperatorTable.LATERAL.createCall(getPos(), + checkNotJoin(tableRef)) + : tableRef; +} + +/** + * Parses a FOR SYSTEM_TIME clause following a table expression. + */ +SqlSnapshot Snapshot(SqlNode tableRef) : +{ + final Span s; + final SqlNode e; +} +{ + { s = span(); } + // Syntax for temporal table in + // standard SQL 2011 IWD 9075-2:201?(E) 7.6
+ // supports grammar as following: + // 1. datetime literal + // 2. datetime value function, i.e. CURRENT_TIMESTAMP + // 3. datetime term in 1 or 2 +(or -) interval term + + // We extend to support column reference, use Expression + // to simplify the parsing code. + e = Expression(ExprContext.ACCEPT_NON_QUERY) { + return new SqlSnapshot(s.end(this), tableRef, e); + } +} + +/** Parses a PIVOT clause following a table expression. */ +SqlNode Pivot(SqlNode tableRef) : +{ + final Span s; + final Span s2; + final List aggList = new ArrayList(); + final List valueList = new ArrayList(); + final SqlNodeList axisList; + final SqlNodeList inList; +} +{ + { s = span(); checkNotJoin(tableRef); } + + AddPivotAgg(aggList) ( AddPivotAgg(aggList) )* + axisList = SimpleIdentifierOrList() + { s2 = span(); } + [ AddPivotValue(valueList) ( AddPivotValue(valueList) )* ] + { + inList = new SqlNodeList(valueList, s2.end(this)); + } + + { + return new SqlPivot(s.end(this), tableRef, + new SqlNodeList(aggList, SqlParserPos.sum(aggList)), + axisList, inList); + } +} + +void AddPivotAgg(List list) : +{ + final SqlNode e; + final SqlIdentifier alias; +} +{ + e = NamedFunctionCall() + ( + // Because babel put FOR into non-reserved keyword set. + LOOKAHEAD({getToken(1).kind != COMMA && getToken(1).kind != FOR}) + [ ] alias = SimpleIdentifier() { + list.add( + SqlStdOperatorTable.AS.createCall(Span.of(e).end(this), e, + alias)); + } + | + { list.add(e); } + ) +} + +void AddPivotValue(List list) : +{ + final SqlNode e; + final SqlNodeList tuple; + final SqlIdentifier alias; +} +{ + e = RowConstructor() { tuple = SqlParserUtil.stripRow(e); } + ( + [ ] alias = SimpleIdentifier() { + list.add( + SqlStdOperatorTable.AS.createCall(Span.of(tuple).end(this), + tuple, alias)); + } + | + { list.add(tuple); } + ) +} + +/** Parses an UNPIVOT clause following a table expression. 
*/ +SqlNode Unpivot(SqlNode tableRef) : +{ + final Span s; + final boolean includeNulls; + final SqlNodeList measureList; + final SqlNodeList axisList; + final Span s2; + final List values = new ArrayList(); + final SqlNodeList inList; +} +{ + { s = span(); checkNotJoin(tableRef); } + ( + { includeNulls = true; } + | { includeNulls = false; } + | { includeNulls = false; } + ) + + measureList = SimpleIdentifierOrList() + axisList = SimpleIdentifierOrList() + + { s2 = span(); } + AddUnpivotValue(values) ( AddUnpivotValue(values) )* + + { inList = new SqlNodeList(values, s2.end(this)); } + { + return new SqlUnpivot(s.end(this), tableRef, includeNulls, measureList, + axisList, inList); + } +} + +void AddUnpivotValue(List list) : +{ + final SqlNodeList columnList; + final SqlNode values; +} +{ + columnList = SimpleIdentifierOrList() + ( + values = RowConstructor() { + final SqlNodeList valueList = SqlParserUtil.stripRow(values); + list.add( + SqlStdOperatorTable.AS.createCall(Span.of(columnList).end(this), + columnList, valueList)); + } + | + { list.add(columnList); } + ) +} + +/** + * Parses a MATCH_RECOGNIZE clause following a table expression. 
+ */ +SqlMatchRecognize MatchRecognize(SqlNode tableRef) : +{ + final Span s, s0, s1, s2; + final SqlNodeList measureList; + final SqlNodeList partitionList; + final SqlNodeList orderList; + final SqlNode pattern; + final SqlLiteral interval; + final SqlNodeList patternDefList; + final SqlNode after; + final SqlNode var; + final SqlLiteral rowsPerMatch; + final SqlNodeList subsetList; + final SqlLiteral isStrictStarts; + final SqlLiteral isStrictEnds; +} +{ + { s = span(); checkNotJoin(tableRef); } + ( + { s2 = span(); } + partitionList = ExpressionCommaList(s2, ExprContext.ACCEPT_NON_QUERY) + | + { partitionList = SqlNodeList.EMPTY; } + ) + ( + orderList = OrderBy(true) + | + { orderList = SqlNodeList.EMPTY; } + ) + ( + + measureList = MeasureColumnCommaList(span()) + | + { measureList = SqlNodeList.EMPTY; } + ) + ( + { s0 = span(); } { + rowsPerMatch = SqlMatchRecognize.RowsPerMatchOption.ONE_ROW.symbol(s0.end(this)); + } + | + { s0 = span(); } { + rowsPerMatch = SqlMatchRecognize.RowsPerMatchOption.ALL_ROWS.symbol(s0.end(this)); + } + | { rowsPerMatch = null; } + ) + ( + { s1 = span(); } + ( + + ( + LOOKAHEAD(2) + { + after = SqlMatchRecognize.AfterOption.SKIP_TO_NEXT_ROW + .symbol(s1.end(this)); + } + | + LOOKAHEAD(2) + var = SimpleIdentifier() { + after = SqlMatchRecognize.SKIP_TO_FIRST.createCall( + s1.end(var), var); + } + | + // This "LOOKAHEAD({true})" is a workaround for Babel. + // Because of babel parser uses option "LOOKAHEAD=2" globally, + // JavaCC generates something like "LOOKAHEAD(2, [] SimpleIdentifier())" + // here. But the correct LOOKAHEAD should be + // "LOOKAHEAD(2, [ LOOKAHEAD(2, SimpleIdentifier()) ] + // SimpleIdentifier())" which have the syntactic lookahead for considered. + // + // Overall LOOKAHEAD({true}) is even better as this is the last branch in the + // choice. 
+ LOOKAHEAD({true}) + [ LOOKAHEAD(2, SimpleIdentifier()) ] var = SimpleIdentifier() { + after = SqlMatchRecognize.SKIP_TO_LAST.createCall( + s1.end(var), var); + } + ) + | + { + after = SqlMatchRecognize.AfterOption.SKIP_PAST_LAST_ROW + .symbol(s1.end(this)); + } + ) + | { after = null; } + ) + + + ( + { isStrictStarts = SqlLiteral.createBoolean(true, getPos()); } + | { isStrictStarts = SqlLiteral.createBoolean(false, getPos()); } + ) + pattern = PatternExpression() + ( + { isStrictEnds = SqlLiteral.createBoolean(true, getPos()); } + | { isStrictEnds = SqlLiteral.createBoolean(false, getPos()); } + ) + + ( + interval = IntervalLiteral() + | { interval = null; } + ) + ( + subsetList = SubsetDefinitionCommaList(span()) + | { subsetList = SqlNodeList.EMPTY; } + ) + + patternDefList = PatternDefinitionCommaList(span()) + { + return new SqlMatchRecognize(s.end(this), tableRef, + pattern, isStrictStarts, isStrictEnds, patternDefList, measureList, + after, subsetList, rowsPerMatch, partitionList, orderList, interval); + } +} + +SqlNodeList MeasureColumnCommaList(Span s) : +{ + final List list = new ArrayList(); +} +{ + AddMeasureColumn(list) + ( AddMeasureColumn(list) )* + { return new SqlNodeList(list, s.addAll(list).pos()); } +} + +void AddMeasureColumn(List list) : +{ + final SqlNode e; + final SqlIdentifier alias; +} +{ + e = Expression(ExprContext.ACCEPT_NON_QUERY) + + alias = SimpleIdentifier() { + list.add(SqlStdOperatorTable.AS.createCall(Span.of(e).end(this), e, alias)); + } +} + +SqlNode PatternExpression() : +{ + SqlNode left; + SqlNode right; +} +{ + left = PatternTerm() + ( + + right = PatternTerm() { + left = SqlStdOperatorTable.PATTERN_ALTER.createCall( + Span.of(left).end(right), left, right); + } + )* + { + return left; + } +} + +SqlNode PatternTerm() : +{ + SqlNode left; + SqlNode right; +} +{ + left = PatternFactor() + ( + right = PatternFactor() { + left = SqlStdOperatorTable.PATTERN_CONCAT.createCall( + Span.of(left).end(right), left, right); + } + )* 
+ { + return left; + } +} + +SqlNode PatternFactor() : +{ + final SqlNode e; + final SqlNode extra; + SqlLiteral startNum = null; + SqlLiteral endNum = null; + SqlLiteral reluctant = null; +} +{ + e = PatternPrimary() + ( + LOOKAHEAD(1) + ( + { + startNum = LITERAL_ZERO; + endNum = LITERAL_MINUS_ONE; + } + | + { + startNum = LITERAL_ONE; + endNum = LITERAL_MINUS_ONE; + } + | + { + startNum = LITERAL_ZERO; + endNum = LITERAL_ONE; + } + | + + ( + startNum = UnsignedNumericLiteral() + ( + + ( + endNum = UnsignedNumericLiteral() + | + { endNum = LITERAL_MINUS_ONE; } + ) + | + { endNum = startNum; } + ) + + | + + endNum = UnsignedNumericLiteral() + + { startNum = LITERAL_MINUS_ONE; } + | + extra = PatternExpression() { + return SqlStdOperatorTable.PATTERN_CONCAT.createCall( + Span.of(e).end(this), e, + SqlStdOperatorTable.PATTERN_EXCLUDE.createCall( + Span.of(extra).end(this), extra)); + } + ) + ) + ( + { + reluctant = SqlLiteral.createBoolean( + startNum.intValue(true) != endNum.intValue(true), + SqlParserPos.ZERO); + } + | + { reluctant = SqlLiteral.createBoolean(false, SqlParserPos.ZERO); } + ) + | + { return e; } + ) + { + return SqlStdOperatorTable.PATTERN_QUANTIFIER.createCall( + span().end(e), e, startNum, endNum, reluctant); + } +} + +SqlNode PatternPrimary() : +{ + final Span s; + SqlNode e; + final List list; +} +{ + e = SimpleIdentifier() { return e; } +| + e = PatternExpression() { return e; } +| + { s = span(); } + e = PatternExpression() + { + return SqlStdOperatorTable.PATTERN_EXCLUDE.createCall(s.end(this), e); + } +| + ( + { s = span(); list = new ArrayList(); } + + e = PatternExpression() { list.add(e); } + ( e = PatternExpression() { list.add(e); } )* + { + return SqlStdOperatorTable.PATTERN_PERMUTE.createCall( + s.end(this), list); + } + ) +} + +SqlNodeList SubsetDefinitionCommaList(Span s) : +{ + final List list = new ArrayList(); +} +{ + AddSubsetDefinition(list) + ( AddSubsetDefinition(list) )* + { return new SqlNodeList(list, 
s.addAll(list).pos()); } +} + +void AddSubsetDefinition(List list) : +{ + final SqlNode var; + final SqlNodeList varList; +} +{ + var = SimpleIdentifier() + + + varList = ExpressionCommaList(span(), ExprContext.ACCEPT_NON_QUERY) + { + list.add( + SqlStdOperatorTable.EQUALS.createCall(span().end(var), var, + varList)); + } +} + +SqlNodeList PatternDefinitionCommaList(Span s) : +{ + SqlNode e; + final List eList = new ArrayList(); +} +{ + e = PatternDefinition() { + eList.add(e); + } + ( + + e = PatternDefinition() { + eList.add(e); + } + )* + { + return new SqlNodeList(eList, s.addAll(eList).pos()); + } +} + +SqlNode PatternDefinition() : +{ + final SqlNode var; + final SqlNode e; +} +{ + var = SimpleIdentifier() + + e = Expression(ExprContext.ACCEPT_SUB_QUERY) { + return SqlStdOperatorTable.AS.createCall(Span.of(var, e).pos(), e, var); + } +} + +// ---------------------------------------------------------------------------- +// Expressions + +/** + * Parses a SQL expression (such as might occur in a WHERE clause) followed by + * the end-of-file symbol. + */ +SqlNode SqlExpressionEof() : +{ + SqlNode e; +} +{ + e = Expression(ExprContext.ACCEPT_SUB_QUERY) () + { + return e; + } +} + +/** + * Parses either a row expression or a query expression without ORDER BY. + * + *
+ * <p>Examples of valid queries:
+ *
+ * <ul>
+ * <li>{@code SELECT c FROM t}
+ * <li>{@code SELECT c} (valid in some dialects)
+ * <li>{@code SELECT c FROM t UNION SELECT c2 FROM t2}
+ * <li>{@code WITH q AS (SELECT 1) SELECT * FROM q}
+ * <li>{@code VALUES (1, 2)}
+ * <li>{@code TABLE t}
+ * </ul>
+ *
+ * <p>Non-examples:
+ *
+ * <ul>
+ * <li>{@code emp CROSS JOIN dept}
+ * <li>{@code SELECT c FROM t ORDER BY c}
+ * <li>{@code (SELECT c FROM t)}
+ * </ul>
+ */ +SqlNode QueryOrExpr(ExprContext exprContext) : +{ + SqlNodeList withList = null; + final SqlNode e; + final List list = new ArrayList(); +} +{ + [ withList = WithList() ] + e = LeafQueryOrExpr(exprContext) { list.add(e); } + ( AddSetOpQuery(list, exprContext) )* + { return addWith(withList, SqlParserUtil.toTree(list)); } +} + +SqlNode Query(ExprContext exprContext) : +{ + SqlNodeList withList = null; + final SqlNode e; + final List list = new ArrayList(); +} +{ + [ withList = WithList() ] + e = LeafQuery(exprContext) { list.add(e); } + ( AddSetOpQuery(list, exprContext) )* + { return addWith(withList, SqlParserUtil.toTree(list)); } +} + +JAVACODE SqlNode addWith(SqlNodeList withList, SqlNode e) { + return withList == null + ? e + : new SqlWith(withList.getParserPosition(), withList, e); +} + +/** Parses a set operator (e.g. UNION or INTERSECT) + * followed by a query or expression, + * and adds both to {@code list}. */ +void AddSetOpQueryOrExpr(List list, ExprContext exprContext) : +{ + final SqlOperator op; + final SqlParserPos pos; + final SqlNode e; +} +{ + { + if (list.size() == 1 && !((SqlNode) list.get(0)).isA(SqlKind.QUERY)) { + // whoops, expression we just parsed wasn't a query, + // but we're about to see something like UNION, so + // force an exception retroactively + checkNonQueryExpression(ExprContext.ACCEPT_QUERY); + } + } + op = BinaryQueryOperator() { + // ensure a query is legal in this context + pos = getPos(); + checkQueryExpression(exprContext); + } + e = LeafQueryOrExpr(ExprContext.ACCEPT_QUERY) { + list.add(new SqlParserUtil.ToTreeListItem(op, pos)); + list.add(e); + } +} + +/** Parses a set operator (e.g. UNION or INTERSECT) + * followed by a query, + * and adds both to {@code list}. 
*/ +void AddSetOpQuery(List list, ExprContext exprContext) : +{ + final SqlOperator op; + final SqlParserPos pos; + final SqlNode e; +} +{ + { + if (list.size() == 1 && !((SqlNode) list.get(0)).isA(SqlKind.QUERY)) { + // whoops, expression we just parsed wasn't a query, + // but we're about to see something like UNION, so + // force an exception retroactively + checkNonQueryExpression(ExprContext.ACCEPT_QUERY); + } + } + op = BinaryQueryOperator() { + // ensure a query is legal in this context + pos = getPos(); + checkQueryExpression(exprContext); + } + e = LeafQueryOrExpr(ExprContext.ACCEPT_QUERY) { + list.add(new SqlParserUtil.ToTreeListItem(op, pos)); + list.add(e); + } +} + +SqlNodeList WithList() : +{ + final Span s; + final List list = new ArrayList(); + boolean recursive = false; +} +{ + [ { recursive = true; } ]{ s = span(); } + AddWithItem(list, SqlLiteral.createBoolean(recursive, getPos())) + ( AddWithItem(list, SqlLiteral.createBoolean(recursive, getPos())) )* + { return new SqlNodeList(list, s.end(this)); } +} + +void AddWithItem(List list, SqlLiteral recursive) : +{ + final SqlIdentifier id; + final SqlNodeList columnList; + final SqlNode definition; +} +{ + id = SimpleIdentifier() + ( columnList = ParenthesizedSimpleIdentifierList() | { columnList = null; } ) + + definition = ParenthesizedExpression(ExprContext.ACCEPT_QUERY) + { list.add(new SqlWithItem(id.getParserPosition(), id, columnList, definition, recursive)); } +} + +/** + * Parses either a row expression, a leaf query expression, or + * a parenthesized expression of any kind. + */ +SqlNode LeafQueryOrExpr(ExprContext exprContext) : +{ + SqlNode e; +} +{ + e = LeafQuery(exprContext) { return e; } +| + e = Expression(exprContext) { return e; } +} + +/** As {@link #Expression} but appends to a list. 
*/ +void AddExpression(List list, ExprContext exprContext) : +{ + final SqlNode e; +} +{ + e = Expression(exprContext) { list.add(e); } +} + +/** + * Parses a row expression or a parenthesized expression of any kind. + */ +SqlNode Expression(ExprContext exprContext) : +{ + final List list; +} +{ + list = Expression2(exprContext) { return SqlParserUtil.toTree(list); } +} + +void AddExpression2b(List list, ExprContext exprContext) : +{ + SqlNode e; + SqlOperator op; + SqlNode ext; +} +{ + ( + LOOKAHEAD(1) + op = PrefixRowOperator() { + checkNonQueryExpression(exprContext); + list.add(new SqlParserUtil.ToTreeListItem(op, getPos())); + } + )* + e = Expression3(exprContext) { + list.add(e); + } + ( + LOOKAHEAD(2) + ext = RowExpressionExtension() { + list.add( + new SqlParserUtil.ToTreeListItem( + SqlStdOperatorTable.DOT, getPos())); + list.add(ext); + } + )* +} + +/** + * Parses a binary row expression, or a parenthesized expression of any + * kind. + * + *

The result is as a flat list of operators and operands. The top-level + * call to get an expression should call {@link #Expression}, but lower-level + * calls should call this, to give the parser the opportunity to associate + * operator calls. + * + *

For example 'a = b like c = d' should come out '((a = b) like c) = d' + * because LIKE and '=' have the same precedence, but tends to come out as '(a + * = b) like (c = d)' because (a = b) and (c = d) are parsed as separate + * expressions. + */ +List Expression2(ExprContext exprContext) : +{ + final List list = new ArrayList(); + List list2; + final List list3 = new ArrayList(); + SqlNodeList nodeList; + SqlNode e; + SqlOperator itemOp; + SqlOperator op; + SqlIdentifier p; + final Span s = span(); +} +{ + AddExpression2b(list, exprContext) + ( + LOOKAHEAD(2) + ( + LOOKAHEAD(2) + ( + // Special case for "IN", because RHS of "IN" is the only place + // that an expression-list is allowed ("exp IN (exp1, exp2)"). + LOOKAHEAD(2) { + checkNonQueryExpression(exprContext); + } + ( + { op = SqlStdOperatorTable.NOT_IN; } + | + { op = SqlStdOperatorTable.IN; } + | + { final SqlKind k; } + k = comp() + ( + { op = SqlStdOperatorTable.some(k); } + | + { op = SqlStdOperatorTable.some(k); } + | + { op = SqlStdOperatorTable.all(k); } + ) + ) + { s.clear().add(this); } + nodeList = ParenthesizedQueryOrCommaList(ExprContext.ACCEPT_NONCURSOR) + { + list.add(new SqlParserUtil.ToTreeListItem(op, s.pos())); + s.add(nodeList); + // special case for stuff like IN (s1 UNION s2) + if (nodeList.size() == 1) { + SqlNode item = nodeList.get(0); + if (item.isA(SqlKind.QUERY)) { + list.add(item); + } else { + list.add(nodeList); + } + } else { + list.add(nodeList); + } + } + | + LOOKAHEAD(2) { + checkNonQueryExpression(exprContext); + } + ( + { + op = SqlStdOperatorTable.NOT_BETWEEN; + s.clear().add(this); + } + [ + { op = SqlStdOperatorTable.SYMMETRIC_NOT_BETWEEN; } + | + + ] + | + + { + op = SqlStdOperatorTable.BETWEEN; + s.clear().add(this); + } + [ + { op = SqlStdOperatorTable.SYMMETRIC_BETWEEN; } + | + + ] + ) + AddExpression2b(list3, ExprContext.ACCEPT_SUB_QUERY) { + list.add(new SqlParserUtil.ToTreeListItem(op, s.pos())); + list.addAll(list3); + list3.clear(); + } + | + LOOKAHEAD(2) { + 
checkNonQueryExpression(exprContext); + s.clear().add(this); + } + ( + ( + + ( + { op = SqlStdOperatorTable.NOT_LIKE; } + | + { op = SqlLibraryOperators.NOT_ILIKE; } + | + { op = SqlLibraryOperators.NOT_RLIKE; } + | + { op = SqlStdOperatorTable.NOT_SIMILAR_TO; } + ) + | + { op = SqlStdOperatorTable.LIKE; } + | + { op = SqlLibraryOperators.ILIKE; } + | + { op = SqlLibraryOperators.RLIKE; } + | + { op = SqlStdOperatorTable.SIMILAR_TO; } + ) + <#if (parser.includePosixOperators!default.parser.includePosixOperators)> + | + { op = SqlStdOperatorTable.NEGATED_POSIX_REGEX_CASE_SENSITIVE; } + [ { op = SqlStdOperatorTable.NEGATED_POSIX_REGEX_CASE_INSENSITIVE; } ] + | + { op = SqlStdOperatorTable.POSIX_REGEX_CASE_SENSITIVE; } + [ { op = SqlStdOperatorTable.POSIX_REGEX_CASE_INSENSITIVE; } ] + + ) + list2 = Expression2(ExprContext.ACCEPT_SUB_QUERY) { + list.add(new SqlParserUtil.ToTreeListItem(op, s.pos())); + list.addAll(list2); + } + [ + LOOKAHEAD(2) + e = Expression3(ExprContext.ACCEPT_SUB_QUERY) { + s.clear().add(this); + list.add( + new SqlParserUtil.ToTreeListItem( + SqlStdOperatorTable.ESCAPE, s.pos())); + list.add(e); + } + ] + | + <#list (parser.extraBinaryExpressions!default.parser.extraBinaryExpressions) as extra > + ${extra}(list, exprContext, s) + | + + LOOKAHEAD(3) op = BinaryRowOperator() { + checkNonQueryExpression(exprContext); + list.add(new SqlParserUtil.ToTreeListItem(op, getPos())); + } + AddExpression2b(list, ExprContext.ACCEPT_SUB_QUERY) + | + + ( { itemOp = SqlLibraryOperators.OFFSET; } { e = Expression(ExprContext.ACCEPT_SUB_QUERY); } + | { itemOp = SqlLibraryOperators.ORDINAL; } { e = Expression(ExprContext.ACCEPT_SUB_QUERY); } + | { itemOp = SqlLibraryOperators.SAFE_OFFSET; } { e = Expression(ExprContext.ACCEPT_SUB_QUERY); } + | { itemOp = SqlLibraryOperators.SAFE_ORDINAL; } { e = Expression(ExprContext.ACCEPT_SUB_QUERY); } + | { itemOp = SqlStdOperatorTable.ITEM; } e = Expression(ExprContext.ACCEPT_SUB_QUERY) + ) + { + list.add( + new 
SqlParserUtil.ToTreeListItem( + itemOp, getPos())); + list.add(e); + } + ( + LOOKAHEAD(2) + p = SimpleIdentifier() { + list.add( + new SqlParserUtil.ToTreeListItem( + SqlStdOperatorTable.DOT, getPos())); + list.add(p); + } + )* + | + { + checkNonQueryExpression(exprContext); + } + op = PostfixRowOperator() { + list.add(new SqlParserUtil.ToTreeListItem(op, getPos())); + } + ) + )+ + { + return list; + } + | + { + return list; + } + ) +} + +/** Parses a comparison operator inside a SOME / ALL predicate. */ +SqlKind comp() : +{ +} +{ + { return SqlKind.LESS_THAN; } +| + { return SqlKind.LESS_THAN_OR_EQUAL; } +| + { return SqlKind.GREATER_THAN; } +| + { return SqlKind.GREATER_THAN_OR_EQUAL; } +| + { return SqlKind.EQUALS; } +| + { return SqlKind.NOT_EQUALS; } +| + { + if (!this.conformance.isBangEqualAllowed()) { + throw SqlUtil.newContextException(getPos(), RESOURCE.bangEqualNotAllowed()); + } + return SqlKind.NOT_EQUALS; + } +} + +/** + * Parses a unary row expression, or a parenthesized expression of any + * kind. 
+ */ +SqlNode Expression3(ExprContext exprContext) : +{ + final SqlNode e; + final SqlNodeList list; + final SqlNodeList list1; + final Span s; + final Span rowSpan; +} +{ + LOOKAHEAD(2) + e = AtomicRowExpression() + { + checkNonQueryExpression(exprContext); + return e; + } +| + e = CursorExpression(exprContext) { return e; } +| + LOOKAHEAD(3) + { + s = span(); + } + list = ParenthesizedQueryOrCommaList(exprContext) { + if (exprContext != ExprContext.ACCEPT_ALL + && exprContext != ExprContext.ACCEPT_CURSOR + && !this.conformance.allowExplicitRowValueConstructor()) + { + throw SqlUtil.newContextException(s.end(list), + RESOURCE.illegalRowExpression()); + } + return SqlStdOperatorTable.ROW.createCall(list); + } +| + ( + { rowSpan = span(); } + | { rowSpan = null; } + ) + list1 = ParenthesizedQueryOrCommaList(exprContext) { + if (rowSpan != null) { + // interpret as row constructor + return SqlStdOperatorTable.ROW.createCall(rowSpan.end(list1), + (List) list1); + } + } + [ + LOOKAHEAD(2) + /* TODO: + ( + op = periodOperator() + list2 = ParenthesizedQueryOrCommaList(exprContext) + { + if (list1.size() != 2 || list2.size() != 2) { + throw SqlUtil.newContextException( + list1.getParserPosition().plus( + list2.getParserPosition()), + RESOURCE.illegalOverlaps()); + } + for (SqlNode node : list2) { + list1.add(node); + } + return op.createCall( + list1.getParserPosition().plus(list2.getParserPosition()), + list1.toArray()); + } + ) + | + */ + ( + e = IntervalQualifier() + { + if ((list1.size() == 1) + && list1.get(0) instanceof SqlCall) + { + final SqlCall call = (SqlCall) list1.get(0); + if (call.getKind() == SqlKind.MINUS + && call.operandCount() == 2) { + return SqlStdOperatorTable.MINUS_DATE.createCall( + Span.of(list1).end(this), call.operand(0), + call.operand(1), e); + } + } + throw SqlUtil.newContextException(span().end(list1), + RESOURCE.illegalMinusDate()); + } + ) + ] + { + if (list1.size() == 1) { + // interpret as single value or query + return 
list1.get(0).clone(list1.getParserPosition()); + } else { + // interpret as row constructor + return SqlStdOperatorTable.ROW.createCall(span().end(list1), + (List) list1); + } + } +} + +/** + * Parses a lambda expression. + */ +SqlNode LambdaExpression() : +{ + final SqlNodeList parameters; + final SqlNode expression; + final Span s; +} +{ + parameters = SimpleIdentifierOrListOrEmpty() + { s = span(); } + expression = Expression(ExprContext.ACCEPT_NON_QUERY) + { + return new SqlLambda(s.end(this), parameters, expression); + } +} + +/** + * List of simple identifiers in parentheses or empty parentheses or one simple identifier. + *
+ * <p>Examples:
+ * <ul>
+ * <li>{@code ()}
+ * <li>{@code DEPTNO}
+ * <li>{@code (EMPNO, DEPTNO)}
+ * </ul>
+ */ +SqlNodeList SimpleIdentifierOrListOrEmpty() : +{ + SqlNodeList list; +} +{ + LOOKAHEAD(2) + { return SqlNodeList.EMPTY; } +| + list = SimpleIdentifierOrList() { return list; } +} + +SqlOperator periodOperator() : +{ +} +{ + { return SqlStdOperatorTable.OVERLAPS; } +| + LOOKAHEAD(2) + { return SqlStdOperatorTable.IMMEDIATELY_PRECEDES; } +| + { return SqlStdOperatorTable.PRECEDES; } +| + { return SqlStdOperatorTable.IMMEDIATELY_SUCCEEDS; } +| + { return SqlStdOperatorTable.SUCCEEDS; } +| + { return SqlStdOperatorTable.PERIOD_EQUALS; } +} + +/** + * Parses a COLLATE clause + */ +SqlCollation CollateClause() : +{ +} +{ + + { + return new SqlCollation( + getToken(0).image, SqlCollation.Coercibility.EXPLICIT); + } +} + +/** + * Numeric literal or parameter; used in LIMIT, OFFSET and FETCH clauses. + */ +SqlNode UnsignedNumericLiteralOrParam() : +{ + final SqlNode e; +} +{ + ( + e = UnsignedNumericLiteral() + | + e = DynamicParam() + ) + { return e; } +} + +/** + * Parses a row expression extension, it can be either an identifier, + * or a call to a named function. + */ +SqlNode RowExpressionExtension() : +{ + final SqlFunctionCategory funcType = SqlFunctionCategory.USER_DEFINED_FUNCTION; + final SqlIdentifier p; + final Span s; + final List args; + final SqlLiteral quantifier; +} +{ + p = SimpleIdentifier() + ( + LOOKAHEAD( ) { s = span(); } + ( + LOOKAHEAD(2) { + quantifier = null; + args = ImmutableList.of(SqlIdentifier.star(getPos())); + } + + | + LOOKAHEAD(2) { + quantifier = null; + args = ImmutableList.of(); + } + | + args = FunctionParameterList(ExprContext.ACCEPT_SUB_QUERY) { + quantifier = (SqlLiteral) args.get(0); + args.remove(0); + } + ) + { return createCall(p, s.end(this), funcType, quantifier, args); } + | + { return p; } + ) +} + +/** + * Parses a call to the STRING_AGG aggregate function (or to an aggregate + * function with similar syntax: ARRAY_AGG, ARRAY_CONCAT_AGG, GROUP_CONCAT). 
+ */ +SqlCall StringAggFunctionCall() : +{ + final Span s, s2; + final SqlOperator op; + final List args = new ArrayList(); + final SqlLiteral qualifier; + final SqlNodeList orderBy; + final Pair nullTreatment; + final SqlNode separator; +} +{ + ( + { s = span(); op = SqlLibraryOperators.ARRAY_AGG; } + | { s = span(); op = SqlLibraryOperators.ARRAY_CONCAT_AGG; } + | { s = span(); op = SqlLibraryOperators.GROUP_CONCAT; } + | { s = span(); op = SqlLibraryOperators.STRING_AGG; } + ) + + ( + qualifier = AllOrDistinct() + | { qualifier = null; } + ) + AddArg(args, ExprContext.ACCEPT_SUB_QUERY) + ( + { + // a comma-list can't appear where only a query is expected + // TODO: the following line is a no-op; remove it? + checkNonQueryExpression(ExprContext.ACCEPT_SUB_QUERY); + } + AddArg(args, ExprContext.ACCEPT_SUB_QUERY) + )* + ( + nullTreatment = NullTreatment() + | { nullTreatment = null; } + ) + [ + orderBy = OrderBy(true) { + args.add(orderBy); + } + ] + [ + { s2 = span(); } separator = StringLiteral() { + args.add(SqlInternalOperators.SEPARATOR.createCall(s2.end(this), separator)); + } + ] + + { + SqlCall call = op.createCall(qualifier, s.end(this), args); + if (nullTreatment != null) { + // Wrap in RESPECT_NULLS or IGNORE_NULLS. + call = nullTreatment.right.createCall(nullTreatment.left, call); + } + return call; + } +} + +/** + * Parses both the standard and the BigQuery PERCENTILE_CONT/PERCENTILE_DISC + * functions. + * + *

The standard is of the form "PERCENTILE_CONT(fraction)" while BigQuery is + * of the form "PERCENTILE_CONT(value, fraction [ {RESPECT | IGNORE} NULLS ] )". + * Handles the parsing of the operator and its operands but not the WITHIN GROUP + * (for the standard) or OVER (for BigQuery) clauses. + */ +SqlCall PercentileFunctionCall() : +{ + final Span s; + SqlOperator op; + final SqlNode e; + final List args = new ArrayList(); + final Pair nullTreatment; +} +{ + ( + { op = SqlStdOperatorTable.PERCENTILE_CONT; } + | { op = SqlStdOperatorTable.PERCENTILE_DISC; } + ) + { s = span(); } + + AddArg(args, ExprContext.ACCEPT_SUB_QUERY) + ( + { + return op.createCall(s.end(this), args); + } + | + + e = NumericLiteral() { args.add(e); } + ( + nullTreatment = NullTreatment() + | { nullTreatment = null; } + ) + + { + op = + op == SqlStdOperatorTable.PERCENTILE_CONT + ? SqlLibraryOperators.PERCENTILE_CONT2 + : SqlLibraryOperators.PERCENTILE_DISC2; + SqlCall call = op.createCall(s.end(this), args); + if (nullTreatment != null) { + // Wrap in RESPECT_NULLS or IGNORE_NULLS. + call = nullTreatment.right.createCall(nullTreatment.left, call); + } + return call; + } + ) +} + + +/** + * Parses an atomic row expression. 
+ */ +SqlNode AtomicRowExpression() : +{ + final SqlNode e; +} +{ + ( + LOOKAHEAD(2) + e = LiteralOrIntervalExpression() + | + e = DynamicParam() + | + LOOKAHEAD(2) + e = BuiltinFunctionCall() + | + e = JdbcFunctionCall() + | + e = MultisetConstructor() + | + e = ArrayConstructor() + | + LOOKAHEAD(3) + e = MapConstructor() + | + e = PeriodConstructor() + | + // NOTE jvs 18-Jan-2005: use syntactic lookahead to discriminate + // compound identifiers from function calls in which the function + // name is a compound identifier + LOOKAHEAD( [] FunctionName() ) + e = NamedFunctionCall() + | + e = ContextVariable() + | + e = CompoundIdentifier() + | + e = NewSpecification() + | + e = CaseExpression() + | + e = SequenceExpression() + ) + { return e; } +} + +SqlNode CaseExpression() : +{ + final Span whenSpan = Span.of(); + final Span thenSpan = Span.of(); + final Span s; + SqlNode e; + final SqlNode caseIdentifier; + final SqlNode elseClause; + final List whenList = new ArrayList(); + final List thenList = new ArrayList(); +} +{ + { s = span(); } + ( + caseIdentifier = Expression(ExprContext.ACCEPT_SUB_QUERY) + | { caseIdentifier = null; } + ) + ( + { whenSpan.add(this); } + e = ExpressionCommaList(s, ExprContext.ACCEPT_SUB_QUERY) { + if (((SqlNodeList) e).size() == 1) { + e = ((SqlNodeList) e).get(0); + } + whenList.add(e); + } + { thenSpan.add(this); } + e = Expression(ExprContext.ACCEPT_SUB_QUERY) { + thenList.add(e); + } + )+ + ( + elseClause = Expression(ExprContext.ACCEPT_SUB_QUERY) + | { elseClause = null; } + ) + { + return SqlCase.createSwitched(s.end(this), caseIdentifier, + new SqlNodeList(whenList, whenSpan.addAll(whenList).pos()), + new SqlNodeList(thenList, thenSpan.addAll(thenList).pos()), + elseClause); + } +} + +SqlCall SequenceExpression() : +{ + final Span s; + final SqlOperator f; + final SqlNode sequenceRef; +} +{ + ( + { f = SqlStdOperatorTable.NEXT_VALUE; s = span(); } + | + LOOKAHEAD(3) + { f = SqlStdOperatorTable.CURRENT_VALUE; s = span(); } + ) + 
sequenceRef = CompoundIdentifier() { + return f.createCall(s.end(sequenceRef), sequenceRef); + } +} + +/** + * Parses "SET <NAME> = VALUE" or "RESET <NAME>", without a leading + * "ALTER <SCOPE>". + */ +SqlSetOption SqlSetOption(Span s, String scope) : +{ + SqlIdentifier name; + final SqlNode val; +} +{ + ( + { + s.add(this); + } + name = CompoundIdentifier() + + ( + val = Literal() + | + val = SimpleIdentifier() + | + { + // OFF is handled by SimpleIdentifier, ON handled here. + val = new SqlIdentifier(token.image.toUpperCase(Locale.ROOT), + getPos()); + } + ) + { + return new SqlSetOption(s.end(val), scope, (SqlNode) name, val); + } + | + { + s.add(this); + } + ( + name = CompoundIdentifier() + | + { + name = new SqlIdentifier(token.image.toUpperCase(Locale.ROOT), + getPos()); + } + ) + { + return new SqlSetOption(s.end(name), scope, (SqlNode) name, null); + } + ) +} + +/** + * Parses an expression for setting or resetting an option in SQL, such as QUOTED_IDENTIFIERS, + * or explain plan level (physical/logical). + */ +SqlAlter SqlAlter() : +{ + final Span s; + final String scope; + final SqlAlter alterNode; +} +{ + { s = span(); } + scope = Scope() + ( +<#-- additional literal parser methods are included here --> +<#list (parser.alterStatementParserMethods!default.parser.alterStatementParserMethods) as method> + alterNode = ${method}(s, scope) + | + + alterNode = ${parser.setOptionParserMethod!default.parser.setOptionParserMethod}(s, scope) + ) + { + return alterNode; + } +} + +String Scope() : +{ +} +{ + ( | ) { return token.image.toUpperCase(Locale.ROOT); } +} + +<#if (parser.createStatementParserMethods!default.parser.createStatementParserMethods)?size != 0> +/** + * Parses a CREATE statement. 
+ */ +SqlCreate SqlCreate() : +{ + final Span s; + boolean replace = false; + final SqlCreate create; +} +{ + { s = span(); } + [ + { + replace = true; + } + ] + ( +<#-- additional literal parser methods are included here --> +<#list (parser.createStatementParserMethods!default.parser.createStatementParserMethods) as method> + create = ${method}(s, replace) + <#sep>| LOOKAHEAD(2) + + ) + { + return create; + } +} + + +<#if (parser.dropStatementParserMethods!default.parser.dropStatementParserMethods)?size != 0> +/** + * Parses a DROP statement. + */ +SqlDrop SqlDrop() : +{ + final Span s; + boolean replace = false; + final SqlDrop drop; +} +{ + { s = span(); } + ( +<#-- additional literal parser methods are included here --> +<#list (parser.dropStatementParserMethods!default.parser.dropStatementParserMethods) as method> + drop = ${method}(s, replace) + <#sep>| + + ) + { + return drop; + } +} + + +<#if (parser.truncateStatementParserMethods!default.parser.truncateStatementParserMethods)?size != 0> +/** + * Parses a TRUNCATE statement. + */ +SqlTruncate SqlTruncate() : +{ + final Span s; + final SqlTruncate truncate; +} +{ + { s = span(); } + ( +<#-- additional literal parser methods are included here --> +<#list (parser.truncateStatementParserMethods!default.parser.truncateStatementParserMethods) as method> + truncate = ${method}(s) + <#sep>| + + ) + { + return truncate; + } +} + + +/** + * Parses a literal expression, allowing continued string literals. + * Usually returns an SqlLiteral, but a continued string literal + * is an SqlCall expression, which concatenates 2 or more string + * literals; the validator reduces this. + * + *

If the context allows both literals and expressions, + * use {@link #LiteralOrIntervalExpression}, which requires less + * lookahead. + */ +SqlNode Literal() : +{ + SqlNode e; +} +{ + ( + e = NonIntervalLiteral() + | + e = IntervalLiteral() + ) + { return e; } +} + +/** Parses a literal that is not an interval literal. */ +SqlNode NonIntervalLiteral() : +{ + final SqlNode e; +} +{ + ( + e = NumericLiteral() + | + e = StringLiteral() + | + e = SpecialLiteral() + | + e = DateTimeLiteral() +<#-- additional literal parser methods are included here --> +<#list (parser.literalParserMethods!default.parser.literalParserMethods) as method> + | + e = ${method} + + ) + { + return e; + } +} + +/** Parses a literal or an interval expression. + * + *

We include them in the same production because it is difficult to + * distinguish interval literals from interval expression (both of which + * start with the {@code INTERVAL} keyword); this way, we can use less + * LOOKAHEAD. */ +SqlNode LiteralOrIntervalExpression() : +{ + final SqlNode e; +} +{ + ( + e = IntervalLiteralOrExpression() + | + e = NonIntervalLiteral() + ) + { return e; } +} + +/** Parses a unsigned numeric literal */ +SqlNumericLiteral UnsignedNumericLiteral() : +{ +final String p; +} +{ + { + return SqlLiteral.createExactNumeric(token.image, getPos()); + } +| + { + return SqlLiteral.createExactNumeric(token.image, getPos()); + } +| + + p = SimpleStringLiteral() { + return SqlParserUtil.parseDecimalLiteral(SqlParserUtil.trim(p, " "), getPos()); + } +| + { + return SqlLiteral.createApproxNumeric(token.image, getPos()); + } +} + +/** Parses a numeric literal (can be signed) */ +SqlLiteral NumericLiteral() : +{ + final SqlNumericLiteral num; + final Span s; +} +{ + num = UnsignedNumericLiteral() { + return num; + } +| + { s = span(); } num = UnsignedNumericLiteral() { + return SqlLiteral.createNegative(num, s.end(this)); + } +| + num = UnsignedNumericLiteral() { + return num; + } +} + +/** Parse a special literal keyword */ +SqlLiteral SpecialLiteral() : +{ +} +{ + { return SqlLiteral.createBoolean(true, getPos()); } +| + { return SqlLiteral.createBoolean(false, getPos()); } +| + { return SqlLiteral.createUnknown(getPos()); } +| + { return SqlLiteral.createNull(getPos()); } +} + +/** + * Parses a string literal. The literal may be continued onto several + * lines. For a simple literal, the result is an SqlLiteral. For a continued + * literal, the result is an SqlCall expression, which concatenates 2 or more + * string literals; the validator reduces this. 
+ * + * @see SqlLiteral#unchain(SqlNode) + * @see SqlLiteral#stringValue(SqlNode) + * + * @return a literal expression + */ +SqlNode StringLiteral() : +{ + String p; + final List frags; + char unicodeEscapeChar = 0; + String charSet = null; + SqlCharStringLiteral literal; +} +{ + // A continued string literal consists of a head fragment and one or more + // tail fragments. Since comments may occur between the fragments, and + // comments are special tokens, each fragment is a token. But since spaces + // or comments may not occur between the prefix and the first quote, the + // head fragment, with any prefix, is one token. + + + { + frags = new ArrayList(); + try { + p = SqlParserUtil.trim(token.image, "xX'"); + frags.add(SqlLiteral.createBinaryString(p, getPos())); + } catch (NumberFormatException ex) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.illegalBinaryString(token.image)); + } + } + ( + // The grammar is ambiguous when a continued literals and a character + // string alias are both possible. For example, in + // SELECT x'01'\n'ab' + // we prefer that 'ab' continues the literal, and is not an alias. + // The following LOOKAHEAD mutes the warning about ambiguity. 
+ LOOKAHEAD(1) + + { + try { + p = SqlParserUtil.trim(token.image, "'"); // no embedded quotes + frags.add(SqlLiteral.createBinaryString(p, getPos())); + } catch (NumberFormatException ex) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.illegalBinaryString(token.image)); + } + } + )* + { + assert !frags.isEmpty(); + if (frags.size() == 1) { + return frags.get(0); // just the head fragment + } else { + SqlParserPos pos2 = SqlParserPos.sum(frags); + return SqlStdOperatorTable.LITERAL_CHAIN.createCall(pos2, frags); + } + } +| + ( + + { charSet = SqlParserUtil.getCharacterSet(token.image); } + | + | { + // TODO jvs 2-Feb-2009: support the explicit specification of + // a character set for Unicode string literals, per SQL:2003 + unicodeEscapeChar = BACKSLASH; + charSet = "UTF16"; + } + ) + { + frags = new ArrayList(); + p = SqlParserUtil.parseString(token.image); + try { + literal = SqlLiteral.createCharString(p, charSet, getPos()); + frags.add(literal); + } catch (java.nio.charset.UnsupportedCharsetException e) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.unknownCharacterSet(charSet)); + } + } + ( + // The grammar is ambiguous when a continued literals and a character + // string alias are both possible. For example, in + // SELECT 'taxi'\n'cab' + // we prefer that 'cab' continues the literal, and is not an alias. + // The following LOOKAHEAD mutes the warning about ambiguity. 
+ LOOKAHEAD(1) + + { + p = SqlParserUtil.parseString(token.image); + try { + literal = SqlLiteral.createCharString(p, charSet, getPos()); + frags.add(literal); + } catch (java.nio.charset.UnsupportedCharsetException e) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.unknownCharacterSet(charSet)); + } + } + )* + [ + + { + if (unicodeEscapeChar == 0) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.unicodeEscapeUnexpected()); + } + String s = SqlParserUtil.parseString(token.image); + unicodeEscapeChar = SqlParserUtil.checkUnicodeEscapeChar(s); + } + ] + { + assert !frags.isEmpty(); + if (frags.size() == 1) { + // just the head fragment + SqlLiteral lit = (SqlLiteral) frags.get(0); + return lit.unescapeUnicode(unicodeEscapeChar); + } else { + SqlNode[] rands = (SqlNode[]) frags.toArray(new SqlNode[0]); + for (int i = 0; i < rands.length; ++i) { + rands[i] = ((SqlLiteral) rands[i]).unescapeUnicode( + unicodeEscapeChar); + } + SqlParserPos pos2 = SqlParserPos.sum(rands); + return SqlStdOperatorTable.LITERAL_CHAIN.createCall(pos2, rands); + } + } +| + + { + try { + p = SqlParserUtil.parseCString(getToken(0).image); + } catch (SqlParserUtil.MalformedUnicodeEscape e) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.unicodeEscapeMalformed(e.i)); + } + return SqlLiteral.createCharString(p, "UTF16", getPos()); + } +| + + { + p = SqlParserUtil.stripQuotes(getToken(0).image, DQ, DQ, "\\\"", + Casing.UNCHANGED); + try { + return SqlLiteral.createCharString(p, charSet, getPos()); + } catch (java.nio.charset.UnsupportedCharsetException e) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.unknownCharacterSet(charSet)); + } + } +| + + { + p = SqlParserUtil.stripQuotes(getToken(0).image, "'", "'", "\\'", + Casing.UNCHANGED); + try { + return SqlLiteral.createCharString(p, charSet, getPos()); + } catch (java.nio.charset.UnsupportedCharsetException e) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.unknownCharacterSet(charSet)); + } + 
} +} + +/** Parses a character literal. + * Matches a single-quoted string, such as 'foo'; + * on BigQuery also matches a double-quoted string, such as "foo". + * Returns the value of the string with quotes removed. */ +String SimpleStringLiteral() : +{ +} +{ + { + return SqlParserUtil.parseString(token.image); + } +| + { + return SqlParserUtil.stripQuotes(token.image, "'", "'", "\\'", Casing.UNCHANGED); + } +| + { + return SqlParserUtil.stripQuotes(token.image, DQ, DQ, "\\\"", Casing.UNCHANGED); + } +} + +/** + * Parses a date/time literal. + */ +SqlLiteral DateTimeLiteral() : +{ + final String p; + final Span s; + boolean local = false; +} +{ + { + p = SqlParserUtil.parseString(token.image); + } + { + return SqlParserUtil.parseDateLiteral(p, getPos()); + } +| + { + p = SqlParserUtil.parseString(token.image); + } + { + return SqlParserUtil.parseTimeLiteral(p, getPos()); + } +| + { s = span(); } { + p = SqlParserUtil.parseString(token.image); + } + { + return SqlParserUtil.parseTimestampLiteral(p, s.end(this)); + } +| + { s = span(); } p = SimpleStringLiteral() { + return SqlLiteral.createUnknown("DATE", p, s.end(this)); + } +| + { s = span(); } p = SimpleStringLiteral() { + return SqlLiteral.createUnknown("DATETIME", p, s.end(this)); + } +| + LOOKAHEAD(2) +

The units are used in several functions, including CEIL, FLOOR, EXTRACT. + * Includes NANOSECOND, MILLISECOND, which were previously allowed in EXTRACT + but not CEIL, FLOOR. + * + *

Includes {@code WEEK} and {@code WEEK(SUNDAY)} through + * {@code WEEK(SATURDAY)}. + * + *

Does not include SQL_TSI_DAY, SQL_TSI_FRAC_SECOND etc. These will be + * parsed as identifiers and can be resolved in the validator if they are + * registered as abbreviations in your time frame set. + */ +SqlIntervalQualifier TimeUnitOrName() : { + final SqlIdentifier unitName; + final SqlIntervalQualifier intervalQualifier; +} +{ + // When we see a time unit that is also a non-reserved keyword, such as + // NANOSECOND, there is a choice between using the TimeUnit enum + // (TimeUnit.NANOSECOND) or the name. The following LOOKAHEAD directive + // tells the parser that we prefer the former. + // + // Reserved keywords, such as SECOND, cannot be identifiers, and are + // therefore not ambiguous. + LOOKAHEAD(2) + intervalQualifier = TimeUnit() { + return intervalQualifier; + } +| unitName = SimpleIdentifier() { + return new SqlIntervalQualifier(unitName.getSimple(), + unitName.getParserPosition()); + } +} + +/** Parses a built-in time unit (e.g. "YEAR") + * and returns a {@link SqlIntervalQualifier}. + * + *

Includes {@code WEEK} and {@code WEEK(SUNDAY)} through + * {@code WEEK(SATURDAY)}. + * + *

Does not include SQL_TSI_DAY, SQL_TSI_FRAC_SECOND etc. These will be + * parsed as identifiers and can be resolved in the validator if they are + * registered as abbreviations in your time frame set. + */ +SqlIntervalQualifier TimeUnit() : { + final Span span; + final String w; +} +{ + { return new SqlIntervalQualifier(TimeUnit.NANOSECOND, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.MICROSECOND, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.MILLISECOND, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.SECOND, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.MINUTE, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.HOUR, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.DAY, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.DOW, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.DOY, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.DOW, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.DOY, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.ISODOW, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.ISOYEAR, null, getPos()); } +| { span = span(); } + ( + // There is a choice between "WEEK(weekday)" and "WEEK". We prefer + // the former, and the parser will look ahead for '('. 
+ LOOKAHEAD(2) + w = weekdayName() { + return new SqlIntervalQualifier(w, span.end(this)); + } + | + { return new SqlIntervalQualifier(TimeUnit.WEEK, null, getPos()); } + ) +| { return new SqlIntervalQualifier(TimeUnit.MONTH, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.QUARTER, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.YEAR, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.EPOCH, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.DECADE, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.CENTURY, null, getPos()); } +| { return new SqlIntervalQualifier(TimeUnit.MILLENNIUM, null, getPos()); } +} + +String weekdayName() : +{ +} +{ + { return "WEEK_SUNDAY"; } +| { return "WEEK_MONDAY"; } +| { return "WEEK_TUESDAY"; } +| { return "WEEK_WEDNESDAY"; } +| { return "WEEK_THURSDAY"; } +| { return "WEEK_FRIDAY"; } +| { return "WEEK_SATURDAY"; } +} + +/** + * Parses a dynamic parameter marker. + */ +SqlDynamicParam DynamicParam() : +{ +} +{ + { + return new SqlDynamicParam(nDynamicParams++, getPos()); + } +} + +/** + * Parses one segment of an identifier that may be composite. + * + *

Each time it reads an identifier it writes one element to each list; + * the entry in {@code positions} records its position and whether the + * segment was quoted. + */ +void AddIdentifierSegment(List names, List positions) : +{ + final String id; + char unicodeEscapeChar = BACKSLASH; + final SqlParserPos pos; + final Span span; +} +{ + ( + { + id = unquotedIdentifier(); + pos = getPos(); + } + | + { + id = unquotedIdentifier(); + pos = getPos(); + } + | + { + id = SqlParserUtil.stripQuotes(getToken(0).image, DQ, DQ, DQDQ, + quotedCasing); + pos = getPos().withQuoting(true); + } + | + { + id = SqlParserUtil.stripQuotes(getToken(0).image, "`", "`", "``", + quotedCasing); + pos = getPos().withQuoting(true); + } + | + { + id = SqlParserUtil.stripQuotes(getToken(0).image, "`", "`", "\\`", + quotedCasing); + pos = getPos().withQuoting(true); + } + | + { + id = SqlParserUtil.stripQuotes(getToken(0).image, "[", "]", "]]", + quotedCasing); + pos = getPos().withQuoting(true); + } + | + { + span = span(); + String image = getToken(0).image; + image = image.substring(image.indexOf('"')); + image = SqlParserUtil.stripQuotes(image, DQ, DQ, DQDQ, quotedCasing); + } + [ + { + String s = SqlParserUtil.parseString(token.image); + unicodeEscapeChar = SqlParserUtil.checkUnicodeEscapeChar(s); + } + ] + { + pos = span.end(this).withQuoting(true); + SqlLiteral lit = SqlLiteral.createCharString(image, "UTF16", pos); + lit = lit.unescapeUnicode(unicodeEscapeChar); + id = lit.toValue(); + } + | + id = NonReservedKeyWord() { + pos = getPos(); + } + ) + { + if (id.length() > this.identifierMaxLength) { + throw SqlUtil.newContextException(pos, + RESOURCE.identifierTooLong(id, this.identifierMaxLength)); + } + names.add(id); + if (positions != null) { + positions.add(pos); + } + } +} + +/** As {@link #AddIdentifierSegment} but part of a table name (for example, + * following {@code FROM}, {@code INSERT} or {@code UPDATE}). + * + *

In some dialects the lexical rules for table names are different from + * for other identifiers. For example, in BigQuery, table names may contain + * hyphens. */ +void AddTableIdentifierSegment(List names, List positions) : +{ +} +{ + AddIdentifierSegment(names, positions) { + final int n = names.size(); + if (n > 0 + && positions.size() == n + && names.get(n - 1).contains(".") + && positions.get(n - 1).isQuoted() + && this.conformance.splitQuotedTableName()) { + final String name = names.remove(n - 1); + final SqlParserPos pos = positions.remove(n - 1); + final String[] splitNames = name.split("\\."); + for (String splitName : splitNames) { + names.add(splitName); + positions.add(pos); + } + } + } +} + +/** + * Parses a simple identifier as a String. + */ +String Identifier() : +{ + final List names = new ArrayList(); +} +{ + AddIdentifierSegment(names, null) { + return names.get(0); + } +} + +/** + * Parses a simple identifier as an SqlIdentifier. + */ +SqlIdentifier SimpleIdentifier() : +{ + final List names = new ArrayList(); + final List positions = new ArrayList(); +} +{ + AddIdentifierSegment(names, positions) { + return new SqlIdentifier(names.get(0), positions.get(0)); + } +} + +/** + * Parses a character literal as an SqlIdentifier. + * Only valid for column aliases in certain dialects. + */ +SqlIdentifier SimpleIdentifierFromStringLiteral() : +{ +} +{ + { + if (!this.conformance.allowCharLiteralAlias()) { + throw SqlUtil.newContextException(getPos(), RESOURCE.charLiteralAliasNotValid()); + } + final String s = SqlParserUtil.parseString(token.image); + return new SqlIdentifier(s, getPos()); + } +} + +/** + * Parses a comma-separated list of simple identifiers. + */ +void AddSimpleIdentifiers(List list) : +{ + SqlIdentifier id; +} +{ + id = SimpleIdentifier() {list.add(id);} + ( + id = SimpleIdentifier() { + list.add(id); + } + )* +} + +/** + * List of simple identifiers in parentheses. 
The position extends from the + * open parenthesis to the close parenthesis. + */ +SqlNodeList ParenthesizedSimpleIdentifierList() : +{ + final Span s; + final List list = new ArrayList(); +} +{ + { s = span(); } + AddSimpleIdentifiers(list) + { + return new SqlNodeList(list, s.end(this)); + } +} + +/** List of simple identifiers in parentheses or one simple identifier. + * + *

    Examples: + *
  • {@code DEPTNO} + *
  • {@code (EMPNO, DEPTNO)} + *
+ */ +SqlNodeList SimpleIdentifierOrList() : +{ + SqlIdentifier id; + SqlNodeList list; +} +{ + id = SimpleIdentifier() { + return new SqlNodeList(Collections.singletonList(id), id.getParserPosition()); + } +| + list = ParenthesizedSimpleIdentifierList() { + return list; + } +} + +<#if (parser.includeCompoundIdentifier!default.parser.includeCompoundIdentifier) > +/** + * Parses a compound identifier. + */ +SqlIdentifier CompoundIdentifier() : +{ + final List nameList = new ArrayList(); + final List posList = new ArrayList(); + boolean star = false; +} +{ + AddIdentifierSegment(nameList, posList) + ( + LOOKAHEAD(2) + + AddIdentifierSegment(nameList, posList) + )* + ( + LOOKAHEAD(2) + + { + star = true; + nameList.add(""); + posList.add(getPos()); + } + )? + { + SqlParserPos pos = SqlParserPos.sum(posList); + if (star) { + return SqlIdentifier.star(nameList, pos, posList); + } + return new SqlIdentifier(nameList, null, pos, posList); + } +} + +/** + * Parses a compound identifier in the FROM clause. + */ +SqlIdentifier CompoundTableIdentifier() : +{ + final List nameList = new ArrayList(); + final List posList = new ArrayList(); +} +{ + AddTableIdentifierSegment(nameList, posList) + ( + LOOKAHEAD(2) + + AddTableIdentifierSegment(nameList, posList) + )* + { + SqlParserPos pos = SqlParserPos.sum(posList); + return new SqlIdentifier(nameList, null, pos, posList); + } +} + +/** + * Parses a comma-separated list of compound identifiers. + */ +void AddCompoundIdentifierTypes(List list, List extendList) : +{ +} +{ + AddCompoundIdentifierType(list, extendList) + ( AddCompoundIdentifierType(list, extendList))* +} + +/** + * List of compound identifiers in parentheses. The position extends from the + * open parenthesis to the close parenthesis. 
+ */ +Pair ParenthesizedCompoundIdentifierList() : +{ + final Span s; + final List list = new ArrayList(); + final List extendList = new ArrayList(); +} +{ + { s = span(); } + AddCompoundIdentifierTypes(list, extendList) + { + return Pair.of(new SqlNodeList(list, s.end(this)), new SqlNodeList(extendList, s.end(this))); + } +} +<#else> + <#include "/@includes/compoundIdentifier.ftl" /> + + +/** + * Parses a NEW UDT(...) expression. + */ +SqlNode NewSpecification() : +{ + final Span s; + final SqlNode routineCall; +} +{ + { s = span(); } + routineCall = + NamedRoutineCall(SqlFunctionCategory.USER_DEFINED_CONSTRUCTOR, + ExprContext.ACCEPT_SUB_QUERY) { + return SqlStdOperatorTable.NEW.createCall(s.end(routineCall), routineCall); + } +} + +//TODO: real parse errors. +int UnsignedIntLiteral() : +{ + Token t; +} +{ + t = + { + try { + return Integer.parseInt(t.image); + } catch (NumberFormatException ex) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.invalidLiteral(t.image, Integer.class.getCanonicalName())); + } + } +} + +int IntLiteral() : +{ + Token t; +} +{ + ( + t = + | + t = + ) + { + try { + return Integer.parseInt(t.image); + } catch (NumberFormatException ex) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.invalidLiteral(t.image, Integer.class.getCanonicalName())); + } + } +| + t = { + try { + return -Integer.parseInt(t.image); + } catch (NumberFormatException ex) { + throw SqlUtil.newContextException(getPos(), + RESOURCE.invalidLiteral(t.image, Integer.class.getCanonicalName())); + } + } +} + +// Type name with optional scale and precision. 
+SqlDataTypeSpec DataType() : +{ + SqlTypeNameSpec typeName; + final Span s; +} +{ + typeName = TypeName() { + s = Span.of(typeName.getParserPos()); + } + ( + typeName = CollectionsTypeName(typeName) + )* + { + return new SqlDataTypeSpec(typeName, s.add(typeName.getParserPos()).pos()); + } +} + +// Some SQL type names need special handling due to the fact that they have +// spaces in them but are not quoted. +SqlTypeNameSpec TypeName() : +{ + final SqlTypeNameSpec typeNameSpec; + final SqlIdentifier typeName; + final Span s = Span.of(); +} +{ + ( +<#-- additional types are included here --> +<#-- put custom data types in front of Calcite core data types --> +<#list (parser.dataTypeParserMethods!default.parser.dataTypeParserMethods) as method> + LOOKAHEAD(2) + typeNameSpec = ${method} + | + + LOOKAHEAD(2) + typeNameSpec = SqlTypeName(s) + | + typeNameSpec = RowTypeName() + | + LOOKAHEAD(2) + typeNameSpec = MapTypeName() + | + typeName = CompoundIdentifier() { + typeNameSpec = new SqlUserDefinedTypeNameSpec(typeName, s.end(this)); + } + ) + { + return typeNameSpec; + } +} + +// Types used for JDBC and ODBC scalar conversion function +SqlTypeNameSpec SqlTypeName(Span s) : +{ + final SqlTypeNameSpec sqlTypeNameSpec; +} +{ + ( + sqlTypeNameSpec = SqlTypeName1(s) + | + sqlTypeNameSpec = SqlTypeName2(s) + | + sqlTypeNameSpec = SqlTypeName3(s) + | + sqlTypeNameSpec = CharacterTypeName(s) + | + sqlTypeNameSpec = DateTimeTypeName() + ) + { + return sqlTypeNameSpec; + } +} + +// Parse sql type name that don't allow any extra specifications except the type name. +// For extra specification, we mean precision, scale, charSet, etc. 
+SqlTypeNameSpec SqlTypeName1(Span s) : +{ + final SqlTypeName sqlTypeName; +} +{ + ( + { + if (!this.conformance.allowGeometry()) { + throw SqlUtil.newContextException(getPos(), RESOURCE.geometryDisabled()); + } + s.add(this); + sqlTypeName = SqlTypeName.GEOMETRY; + } + | + { s.add(this); sqlTypeName = SqlTypeName.BOOLEAN; } + | + ( | ) { s.add(this); sqlTypeName = SqlTypeName.INTEGER; } + | + { s.add(this); sqlTypeName = SqlTypeName.TINYINT; } + | + { s.add(this); sqlTypeName = SqlTypeName.SMALLINT; } + | + { s.add(this); sqlTypeName = SqlTypeName.BIGINT; } + | + { s.add(this); sqlTypeName = SqlTypeName.REAL; } + | + { s.add(this); } + [ ] { sqlTypeName = SqlTypeName.DOUBLE; } + | + { s.add(this); sqlTypeName = SqlTypeName.FLOAT; } + | + { s.add(this); sqlTypeName = SqlTypeName.VARIANT; } + | + { s.add(this); sqlTypeName = SqlTypeName.UUID; } + ) + { + return new SqlBasicTypeNameSpec(sqlTypeName, s.end(this)); + } +} + +// Parse sql type name that allows precision specification. +SqlTypeNameSpec SqlTypeName2(Span s) : +{ + final SqlTypeName sqlTypeName; + int precision = -1; +} +{ + ( + { s.add(this); } + ( + { sqlTypeName = SqlTypeName.VARBINARY; } + | + { sqlTypeName = SqlTypeName.BINARY; } + ) + | + { s.add(this); sqlTypeName = SqlTypeName.VARBINARY; } + ) + precision = PrecisionOpt() + { + return new SqlBasicTypeNameSpec(sqlTypeName, precision, s.end(this)); + } +} + +// Parse sql type name that allows precision and scale specifications. 
+SqlTypeNameSpec SqlTypeName3(Span s) : +{ + final SqlTypeName sqlTypeName; + int precision = RelDataType.PRECISION_NOT_SPECIFIED; + int scale = RelDataType.SCALE_NOT_SPECIFIED; +} +{ + ( + ( | | ) { s.add(this); sqlTypeName = SqlTypeName.DECIMAL; } + | + { s.add(this); sqlTypeName = SqlTypeName.ANY; } + ) + [ + + precision = UnsignedIntLiteral() + [ + + scale = IntLiteral() + ] + + ] + { + return new SqlBasicTypeNameSpec(sqlTypeName, precision, scale, s.end(this)); + } +} + +// Types used for for JDBC and ODBC scalar conversion function +SqlJdbcDataTypeName JdbcOdbcDataTypeName() : +{ +} +{ + ( | ) { return SqlJdbcDataTypeName.SQL_CHAR; } +| ( | ) { return SqlJdbcDataTypeName.SQL_VARCHAR; } +| ( | ) { return SqlJdbcDataTypeName.SQL_DATE; } +| ( |

{ + private final CsvTableFactory csvTableFactory; + private final DataTableFactory dataTableFactory; + private final FlatGeoBufTableFactory flatGeoBufTableFactory; + private final GeoPackageTableFactory geoPackageTableFactory; + private final GeoParquetTableFactory geoParquetTableFactory; + private final OpenStreetMapTableFactory openStreetMapTableFactory; + private final RpslTableFactory rpslTableFactory; + private final ShapefileTableFactory shapefileTableFactory; + /** * Constructor. */ - public BaremapsTableFactory() {} + public BaremapsTableFactory() { + this.csvTableFactory = new CsvTableFactory(); + this.dataTableFactory = new DataTableFactory(); + this.flatGeoBufTableFactory = new FlatGeoBufTableFactory(); + this.geoPackageTableFactory = new GeoPackageTableFactory(); + this.geoParquetTableFactory = new GeoParquetTableFactory(); + this.openStreetMapTableFactory = new OpenStreetMapTableFactory(); + this.rpslTableFactory = new RpslTableFactory(); + this.shapefileTableFactory = new ShapefileTableFactory(); + } @Override public Table create( @@ -68,269 +67,20 @@ public Table create( Map operand, RelDataType rowType) { String format = (String) operand.get("format"); - - // Create a type factory - Calcite doesn't expose one through SchemaPlus - RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(); + if (format == null) { + throw new IllegalArgumentException("Format must be specified in the 'format' operand"); + } return switch (format) { - case "data" -> createDataTable(name, operand, typeFactory); - case "osm" -> createOpenStreetMapTable(operand); - case "csv" -> createCsvTable(operand); - case "shp" -> createShapefileTable(operand); - case "rpsl" -> createRpslTable(operand); - case "fgb" -> createFlatGeoBufTable(operand); - case "parquet" -> createGeoParquetTable(operand); - case "geopackage" -> createGeoPackageTable(operand); - default -> throw new RuntimeException("Unsupported format: " + format); + case "data" -> dataTableFactory.create(schema, name, 
operand, rowType); + case "osm" -> openStreetMapTableFactory.create(schema, name, operand, rowType); + case "csv" -> csvTableFactory.create(schema, name, operand, rowType); + case "shp" -> shapefileTableFactory.create(schema, name, operand, rowType); + case "rpsl" -> rpslTableFactory.create(schema, name, operand, rowType); + case "fgb" -> flatGeoBufTableFactory.create(schema, name, operand, rowType); + case "parquet" -> geoParquetTableFactory.create(schema, name, operand, rowType); + case "geopackage" -> geoPackageTableFactory.create(schema, name, operand, rowType); + default -> throw new IllegalArgumentException("Unsupported format: " + format); }; } - - /** - * Creates a Baremaps table. - * - * @param name the table name - * @param operand the operand properties - * @param typeFactory the type factory to use - * @return the created table - */ - private Table createDataTable( - String name, - Map operand, - RelDataTypeFactory typeFactory) { - String directory = (String) operand.get("directory"); - if (directory == null) { - throw new RuntimeException("A directory should be specified"); - } - try { - Memory memory = new MemoryMappedDirectory(Paths.get(directory)); - ByteBuffer header = memory.header(); - header.getLong(); // Skip the size - int length = header.getInt(); - byte[] bytes = new byte[length]; - header.get(bytes); - DataSchema dataSchema = DataSchema.read(new ByteArrayInputStream(bytes), typeFactory); - DataRowType dataRowType = new DataRowType(dataSchema); - DataCollection dataCollection = AppendOnlyLog.builder() - .dataType(dataRowType) - .memory(memory) - .build(); - return new DataModifiableTable( - name, - dataSchema, - dataCollection, - typeFactory); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - /** - * Creates an OpenStreetMap table from a file. 
- * - * @param operand the operand properties - * @return the created table - */ - private Table createOpenStreetMapTable(Map operand) { - // Get the file path from the operand - String filePath = (String) operand.get("file"); - if (filePath == null) { - throw new IllegalArgumentException("File path must be specified in the 'file' operand"); - } - - try { - // Create a new input stream from the file - InputStream inputStream = new FileInputStream(filePath); - - // Create an entity reader based on the file extension - if (filePath.endsWith(".pbf") || filePath.endsWith(".osm.pbf")) { - return createTableFromPbf(inputStream); - } else if (filePath.endsWith(".xml") || filePath.endsWith(".osm")) { - return createTableFromXml(inputStream); - } else { - throw new IllegalArgumentException( - "Unsupported file format. Supported formats are .pbf, .osm.pbf, .xml, and .osm"); - } - } catch (IOException e) { - throw new RuntimeException("Failed to create OpenStreetMapTable from file: " + filePath, e); - } - } - - /** - * Creates a CSV table from a file. 
- * - * @param operand the operand properties - * @return the created table - */ - private Table createCsvTable(Map operand) { - // Get the file path from the operand - String filePath = (String) operand.get("file"); - if (filePath == null) { - throw new IllegalArgumentException("File path must be specified in the 'file' operand"); - } - - // Get the separator (default to comma) - String separatorStr = (String) operand.getOrDefault("separator", ","); - if (separatorStr.length() != 1) { - throw new IllegalArgumentException("Separator must be a single character"); - } - char separator = separatorStr.charAt(0); - - // Get whether the file has a header (default to true) - boolean hasHeader = (Boolean) operand.getOrDefault("hasHeader", true); - - try { - File file = new File(filePath); - return new CsvTable(file, separator, hasHeader); - } catch (IOException e) { - throw new RuntimeException("Failed to create CsvTable from file: " + filePath, e); - } - } - - /** - * Creates a shapefile table from a file. - * - * @param operand the operand properties - * @return the created table - */ - private Table createShapefileTable(Map operand) { - // Get the file path from the operand - String filePath = (String) operand.get("file"); - if (filePath == null) { - throw new IllegalArgumentException("File path must be specified in the 'file' operand"); - } - - try { - File file = new File(filePath); - return new ShapefileTable(file); - } catch (IOException e) { - throw new RuntimeException("Failed to create ShapefileTable from file: " + filePath, e); - } - } - - /** - * Creates a RPSL table from a file. 
- * - * @param operand the operand properties - * @return the created table - */ - private Table createRpslTable(Map operand) { - // Get the file path from the operand - String filePath = (String) operand.get("file"); - if (filePath == null) { - throw new IllegalArgumentException("File path must be specified in the 'file' operand"); - } - - try { - File file = new File(filePath); - return new RpslTable(file); - } catch (IOException e) { - throw new RuntimeException("Failed to create RpslTable from file: " + filePath, e); - } - } - - /** - * Creates a FlatGeoBuf table from a file. - * - * @param operand the operand properties - * @return the created table - */ - private Table createFlatGeoBufTable(Map operand) { - // Get the file path from the operand - String filePath = (String) operand.get("file"); - if (filePath == null) { - throw new IllegalArgumentException("File path must be specified in the 'file' operand"); - } - - try { - File file = new File(filePath); - return new FlatGeoBufTable(file); - } catch (IOException e) { - throw new RuntimeException("Failed to create FlatGeoBufTable from file: " + filePath, e); - } - } - - /** - * Create a table from a PBF file. - * - * @param inputStream the input stream - * @return the table - */ - public static OpenStreetMapTable createTableFromPbf(InputStream inputStream) { - return new OpenStreetMapTable(new PbfEntityReader().setGeometries(true), inputStream); - } - - /** - * Create a table from a PBF file. - * - * @param path the path to the PBF file - * @return the table - * @throws IOException if an I/O error occurs - */ - public static OpenStreetMapTable createTableFromPbf(Path path) throws IOException { - return createTableFromPbf(new FileInputStream(path.toFile())); - } - - /** - * Create a table from an XML file. 
- * - * @param inputStream the input stream - * @return the table - */ - public static OpenStreetMapTable createTableFromXml(InputStream inputStream) { - return new OpenStreetMapTable(new XmlEntityReader().setGeometries(true), inputStream); - } - - /** - * Create a table from an XML file. - * - * @param path the path to the XML file - * @return the table - * @throws IOException if an I/O error occurs - */ - public static OpenStreetMapTable createTableFromXml(Path path) throws IOException { - return createTableFromXml(new FileInputStream(path.toFile())); - } - - private Table createGeoParquetTable(Map operand) { - if (operand.size() < 2) { - throw new IllegalArgumentException("Missing file path for GeoParquet table"); - } - try { - String filePath = (String) operand.get("file"); - if (filePath == null) { - throw new IllegalArgumentException("File path must be specified in the 'file' operand"); - } - - // Create a type factory - Calcite doesn't expose one through SchemaPlus - RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(); - - return new GeoParquetTable(new File(filePath), typeFactory); - } catch (IOException e) { - throw new RuntimeException("Failed to create GeoParquet table", e); - } - } - - private Table createGeoPackageTable(Map operand) { - if (operand.size() < 2) { - throw new IllegalArgumentException("Missing file path and table name for GeoPackage table"); - } - try { - String filePath = (String) operand.get("file"); - if (filePath == null) { - throw new IllegalArgumentException("File path must be specified in the 'file' operand"); - } - - String tableName = (String) operand.get("table"); - if (tableName == null) { - throw new IllegalArgumentException("Table name must be specified in the 'table' operand"); - } - - // Create a type factory - Calcite doesn't expose one through SchemaPlus - RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(); - - return new GeoPackageTable(new File(filePath), tableName, typeFactory); - } catch (IOException e) 
{ - throw new RuntimeException("Failed to create GeoPackage table", e); - } - } } diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/csv/CsvTable.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/csv/CsvTable.java index dd99fc1a4..80346970a 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/csv/CsvTable.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/csv/CsvTable.java @@ -43,8 +43,6 @@ public class CsvTable extends AbstractTable implements ScannableTable { private final File file; private final CsvSchema csvSchema; - private final boolean hasHeader; - private final char separator; private RelDataType rowType; /** @@ -57,8 +55,6 @@ public class CsvTable extends AbstractTable implements ScannableTable { */ public CsvTable(File file, char separator, boolean hasHeader) throws IOException { this.file = file; - this.separator = separator; - this.hasHeader = hasHeader; this.csvSchema = buildSchema(file, separator, hasHeader); } diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/csv/CsvTableFactory.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/csv/CsvTableFactory.java new file mode 100644 index 000000000..bbc893573 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/csv/CsvTableFactory.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.csv; + +import java.io.File; +import java.io.IOException; +import java.util.Map; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.TableFactory; + +/** + * A table factory for creating CSV tables. + */ +public class CsvTableFactory implements TableFactory
{ + + /** + * Constructor. + */ + public CsvTableFactory() {} + + @Override + public Table create( + SchemaPlus schema, + String name, + Map operand, + RelDataType rowType) { + // Get the file path from the operand + String filePath = (String) operand.get("file"); + if (filePath == null) { + throw new IllegalArgumentException("File path must be specified in the 'file' operand"); + } + + // Get the separator (default to comma) + String separatorStr = (String) operand.getOrDefault("separator", ","); + if (separatorStr.length() != 1) { + throw new IllegalArgumentException("Separator must be a single character"); + } + char separator = separatorStr.charAt(0); + + // Get whether the file has a header (default to true) + boolean hasHeader = (Boolean) operand.getOrDefault("hasHeader", true); + + try { + File file = new File(filePath); + return new CsvTable(file, separator, hasHeader); + } catch (IOException e) { + throw new RuntimeException("Failed to create CsvTable from file: " + filePath, e); + } + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataModifiableTable.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataModifiableTable.java index 5f415b28d..4b3bdd6ab 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataModifiableTable.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataModifiableTable.java @@ -65,7 +65,7 @@ public class DataModifiableTable extends AbstractTable implements ModifiableTabl private final String name; private final RelProtoDataType protoRowType; private final RelDataType rowType; - private final DataSchema schema; + private final DataTableSchema schema; public final DataCollection rows; /** @@ -92,7 +92,7 @@ public DataModifiableTable(String name, columns.add(new DataColumnFixed(columnName, columnCardinality, relDataType)); }); - this.schema = new DataSchema(name, columns); + this.schema = new DataTableSchema(name, columns); // Create 
the collection DataRowType dataRowType = new DataRowType(schema); @@ -109,7 +109,7 @@ public DataModifiableTable(String name, * @param typeFactory the type factory */ public DataModifiableTable(String name, - DataSchema schema, + DataTableSchema schema, DataCollection rows, RelDataTypeFactory typeFactory) { super(); diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataRow.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataRow.java index 4ba72fdc3..0df5b4a7d 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataRow.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataRow.java @@ -23,7 +23,7 @@ /** * A row in a table with values corresponding to the schema columns. */ -public record DataRow(DataSchema schema, List values) { +public record DataRow(DataTableSchema schema, List values) { /** * Constructs a row with validation. diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataRowType.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataRowType.java index 2181aff6f..fad1a7f24 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataRowType.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataRowType.java @@ -118,14 +118,14 @@ public static DataType getType(SqlTypeName sqlTypeName) { return dataType; } - private final DataSchema rowType; + private final DataTableSchema rowType; /** * Constructs a DataRowType with the given schema. 
* * @param rowType the row schema */ - public DataRowType(DataSchema rowType) { + public DataRowType(DataTableSchema rowType) { this.rowType = Objects.requireNonNull(rowType, "Row type cannot be null"); } diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataSchema.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataSchema.java index a89e75dbf..7fd631bec 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataSchema.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataSchema.java @@ -17,278 +17,75 @@ package org.apache.baremaps.calcite.data; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.databind.DeserializationContext; -import com.fasterxml.jackson.databind.JsonDeserializer; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.jsontype.NamedType; -import com.fasterxml.jackson.databind.module.SimpleModule; -import com.fasterxml.jackson.databind.node.ObjectNode; +import java.io.File; +import java.io.FileInputStream; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.io.Serializable; -import java.util.*; -import org.apache.baremaps.calcite.data.DataColumn.Cardinality; -import org.apache.calcite.rel.type.RelDataType; +import java.nio.MappedByteBuffer; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import org.apache.baremaps.data.collection.AppendOnlyLog; +import org.apache.baremaps.data.collection.DataCollection; +import org.apache.baremaps.data.memory.Memory; +import org.apache.baremaps.data.memory.MemoryMappedDirectory; import org.apache.calcite.rel.type.RelDataTypeFactory; -import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.impl.AbstractSchema; 
/** - * A {@link DataSchema} defines the structure of a table. + * A Calcite schema implementation for data stored in directories. This schema provides access to + * data through the Apache Calcite framework for SQL querying. */ -public record DataSchema(String name, - List columns) implements Serializable { +public class DataSchema extends AbstractSchema { - /** - * Constructs a schema with validation. - * - * @param name the name of the schema - * @param columns the columns in the schema - * @throws NullPointerException if name or columns is null - * @throws IllegalArgumentException if name is blank, columns is empty, or columns contains - * duplicates - */ - public DataSchema { - Objects.requireNonNull(name, "Schema name cannot be null"); - Objects.requireNonNull(columns, "Columns cannot be null"); - - if (name.isBlank()) { - throw new IllegalArgumentException("Schema name cannot be blank"); - } - - if (columns.isEmpty()) { - throw new IllegalArgumentException("Columns cannot be empty"); - } - - // Check for duplicate column names - Set columnNames = new HashSet<>(); - for (DataColumn column : columns) { - if (!columnNames.add(column.name())) { - throw new IllegalArgumentException("Duplicate column name: " + column.name()); - } - } - - // Make defensive copy - columns = List.copyOf(columns); - } + private final File directory; + private final Map tableMap; + private final RelDataTypeFactory typeFactory; /** - * Creates a new row for this schema with all values set to null. + * Constructs a DataSchema with the specified directory. * - * @return a new row - */ - public DataRow createRow() { - var values = new ArrayList<>(columns.size()); - for (int i = 0; i < columns.size(); i++) { - values.add(null); - } - return new DataRow(this, values); - } - - /** - * Gets a column by name. 
- * - * @param name the name of the column - * @return the column - * @throws IllegalArgumentException if the column does not exist - */ - public DataColumn getColumn(String name) { - Objects.requireNonNull(name, "Column name cannot be null"); - - for (DataColumn column : columns) { - if (column.name().equals(name)) { - return column; - } - } - throw new IllegalArgumentException("Column not found: " + name); - } - - /** - * Gets the index of a column by name. - * - * @param name the name of the column - * @return the index of the column - * @throws IllegalArgumentException if the column does not exist - */ - public int getColumnIndex(String name) { - Objects.requireNonNull(name, "Column name cannot be null"); - - for (int i = 0; i < columns.size(); i++) { - if (columns.get(i).name().equals(name)) { - return i; - } - } - throw new IllegalArgumentException("Column not found: " + name); - } - - /** - * Checks if a column exists. - * - * @param name the name of the column - * @return true if the column exists - */ - public boolean hasColumn(String name) { - Objects.requireNonNull(name, "Column name cannot be null"); - - for (DataColumn column : columns) { - if (column.name().equals(name)) { - return true; - } - } - return false; - } - - /** - * Custom JSON deserializer for DataSchema. + * @param directory the directory containing data subdirectories + * @param typeFactory the type factory to use for creating tables + * @throws IOException if an I/O error occurs */ - static class DataSchemaDeserializer extends JsonDeserializer { - private RelDataTypeFactory typeFactory; - - /** - * Constructs a DataSchemaDeserializer with the given type factory. 
- * - * @param typeFactory the type factory to use - */ - public DataSchemaDeserializer(RelDataTypeFactory typeFactory) { - this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); - } - - @Override - public DataSchema deserialize(JsonParser parser, DeserializationContext ctxt) - throws IOException { - ObjectNode node = parser.getCodec().readTree(parser); - if (!node.has("name")) { - throw new IOException("Missing required field: name"); - } - if (!node.has("columns")) { - throw new IOException("Missing required field: columns"); - } - - String name = node.get("name").asText(); - List columns = new ArrayList<>(); - - JsonNode columnsNode = node.get("columns"); - if (!columnsNode.isArray()) { - throw new IOException("columns field must be an array"); - } - - columnsNode.elements().forEachRemaining(column -> { - try { - columns.add(deserialize(column)); - } catch (Exception e) { - throw new RuntimeException("Error deserializing column", e); - } - }); - - return new DataSchema(name, columns); - } - - DataColumn deserialize(JsonNode node) { - if (!node.has("name") || !node.has("cardinality") || !node.has("sqlTypeName")) { - throw new IllegalArgumentException( - "Column is missing required fields: name, cardinality, or sqlTypeName"); - } - - String columnName = node.get("name").asText(); - Cardinality cardinality; - try { - cardinality = Cardinality.valueOf(node.get("cardinality").asText()); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - "Invalid cardinality value: " + node.get("cardinality").asText()); - } - - SqlTypeName sqlTypeName; - try { - sqlTypeName = SqlTypeName.valueOf(node.get("sqlTypeName").asText()); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - "Invalid SQL type name value: " + node.get("sqlTypeName").asText()); - } - - // Create the RelDataType based on the SqlTypeName - RelDataType relDataType; - if (sqlTypeName == SqlTypeName.ROW) { - if 
(!node.has("columns")) { - throw new IllegalArgumentException("Nested column is missing required field: columns"); - } - - List columns = new ArrayList<>(); - JsonNode columnsNode = node.get("columns"); - if (!columnsNode.isArray()) { - throw new IllegalArgumentException("columns field must be an array"); - } - - columnsNode.elements().forEachRemaining(column -> { - columns.add(deserialize(column)); - }); - - return DataColumnNested.of(columnName, cardinality, columns, typeFactory); - } else { - // Create basic type without nullability, precision, etc. - relDataType = typeFactory.createSqlType(sqlTypeName); - - // Handle nullability based on cardinality - if (cardinality == Cardinality.OPTIONAL) { - relDataType = typeFactory.createTypeWithNullability(relDataType, true); + public DataSchema(File directory, RelDataTypeFactory typeFactory) throws IOException { + this.directory = Objects.requireNonNull(directory, "Directory cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Only process directories in the specified directory + File[] subdirectories = directory.listFiles(File::isDirectory); + if (subdirectories != null) { + for (File subdirectory : subdirectories) { + String tableName = subdirectory.getName(); + Path schemaPath = subdirectory.toPath().resolve("schema.json"); + + if (Files.exists(schemaPath)) { + // Read the schema from the schema.json file + try (FileInputStream fis = new FileInputStream(schemaPath.toFile())) { + DataTableSchema schema = DataTableSchema.read(fis, typeFactory); + + // Create the data collection + DataRowType dataRowType = new DataRowType(schema); + Memory memory = new MemoryMappedDirectory(schemaPath.getParent()); + DataCollection rows = AppendOnlyLog.builder() + .dataType(dataRowType) + .memory(memory) + .build(); + + // Create the table + tableMap.put(tableName, new DataModifiableTable(tableName, schema, rows, typeFactory)); + } } - - 
return new DataColumnFixed(columnName, cardinality, relDataType); } } } - /** - * Configures an ObjectMapper for DataSchema serialization/deserialization. - * - * @param typeFactory the type factory to use - * @return a configured ObjectMapper - */ - private static ObjectMapper configureObjectMapper(RelDataTypeFactory typeFactory) { - var mapper = new ObjectMapper(); - mapper.registerSubtypes( - new NamedType(DataColumnFixed.class, "FIXED"), - new NamedType(DataColumnNested.class, "NESTED")); - var module = new SimpleModule(); - module.addDeserializer(DataSchema.class, new DataSchemaDeserializer(typeFactory)); - mapper.registerModule(module); - return mapper; - } - - /** - * Reads a DataSchema from an input stream. - * - * @param inputStream the input stream - * @param typeFactory the type factory to use - * @return the schema - * @throws IOException if an I/O error occurs - */ - public static DataSchema read(InputStream inputStream, RelDataTypeFactory typeFactory) - throws IOException { - Objects.requireNonNull(inputStream, "Input stream cannot be null"); - Objects.requireNonNull(typeFactory, "Type factory cannot be null"); - - var mapper = configureObjectMapper(typeFactory); - return mapper.readValue(inputStream, DataSchema.class); - } - - /** - * Writes a DataSchema to an output stream. 
- * - * @param outputStream the output stream - * @param schema the schema - * @param typeFactory the type factory to use - * @throws IOException if an I/O error occurs - */ - public static void write(OutputStream outputStream, DataSchema schema, - RelDataTypeFactory typeFactory) throws IOException { - Objects.requireNonNull(outputStream, "Output stream cannot be null"); - Objects.requireNonNull(schema, "Schema cannot be null"); - Objects.requireNonNull(typeFactory, "Type factory cannot be null"); - - var mapper = configureObjectMapper(typeFactory); - mapper.writeValue(outputStream, schema); + @Override + protected Map getTableMap() { + return tableMap; } } diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataTableFactory.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataTableFactory.java new file mode 100644 index 000000000..72f1cea64 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataTableFactory.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.data; + +import com.fasterxml.jackson.databind.ObjectMapper; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; +import org.apache.baremaps.data.collection.AppendOnlyLog; +import org.apache.baremaps.data.collection.DataCollection; +import org.apache.baremaps.data.memory.Memory; +import org.apache.baremaps.data.memory.MemoryMappedDirectory; +import org.apache.calcite.jdbc.JavaTypeFactoryImpl; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.TableFactory; + +/** + * A table factory for creating Data tables. + */ +public class DataTableFactory implements TableFactory
{ + + private static final RelDataTypeFactory TYPE_FACTORY = new JavaTypeFactoryImpl(); + + /** + * Constructor. + */ + public DataTableFactory() {} + + @Override + public Table create( + SchemaPlus schema, + String name, + Map operand, + RelDataType rowType) { + String file = (String) operand.get("file"); + if (file == null) { + throw new RuntimeException("A file should be specified"); + } + + try { + Memory memory = new MemoryMappedDirectory(Paths.get(file)); + ByteBuffer header = memory.header(); + + // For new tables, initialize with schema + if (rowType != null) { + // Create and serialize schema + Map schemaMap = new HashMap<>(); + schemaMap.put("name", name); + schemaMap.put("columns", rowType.getFieldList().stream() + .map(field -> { + Map column = new HashMap<>(); + column.put("name", field.getName()); + column.put("cardinality", + field.getType().isNullable() ? DataColumn.Cardinality.OPTIONAL.name() + : DataColumn.Cardinality.REQUIRED.name()); + column.put("sqlTypeName", field.getType().getSqlTypeName().name()); + return column; + }) + .toList()); + + // Serialize and write schema to header + byte[] schemaBytes = new ObjectMapper().writeValueAsBytes(schemaMap); + header.putLong(0L); + header.putInt(schemaBytes.length); + header.put(schemaBytes); + } + + // Read schema and create table + header.position(0); + long size = header.getLong(); + int length = header.getInt(); + byte[] bytes = new byte[length]; + header.get(bytes); + DataTableSchema dataSchema = + DataTableSchema.read(new ByteArrayInputStream(bytes), TYPE_FACTORY); + DataRowType dataRowType = new DataRowType(dataSchema); + DataCollection dataCollection = AppendOnlyLog.builder() + .dataType(dataRowType) + .memory(memory) + .build(); + + return new DataModifiableTable(name, dataSchema, dataCollection, TYPE_FACTORY); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataTableSchema.java 
b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataTableSchema.java new file mode 100644 index 000000000..5e643a30b --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/data/DataTableSchema.java @@ -0,0 +1,294 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.data; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.jsontype.NamedType; +import com.fasterxml.jackson.databind.module.SimpleModule; +import com.fasterxml.jackson.databind.node.ObjectNode; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.io.Serializable; +import java.util.*; +import org.apache.baremaps.calcite.data.DataColumn.Cardinality; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.sql.type.SqlTypeName; + +/** + * A {@link DataTableSchema} defines the structure of a table. 
+ */ +public record DataTableSchema(String name, + List columns) implements Serializable { + + /** + * Constructs a schema with validation. + * + * @param name the name of the schema + * @param columns the columns in the schema + * @throws NullPointerException if name or columns is null + * @throws IllegalArgumentException if name is blank, columns is empty, or columns contains + * duplicates + */ + public DataTableSchema { + Objects.requireNonNull(name, "Schema name cannot be null"); + Objects.requireNonNull(columns, "Columns cannot be null"); + + if (name.isBlank()) { + throw new IllegalArgumentException("Schema name cannot be blank"); + } + + if (columns.isEmpty()) { + throw new IllegalArgumentException("Columns cannot be empty"); + } + + // Check for duplicate column names + Set columnNames = new HashSet<>(); + for (DataColumn column : columns) { + if (!columnNames.add(column.name())) { + throw new IllegalArgumentException("Duplicate column name: " + column.name()); + } + } + + // Make defensive copy + columns = List.copyOf(columns); + } + + /** + * Creates a new row for this schema with all values set to null. + * + * @return a new row + */ + public DataRow createRow() { + var values = new ArrayList<>(columns.size()); + for (int i = 0; i < columns.size(); i++) { + values.add(null); + } + return new DataRow(this, values); + } + + /** + * Gets a column by name. + * + * @param name the name of the column + * @return the column + * @throws IllegalArgumentException if the column does not exist + */ + public DataColumn getColumn(String name) { + Objects.requireNonNull(name, "Column name cannot be null"); + + for (DataColumn column : columns) { + if (column.name().equals(name)) { + return column; + } + } + throw new IllegalArgumentException("Column not found: " + name); + } + + /** + * Gets the index of a column by name. 
+ * + * @param name the name of the column + * @return the index of the column + * @throws IllegalArgumentException if the column does not exist + */ + public int getColumnIndex(String name) { + Objects.requireNonNull(name, "Column name cannot be null"); + + for (int i = 0; i < columns.size(); i++) { + if (columns.get(i).name().equals(name)) { + return i; + } + } + throw new IllegalArgumentException("Column not found: " + name); + } + + /** + * Checks if a column exists. + * + * @param name the name of the column + * @return true if the column exists + */ + public boolean hasColumn(String name) { + Objects.requireNonNull(name, "Column name cannot be null"); + + for (DataColumn column : columns) { + if (column.name().equals(name)) { + return true; + } + } + return false; + } + + /** + * Custom JSON deserializer for DataSchema. + */ + static class DataSchemaDeserializer extends JsonDeserializer { + private RelDataTypeFactory typeFactory; + + /** + * Constructs a DataSchemaDeserializer with the given type factory. 
+ * + * @param typeFactory the type factory to use + */ + public DataSchemaDeserializer(RelDataTypeFactory typeFactory) { + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + } + + @Override + public DataTableSchema deserialize(JsonParser parser, DeserializationContext ctxt) + throws IOException { + ObjectNode node = parser.getCodec().readTree(parser); + if (!node.has("name")) { + throw new IOException("Missing required field: name"); + } + if (!node.has("columns")) { + throw new IOException("Missing required field: columns"); + } + + String name = node.get("name").asText(); + List columns = new ArrayList<>(); + + JsonNode columnsNode = node.get("columns"); + if (!columnsNode.isArray()) { + throw new IOException("columns field must be an array"); + } + + columnsNode.elements().forEachRemaining(column -> { + try { + columns.add(deserialize(column)); + } catch (Exception e) { + throw new RuntimeException("Error deserializing column", e); + } + }); + + return new DataTableSchema(name, columns); + } + + DataColumn deserialize(JsonNode node) { + if (!node.has("name") || !node.has("cardinality") || !node.has("sqlTypeName")) { + throw new IllegalArgumentException( + "Column is missing required fields: name, cardinality, or sqlTypeName"); + } + + String columnName = node.get("name").asText(); + Cardinality cardinality; + try { + cardinality = Cardinality.valueOf(node.get("cardinality").asText()); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + "Invalid cardinality value: " + node.get("cardinality").asText()); + } + + SqlTypeName sqlTypeName; + try { + sqlTypeName = SqlTypeName.valueOf(node.get("sqlTypeName").asText()); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + "Invalid SQL type name value: " + node.get("sqlTypeName").asText()); + } + + // Create the RelDataType based on the SqlTypeName + RelDataType relDataType; + if (sqlTypeName == SqlTypeName.ROW) { + if 
(!node.has("columns")) { + throw new IllegalArgumentException("Nested column is missing required field: columns"); + } + + List columns = new ArrayList<>(); + JsonNode columnsNode = node.get("columns"); + if (!columnsNode.isArray()) { + throw new IllegalArgumentException("columns field must be an array"); + } + + columnsNode.elements().forEachRemaining(column -> { + columns.add(deserialize(column)); + }); + + return DataColumnNested.of(columnName, cardinality, columns, typeFactory); + } else { + // Create basic type without nullability, precision, etc. + relDataType = typeFactory.createSqlType(sqlTypeName); + + // Handle nullability based on cardinality + if (cardinality == Cardinality.OPTIONAL) { + relDataType = typeFactory.createTypeWithNullability(relDataType, true); + } + + return new DataColumnFixed(columnName, cardinality, relDataType); + } + } + } + + /** + * Configures an ObjectMapper for DataSchema serialization/deserialization. + * + * @param typeFactory the type factory to use + * @return a configured ObjectMapper + */ + private static ObjectMapper configureObjectMapper(RelDataTypeFactory typeFactory) { + var mapper = new ObjectMapper(); + mapper.registerSubtypes( + new NamedType(DataColumnFixed.class, "FIXED"), + new NamedType(DataColumnNested.class, "NESTED")); + var module = new SimpleModule(); + module.addDeserializer(DataTableSchema.class, new DataSchemaDeserializer(typeFactory)); + mapper.registerModule(module); + return mapper; + } + + /** + * Reads a DataSchema from an input stream. 
+ * + * @param inputStream the input stream + * @param typeFactory the type factory to use + * @return the schema + * @throws IOException if an I/O error occurs + */ + public static DataTableSchema read(InputStream inputStream, RelDataTypeFactory typeFactory) + throws IOException { + Objects.requireNonNull(inputStream, "Input stream cannot be null"); + Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + + var mapper = configureObjectMapper(typeFactory); + return mapper.readValue(inputStream, DataTableSchema.class); + } + + /** + * Writes a DataSchema to an output stream. + * + * @param outputStream the output stream + * @param schema the schema + * @param typeFactory the type factory to use + * @throws IOException if an I/O error occurs + */ + public static void write(OutputStream outputStream, DataTableSchema schema, + RelDataTypeFactory typeFactory) throws IOException { + Objects.requireNonNull(outputStream, "Output stream cannot be null"); + Objects.requireNonNull(schema, "Schema cannot be null"); + Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + + var mapper = configureObjectMapper(typeFactory); + mapper.writeValue(outputStream, schema); + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlAttributeDefinition.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlAttributeDefinition.java new file mode 100644 index 000000000..3a0c16b16 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlAttributeDefinition.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import com.google.common.collect.ImmutableList;
import java.util.List;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Parse tree for SqlAttributeDefinition, which is part of a {@link SqlCreateType}.
 */
public class SqlAttributeDefinition extends SqlCall {
  private static final SqlSpecialOperator OPERATOR =
      new SqlSpecialOperator("ATTRIBUTE_DEF", SqlKind.ATTRIBUTE_DEF);

  // Attribute name and declared type.
  public final SqlIdentifier name;
  public final SqlDataTypeSpec dataType;
  // Optional DEFAULT expression; null when absent.
  final @Nullable SqlNode expression;
  // Optional COLLATE clause; null when absent.
  final @Nullable SqlCollation collation;

  /** Creates a SqlAttributeDefinition; use {@link SqlDdlNodes#attribute}. */
  SqlAttributeDefinition(SqlParserPos pos, SqlIdentifier name,
      SqlDataTypeSpec dataType, @Nullable SqlNode expression, @Nullable SqlCollation collation) {
    super(pos);
    this.name = name;
    this.dataType = dataType;
    this.expression = expression;
    this.collation = collation;
  }

  @Override
  public SqlOperator getOperator() {
    return OPERATOR;
  }

  @Override
  public List<SqlNode> getOperandList() {
    // Only name and dataType participate as operands; expression/collation are unparse-only.
    return ImmutableList.of(name, dataType);
  }

  @Override
  public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    name.unparse(writer, 0, 0);
    dataType.unparse(writer, 0, 0);
    if (collation != null) {
      writer.keyword("COLLATE");
      collation.unparse(writer);
    }
    // Only an explicit NOT NULL is emitted; nullable or unspecified prints nothing.
    if (Boolean.FALSE.equals(dataType.getNullable())) {
      writer.keyword("NOT NULL");
    }
    SqlNode expression = this.expression;
    if (expression != null) {
      writer.keyword("DEFAULT");
      SqlColumnDeclaration.exp(writer, expression);
    }
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import java.util.List;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.util.ImmutableNullableList;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Parse tree for a {@code CHECK} constraint, optionally named via
 * {@code CONSTRAINT name CHECK (expression)}.
 */
public class SqlCheckConstraint extends SqlCall {
  private static final SqlSpecialOperator OPERATOR =
      new SqlSpecialOperator("CHECK", SqlKind.CHECK);

  // Optional constraint name; null for an anonymous CHECK.
  private final @Nullable SqlIdentifier name;
  // The boolean expression being checked.
  private final SqlNode expression;

  /** Creates a SqlCheckConstraint; use {@link SqlDdlNodes#check}. */
  SqlCheckConstraint(SqlParserPos pos, @Nullable SqlIdentifier name,
      SqlNode expression) {
    super(pos);
    this.name = name; // may be null
    this.expression = expression;
  }

  @Override
  public SqlOperator getOperator() {
    return OPERATOR;
  }

  @SuppressWarnings("nullness")
  @Override
  public List<SqlNode> getOperandList() {
    // ImmutableNullableList tolerates the possibly-null name operand.
    return ImmutableNullableList.of(name, expression);
  }

  @Override
  public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    if (name != null) {
      writer.keyword("CONSTRAINT");
      name.unparse(writer, 0, 0);
    }
    writer.keyword("CHECK");
    // Avoid doubled parentheses when the writer already parenthesizes expressions.
    if (writer.isAlwaysUseParentheses()) {
      expression.unparse(writer, 0, 0);
    } else {
      writer.sep("(");
      expression.unparse(writer, 0, 0);
      writer.sep(")");
    }
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import com.google.common.collect.ImmutableList;
import java.util.List;
import org.apache.calcite.schema.ColumnStrategy;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Parse tree for a column declaration in a {@code CREATE TABLE} statement, covering the column
 * name, data type, and optional {@code DEFAULT} or generated ({@code AS ... VIRTUAL/STORED})
 * expression.
 */
public class SqlColumnDeclaration extends SqlCall {
  private static final SqlSpecialOperator OPERATOR =
      new SqlSpecialOperator("COLUMN_DECL", SqlKind.COLUMN_DECL);

  // Column name and declared type.
  public final SqlIdentifier name;
  public final SqlDataTypeSpec dataType;
  // Optional DEFAULT or generated-column expression; null when absent.
  public final @Nullable SqlNode expression;
  // How the column value is produced (DEFAULT, VIRTUAL, STORED, ...).
  public final ColumnStrategy strategy;

  /** Creates a SqlColumnDeclaration; use {@link SqlDdlNodes#column}. */
  SqlColumnDeclaration(SqlParserPos pos, SqlIdentifier name,
      SqlDataTypeSpec dataType, @Nullable SqlNode expression,
      ColumnStrategy strategy) {
    super(pos);
    this.name = name;
    this.dataType = dataType;
    this.expression = expression;
    this.strategy = strategy;
  }

  @Override
  public SqlOperator getOperator() {
    return OPERATOR;
  }

  @Override
  public List<SqlNode> getOperandList() {
    // Only name and dataType participate as operands; expression/strategy are unparse-only.
    return ImmutableList.of(name, dataType);
  }

  @Override
  public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    name.unparse(writer, 0, 0);
    dataType.unparse(writer, 0, 0);
    // Only an explicit NOT NULL is emitted; nullable or unspecified prints nothing.
    if (Boolean.FALSE.equals(dataType.getNullable())) {
      writer.keyword("NOT NULL");
    }
    SqlNode expression = this.expression;
    if (expression != null) {
      switch (strategy) {
        case VIRTUAL:
        case STORED:
          writer.keyword("AS");
          exp(writer, expression);
          writer.keyword(strategy.name());
          break;
        case DEFAULT:
          writer.keyword("DEFAULT");
          exp(writer, expression);
          break;
        default:
          throw new AssertionError("unexpected: " + strategy);
      }
    }
  }

  /** Unparses an expression, parenthesizing unless the writer already does so. */
  static void exp(SqlWriter writer, SqlNode expression) {
    if (writer.isAlwaysUseParentheses()) {
      expression.unparse(writer, 0, 0);
    } else {
      writer.sep("(");
      expression.unparse(writer, 0, 0);
      writer.sep(")");
    }
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;
import static org.apache.calcite.linq4j.Nullness.castNonNull;

import com.google.common.collect.ImmutableList;
import java.util.AbstractList;
import java.util.List;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.util.ImmutableNullableList;
import org.apache.calcite.util.Pair;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Parse tree for {@code CREATE FOREIGN SCHEMA} statement.
 */
public class SqlCreateForeignSchema extends SqlCreate {
  public final SqlIdentifier name;
  /** Schema type; exactly one of {@link #type} and {@link #library} is non-null. */
  public final @Nullable SqlNode type;
  public final @Nullable SqlNode library;
  /** Flat list alternating option name and value; may be null. */
  private final @Nullable SqlNodeList optionList;

  private static final SqlOperator OPERATOR =
      new SqlSpecialOperator("CREATE FOREIGN SCHEMA",
          SqlKind.CREATE_FOREIGN_SCHEMA);

  /** Creates a SqlCreateForeignSchema. */
  SqlCreateForeignSchema(SqlParserPos pos, boolean replace, boolean ifNotExists,
      SqlIdentifier name, @Nullable SqlNode type, @Nullable SqlNode library,
      @Nullable SqlNodeList optionList) {
    super(OPERATOR, pos, replace, ifNotExists);
    this.name = requireNonNull(name, "name");
    this.type = type;
    this.library = library;
    checkArgument((type == null) != (library == null),
        "of type and library, exactly one must be specified");
    this.optionList = optionList; // may be null
  }

  @SuppressWarnings("nullness")
  @Override public List<SqlNode> getOperandList() {
    return ImmutableNullableList.of(name, type, library, optionList);
  }

  @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    if (getReplace()) {
      writer.keyword("CREATE OR REPLACE");
    } else {
      writer.keyword("CREATE");
    }
    writer.keyword("FOREIGN SCHEMA");
    if (ifNotExists) {
      writer.keyword("IF NOT EXISTS");
    }
    name.unparse(writer, leftPrec, rightPrec);
    if (library != null) {
      writer.keyword("LIBRARY");
      library.unparse(writer, 0, 0);
    }
    if (type != null) {
      writer.keyword("TYPE");
      type.unparse(writer, 0, 0);
    }
    if (optionList != null) {
      writer.keyword("OPTIONS");
      SqlWriter.Frame frame = writer.startList("(", ")");
      int i = 0;
      for (Pair<SqlIdentifier, SqlNode> c : options()) {
        if (i++ > 0) {
          writer.sep(",");
        }
        c.left.unparse(writer, 0, 0);
        c.right.unparse(writer, 0, 0);
      }
      writer.endList(frame);
    }
  }

  /** Returns options as a list of (name, value) pairs. */
  public List<Pair<SqlIdentifier, SqlNode>> options() {
    return options(optionList);
  }

  private static List<Pair<SqlIdentifier, SqlNode>> options(
      final @Nullable SqlNodeList optionList) {
    if (optionList == null) {
      return ImmutableList.of();
    }
    // Pair-view over the flat list: element 2i is the name, 2i+1 the value.
    return new AbstractList<Pair<SqlIdentifier, SqlNode>>() {
      @Override public Pair<SqlIdentifier, SqlNode> get(int index) {
        return Pair.of((SqlIdentifier) castNonNull(optionList.get(index * 2)),
            castNonNull(optionList.get(index * 2 + 1)));
      }

      @Override public int size() {
        return optionList.size() / 2;
      }
    };
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;

import java.util.Arrays;
import java.util.List;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.util.Pair;
import org.apache.calcite.util.Util;

/**
 * Parse tree for {@code CREATE FUNCTION} statement.
 */
public class SqlCreateFunction extends SqlCreate {
  private final SqlIdentifier name;
  /** Fully-qualified implementing class, as a string literal. */
  private final SqlNode className;
  /** Flat list alternating file type (FILE, JAR, ARCHIVE) and path literal. */
  private final SqlNodeList usingList;

  private static final SqlSpecialOperator OPERATOR =
      new SqlSpecialOperator("CREATE FUNCTION", SqlKind.CREATE_FUNCTION);

  /** Creates a SqlCreateFunction. */
  public SqlCreateFunction(SqlParserPos pos, boolean replace,
      boolean ifNotExists, SqlIdentifier name,
      SqlNode className, SqlNodeList usingList) {
    super(OPERATOR, pos, replace, ifNotExists);
    this.name = requireNonNull(name, "name");
    this.className = requireNonNull(className, "className");
    this.usingList = requireNonNull(usingList, "usingList");
    // usingList holds (type, path) pairs, so it must have even length.
    checkArgument(usingList.size() % 2 == 0);
  }

  @Override public void unparse(SqlWriter writer, int leftPrec,
      int rightPrec) {
    writer.keyword(getReplace() ? "CREATE OR REPLACE" : "CREATE");
    writer.keyword("FUNCTION");
    if (ifNotExists) {
      writer.keyword("IF NOT EXISTS");
    }
    name.unparse(writer, 0, 0);
    writer.keyword("AS");
    className.unparse(writer, 0, 0);
    if (!usingList.isEmpty()) {
      writer.keyword("USING");
      final SqlWriter.Frame frame =
          writer.startList(SqlWriter.FrameTypeEnum.SIMPLE);
      for (Pair<SqlLiteral, SqlLiteral> using : pairs()) {
        writer.sep(",");
        using.left.unparse(writer, 0, 0); // FILE, URL or ARCHIVE
        using.right.unparse(writer, 0, 0); // e.g. 'file:foo/bar.jar'
      }
      writer.endList(frame);
    }
  }

  /** Views {@link #usingList} as a list of (type, path) literal pairs. */
  @SuppressWarnings("unchecked")
  private List<Pair<SqlLiteral, SqlLiteral>> pairs() {
    return Util.pairs((List) usingList);
  }

  @Override public SqlOperator getOperator() {
    return OPERATOR;
  }

  @Override public List<SqlNode> getOperandList() {
    return Arrays.asList(name, className, usingList);
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import static java.util.Objects.requireNonNull;

import java.util.List;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.util.ImmutableNullableList;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Parse tree for {@code CREATE MATERIALIZED VIEW} statement.
 */
public class SqlCreateMaterializedView extends SqlCreate {
  public final SqlIdentifier name;
  /** Optional explicit column list; null means columns come from the query. */
  public final @Nullable SqlNodeList columnList;
  public final SqlNode query;

  private static final SqlOperator OPERATOR =
      new SqlSpecialOperator("CREATE MATERIALIZED VIEW",
          SqlKind.CREATE_MATERIALIZED_VIEW);

  /** Creates a SqlCreateMaterializedView; use {@link SqlDdlNodes#createMaterializedView}. */
  SqlCreateMaterializedView(SqlParserPos pos, boolean replace,
      boolean ifNotExists, SqlIdentifier name, @Nullable SqlNodeList columnList,
      SqlNode query) {
    super(OPERATOR, pos, replace, ifNotExists);
    this.name = requireNonNull(name, "name");
    this.columnList = columnList; // may be null
    this.query = requireNonNull(query, "query");
  }

  @SuppressWarnings("nullness")
  @Override public List<SqlNode> getOperandList() {
    return ImmutableNullableList.of(name, columnList, query);
  }

  @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    writer.keyword("CREATE");
    writer.keyword("MATERIALIZED VIEW");
    if (ifNotExists) {
      writer.keyword("IF NOT EXISTS");
    }
    name.unparse(writer, leftPrec, rightPrec);
    if (columnList != null) {
      SqlWriter.Frame frame = writer.startList("(", ")");
      for (SqlNode c : columnList) {
        writer.sep(",");
        c.unparse(writer, 0, 0);
      }
      writer.endList(frame);
    }
    writer.keyword("AS");
    writer.newlineAndIndent();
    query.unparse(writer, 0, 0);
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import static java.util.Objects.requireNonNull;

import java.util.List;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.util.ImmutableNullableList;

/**
 * Parse tree for {@code CREATE SCHEMA} statement.
 */
public class SqlCreateSchema extends SqlCreate {
  public final SqlIdentifier name;

  private static final SqlOperator OPERATOR =
      new SqlSpecialOperator("CREATE SCHEMA", SqlKind.CREATE_SCHEMA);

  /** Creates a SqlCreateSchema; use {@link SqlDdlNodes#createSchema}. */
  SqlCreateSchema(SqlParserPos pos, boolean replace, boolean ifNotExists,
      SqlIdentifier name) {
    super(OPERATOR, pos, replace, ifNotExists);
    this.name = requireNonNull(name, "name");
  }

  @Override public List<SqlNode> getOperandList() {
    return ImmutableNullableList.of(name);
  }

  @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    if (getReplace()) {
      writer.keyword("CREATE OR REPLACE");
    } else {
      writer.keyword("CREATE");
    }
    writer.keyword("SCHEMA");
    if (ifNotExists) {
      writer.keyword("IF NOT EXISTS");
    }
    name.unparse(writer, leftPrec, rightPrec);
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import static java.util.Objects.requireNonNull;

import java.util.List;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.util.ImmutableNullableList;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Parse tree for {@code CREATE TABLE} statement.
 */
public class SqlCreateTable extends SqlCreate {
  public final SqlIdentifier name;
  /** Column declarations and constraints; null for "CREATE TABLE ... AS query". */
  public final @Nullable SqlNodeList columnList;
  /** Source query for "CREATE TABLE ... AS query"; may be null. */
  public final @Nullable SqlNode query;
  /** Optional "WITH (...)" options; may be null. */
  public final @Nullable SqlNodeList withOptions;

  private static final SqlOperator OPERATOR =
      new SqlSpecialOperator("CREATE TABLE", SqlKind.CREATE_TABLE);

  /** Creates a SqlCreateTable; use {@link SqlDdlNodes#createTable}. */
  protected SqlCreateTable(SqlParserPos pos, boolean replace, boolean ifNotExists,
      SqlIdentifier name, @Nullable SqlNodeList columnList, @Nullable SqlNode query,
      @Nullable SqlNodeList withOptions) {
    super(OPERATOR, pos, replace, ifNotExists);
    this.name = requireNonNull(name, "name");
    this.columnList = columnList; // may be null
    this.query = query; // for "CREATE TABLE ... AS query"; may be null
    this.withOptions = withOptions; // may be null
  }

  @SuppressWarnings("nullness")
  @Override public List<SqlNode> getOperandList() {
    // NOTE(review): withOptions is not part of the operand list — confirm this
    // is intentional (visitors/copiers will not see the WITH options).
    return ImmutableNullableList.of(name, columnList, query);
  }

  @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    writer.keyword("CREATE");
    writer.keyword("TABLE");
    if (ifNotExists) {
      writer.keyword("IF NOT EXISTS");
    }
    name.unparse(writer, leftPrec, rightPrec);
    if (columnList != null) {
      SqlWriter.Frame frame = writer.startList("(", ")");
      for (SqlNode c : columnList) {
        writer.sep(",");
        c.unparse(writer, 0, 0);
      }
      writer.endList(frame);
    }
    if (query != null) {
      writer.keyword("AS");
      writer.newlineAndIndent();
      query.unparse(writer, 0, 0);
    }
    if (withOptions != null) {
      SqlWriter.Frame frame = writer.startList("WITH (", ")");
      for (SqlNode c : withOptions) {
        writer.sep(",");
        c.unparse(writer, 0, 0);
      }
      writer.endList(frame);
    }
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import static com.google.common.base.Preconditions.checkArgument;

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.util.ImmutableNullableList;

/**
 * Parse tree for {@code CREATE TABLE LIKE} statement.
 */
public class SqlCreateTableLike extends SqlCreate {
  private static final SqlOperator OPERATOR =
      new SqlSpecialOperator("CREATE TABLE LIKE", SqlKind.CREATE_TABLE_LIKE);

  /**
   * The LikeOption specifies which additional properties of the original
   * table to copy.
   */
  public enum LikeOption implements Symbolizable {
    ALL,
    DEFAULTS,
    GENERATED
  }

  public final SqlIdentifier name;
  public final SqlIdentifier sourceTable;
  public final SqlNodeList includingOptions;
  public final SqlNodeList excludingOptions;

  public SqlCreateTableLike(SqlParserPos pos, boolean replace, boolean ifNotExists,
      SqlIdentifier name, SqlIdentifier sourceTable,
      SqlNodeList includingOptions, SqlNodeList excludingOptions) {
    super(OPERATOR, pos, replace, ifNotExists);
    this.name = name;
    this.sourceTable = sourceTable;
    this.includingOptions = includingOptions;
    this.excludingOptions = excludingOptions;

    // Validate like options: ALL must stand alone, and an option may not be
    // both included and excluded.
    if (includingOptions.contains(LikeOption.ALL.symbol(SqlParserPos.ZERO))) {
      checkArgument(includingOptions.size() == 1 && excludingOptions.isEmpty(),
          "ALL cannot be used with other options");
    } else if (excludingOptions.contains(LikeOption.ALL.symbol(SqlParserPos.ZERO))) {
      checkArgument(excludingOptions.size() == 1 && includingOptions.isEmpty(),
          "ALL cannot be used with other options");
    }

    includingOptions.forEach(option -> checkArgument(!excludingOptions.contains(option),
        "Cannot include and exclude option %s at same time",
        option.toString()));
  }

  @Override public List<SqlNode> getOperandList() {
    return ImmutableNullableList.of(name, sourceTable, includingOptions,
        excludingOptions);
  }

  /** Returns the set of options to copy from the source table. */
  public Set<LikeOption> options() {
    return includingOptions.stream()
        .map(c -> ((SqlLiteral) c).symbolValue(LikeOption.class))
        .collect(Collectors.toSet());
  }

  @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    writer.keyword("CREATE");
    writer.keyword("TABLE");
    if (ifNotExists) {
      writer.keyword("IF NOT EXISTS");
    }
    name.unparse(writer, leftPrec, rightPrec);
    writer.keyword("LIKE");
    sourceTable.unparse(writer, leftPrec, rightPrec);
    // NOTE(review): HashSet de-duplicates but makes INCLUDING/EXCLUDING output
    // order nondeterministic — confirm unparse order does not matter here.
    for (SqlNode c : new HashSet<>(includingOptions)) {
      LikeOption likeOption = ((SqlLiteral) c).getValueAs(LikeOption.class);
      writer.newlineAndIndent();
      writer.keyword("INCLUDING");
      writer.keyword(likeOption.name());
    }

    for (SqlNode c : new HashSet<>(excludingOptions)) {
      LikeOption likeOption = ((SqlLiteral) c).getValueAs(LikeOption.class);
      writer.newlineAndIndent();
      writer.keyword("EXCLUDING");
      writer.keyword(likeOption.name());
    }
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import static java.util.Objects.requireNonNull;

import java.util.List;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.util.ImmutableNullableList;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Parse tree for {@code CREATE TYPE} statement.
 */
public class SqlCreateType extends SqlCreate {
  public final SqlIdentifier name;
  /** Attribute definitions for a structured type; null for an alias type. */
  public final @Nullable SqlNodeList attributeDefs;
  /** Aliased data type; null for a structured type. */
  public final @Nullable SqlDataTypeSpec dataType;

  private static final SqlOperator OPERATOR =
      new SqlSpecialOperator("CREATE TYPE", SqlKind.CREATE_TYPE);

  /** Creates a SqlCreateType; use {@link SqlDdlNodes#createType}. */
  SqlCreateType(SqlParserPos pos, boolean replace, SqlIdentifier name,
      @Nullable SqlNodeList attributeDefs, @Nullable SqlDataTypeSpec dataType) {
    super(OPERATOR, pos, replace, false);
    this.name = requireNonNull(name, "name");
    this.attributeDefs = attributeDefs; // may be null
    this.dataType = dataType; // may be null
  }

  @SuppressWarnings("nullness")
  @Override public List<SqlNode> getOperandList() {
    return ImmutableNullableList.of(name, attributeDefs);
  }

  @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    if (getReplace()) {
      writer.keyword("CREATE OR REPLACE");
    } else {
      writer.keyword("CREATE");
    }
    writer.keyword("TYPE");
    name.unparse(writer, leftPrec, rightPrec);
    writer.keyword("AS");
    if (attributeDefs != null) {
      SqlWriter.Frame frame = writer.startList("(", ")");
      for (SqlNode a : attributeDefs) {
        writer.sep(",");
        a.unparse(writer, 0, 0);
      }
      writer.endList(frame);
    } else if (dataType != null) {
      dataType.unparse(writer, leftPrec, rightPrec);
    }
  }
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.baremaps.calcite.ddl;

import static java.util.Objects.requireNonNull;

import java.util.List;
import org.apache.calcite.sql.*;
import org.apache.calcite.sql.parser.SqlParserPos;
import org.apache.calcite.util.ImmutableNullableList;
import org.checkerframework.checker.nullness.qual.Nullable;

/**
 * Parse tree for {@code CREATE VIEW} statement.
 */
public class SqlCreateView extends SqlCreate {
  public final SqlIdentifier name;
  /** Optional explicit column list; null means columns come from the query. */
  public final @Nullable SqlNodeList columnList;
  public final SqlNode query;

  private static final SqlOperator OPERATOR =
      new SqlSpecialOperator("CREATE VIEW", SqlKind.CREATE_VIEW);

  /** Creates a SqlCreateView; use {@link SqlDdlNodes#createView}. */
  SqlCreateView(SqlParserPos pos, boolean replace, SqlIdentifier name,
      @Nullable SqlNodeList columnList, SqlNode query) {
    super(OPERATOR, pos, replace, false);
    this.name = requireNonNull(name, "name");
    this.columnList = columnList; // may be null
    this.query = requireNonNull(query, "query");
  }

  @SuppressWarnings("nullness")
  @Override public List<SqlNode> getOperandList() {
    return ImmutableNullableList.of(name, columnList, query);
  }

  @Override public void unparse(SqlWriter writer, int leftPrec, int rightPrec) {
    if (getReplace()) {
      writer.keyword("CREATE OR REPLACE");
    } else {
      writer.keyword("CREATE");
    }
    writer.keyword("VIEW");
    name.unparse(writer, leftPrec, rightPrec);
    if (columnList != null) {
      SqlWriter.Frame frame = writer.startList("(", ")");
      for (SqlNode c : columnList) {
        writer.sep(",");
        c.unparse(writer, 0, 0);
      }
      writer.endList(frame);
    }
    writer.keyword("AS");
    writer.newlineAndIndent();
    query.unparse(writer, 0, 0);
  }
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.ddl; + +import org.apache.calcite.schema.ColumnStrategy; +import org.apache.calcite.sql.*; +import org.apache.calcite.sql.parser.SqlParserPos; + +/** + * Utilities concerning {@link SqlNode} for DDL. + */ +public class SqlDdlNodes { + private SqlDdlNodes() {} + + /** Creates a CREATE SCHEMA. */ + public static SqlCreateSchema createSchema(SqlParserPos pos, boolean replace, + boolean ifNotExists, SqlIdentifier name) { + return new SqlCreateSchema(pos, replace, ifNotExists, name); + } + + /** Creates a CREATE FOREIGN SCHEMA. */ + public static SqlCreateForeignSchema createForeignSchema(SqlParserPos pos, + boolean replace, boolean ifNotExists, SqlIdentifier name, SqlNode type, + SqlNode library, SqlNodeList optionList) { + return new SqlCreateForeignSchema(pos, replace, ifNotExists, name, type, + library, optionList); + } + + /** Creates a CREATE TYPE. */ + public static SqlCreateType createType(SqlParserPos pos, boolean replace, + SqlIdentifier name, SqlNodeList attributeList, + SqlDataTypeSpec dataTypeSpec) { + return new SqlCreateType(pos, replace, name, attributeList, dataTypeSpec); + } + + /** Creates a CREATE TABLE. */ + public static SqlCreateTable createTable(SqlParserPos pos, boolean replace, + boolean ifNotExists, SqlIdentifier name, SqlNodeList columnList, + SqlNode query, SqlNodeList withOptions) { + return new SqlCreateTable(pos, replace, ifNotExists, name, columnList, + query, withOptions); + } + + /** Creates a CREATE TABLE LIKE. 
*/ + public static SqlCreateTableLike createTableLike(SqlParserPos pos, boolean replace, + boolean ifNotExists, SqlIdentifier name, SqlIdentifier sourceTable, + SqlNodeList including, SqlNodeList excluding) { + return new SqlCreateTableLike(pos, replace, ifNotExists, name, + sourceTable, including, excluding); + } + + /** Creates a CREATE VIEW. */ + public static SqlCreateView createView(SqlParserPos pos, boolean replace, + SqlIdentifier name, SqlNodeList columnList, SqlNode query) { + return new SqlCreateView(pos, replace, name, columnList, query); + } + + /** Creates a CREATE MATERIALIZED VIEW. */ + public static SqlCreateMaterializedView createMaterializedView( + SqlParserPos pos, boolean replace, boolean ifNotExists, + SqlIdentifier name, SqlNodeList columnList, SqlNode query) { + return new SqlCreateMaterializedView(pos, replace, ifNotExists, name, + columnList, query); + } + + /** Creates a CREATE FUNCTION. */ + public static SqlCreateFunction createFunction( + SqlParserPos pos, boolean replace, boolean ifNotExists, + SqlIdentifier name, SqlNode className, SqlNodeList usingList) { + return new SqlCreateFunction(pos, replace, ifNotExists, name, + className, usingList); + } + + /** Creates a DROP [ FOREIGN ] SCHEMA. */ + public static SqlDropSchema dropSchema(SqlParserPos pos, boolean foreign, + boolean ifExists, SqlIdentifier name) { + return new SqlDropSchema(pos, foreign, ifExists, name); + } + + /** Creates a DROP TYPE. */ + public static SqlDropType dropType(SqlParserPos pos, boolean ifExists, + SqlIdentifier name) { + return new SqlDropType(pos, ifExists, name); + } + + /** Creates a DROP TABLE. */ + public static SqlDropTable dropTable(SqlParserPos pos, boolean ifExists, + SqlIdentifier name) { + return new SqlDropTable(pos, ifExists, name); + } + + /** Creates a TRUNCATE TABLE. 
*/ + public static SqlTruncateTable truncateTable(SqlParserPos pos, + SqlIdentifier name, boolean continueIdentity) { + return new SqlTruncateTable(pos, name, continueIdentity); + } + + /** Creates a DROP VIEW. */ + public static SqlDrop dropView(SqlParserPos pos, boolean ifExists, + SqlIdentifier name) { + return new SqlDropView(pos, ifExists, name); + } + + /** Creates a DROP MATERIALIZED VIEW. */ + public static SqlDrop dropMaterializedView(SqlParserPos pos, + boolean ifExists, SqlIdentifier name) { + return new SqlDropMaterializedView(pos, ifExists, name); + } + + /** Creates a DROP FUNCTION. */ + public static SqlDrop dropFunction(SqlParserPos pos, + boolean ifExists, SqlIdentifier name) { + return new SqlDropFunction(pos, ifExists, name); + } + + /** Creates a column declaration. */ + public static SqlNode column(SqlParserPos pos, SqlIdentifier name, + SqlDataTypeSpec dataType, SqlNode expression, ColumnStrategy strategy) { + return new SqlColumnDeclaration(pos, name, dataType, expression, strategy); + } + + /** Creates an attribute definition. */ + public static SqlNode attribute(SqlParserPos pos, SqlIdentifier name, + SqlDataTypeSpec dataType, SqlNode expression, SqlCollation collation) { + return new SqlAttributeDefinition(pos, name, dataType, expression, collation); + } + + /** Creates a CHECK constraint. */ + public static SqlNode check(SqlParserPos pos, SqlIdentifier name, + SqlNode expression) { + return new SqlCheckConstraint(pos, name, expression); + } + + /** Creates a UNIQUE constraint. */ + public static SqlKeyConstraint unique(SqlParserPos pos, SqlIdentifier name, + SqlNodeList columnList) { + return new SqlKeyConstraint(pos, name, columnList); + } + + /** Creates a PRIMARY KEY constraint. 
*/ + public static SqlKeyConstraint primary(SqlParserPos pos, SqlIdentifier name, + SqlNodeList columnList) { + return new SqlKeyConstraint(pos, name, columnList) { + @Override + public SqlOperator getOperator() { + return PRIMARY; + } + }; + } + + /** File type for CREATE FUNCTION. */ + public enum FileType { + FILE, + JAR, + ARCHIVE + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropFunction.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropFunction.java new file mode 100644 index 000000000..9d5f254b3 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropFunction.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.ddl; + +import org.apache.calcite.sql.SqlIdentifier; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.SqlOperator; +import org.apache.calcite.sql.SqlSpecialOperator; +import org.apache.calcite.sql.parser.SqlParserPos; + +/** + * Parse tree for {@code DROP FUNCTION} statement. 
+ */
+public class SqlDropFunction extends SqlDropObject {
+  // Shared operator instance; SqlDropObject.unparse emits its name ("DROP FUNCTION").
+  private static final SqlOperator OPERATOR =
+      new SqlSpecialOperator("DROP FUNCTION", SqlKind.DROP_FUNCTION);
+
+  /** Creates a SqlDropFunction. */
+  public SqlDropFunction(SqlParserPos pos, boolean ifExists,
+      SqlIdentifier name) {
+    super(OPERATOR, pos, ifExists, name);
+  }
+}
diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropMaterializedView.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropMaterializedView.java
new file mode 100644
index 000000000..1cac04712
--- /dev/null
+++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropMaterializedView.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.baremaps.calcite.ddl;
+
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlSpecialOperator;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+/**
+ * Parse tree for {@code DROP MATERIALIZED VIEW} statement.
+ */
+public class SqlDropMaterializedView extends SqlDropObject {
+  // Shared operator instance; SqlDropObject.unparse emits its name.
+  private static final SqlOperator OPERATOR =
+      new SqlSpecialOperator("DROP MATERIALIZED VIEW",
+          SqlKind.DROP_MATERIALIZED_VIEW);
+
+  /** Creates a SqlDropMaterializedView. */
+  // Package-private: instances are created via SqlDdlNodes.dropMaterializedView.
+  SqlDropMaterializedView(SqlParserPos pos, boolean ifExists,
+      SqlIdentifier name) {
+    super(OPERATOR, pos, ifExists, name);
+  }
+}
diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropObject.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropObject.java
new file mode 100644
index 000000000..00fb45ba5
--- /dev/null
+++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropObject.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.baremaps.calcite.ddl;
+
+import com.google.common.collect.ImmutableList;
+import java.util.List;
+import org.apache.calcite.jdbc.CalcitePrepare;
+import org.apache.calcite.sql.*;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+/**
+ * Base class for parse trees of {@code DROP TABLE}, {@code DROP VIEW},
+ * {@code DROP MATERIALIZED VIEW} and {@code DROP TYPE} statements.
+ */ +public abstract class SqlDropObject extends SqlDrop { + public final SqlIdentifier name; + + /** Creates a SqlDropObject. */ + SqlDropObject(SqlOperator operator, SqlParserPos pos, boolean ifExists, + SqlIdentifier name) { + super(operator, pos, ifExists); + this.name = name; + } + + @Override + public List getOperandList() { + return ImmutableList.of(name); + } + + @Override + public void unparse(SqlWriter writer, int leftPrec, int rightPrec) { + writer.keyword(getOperator().getName()); // "DROP TABLE" etc. + if (ifExists) { + writer.keyword("IF EXISTS"); + } + name.unparse(writer, leftPrec, rightPrec); + } + + public void execute(CalcitePrepare.Context context) {} +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropSchema.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropSchema.java new file mode 100644 index 000000000..5346f4742 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropSchema.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.ddl; + +import com.google.common.collect.ImmutableList; +import java.util.List; +import org.apache.calcite.sql.*; +import org.apache.calcite.sql.parser.SqlParserPos; + +/** + * Parse tree for {@code DROP SCHEMA} statement. + */ +public class SqlDropSchema extends SqlDrop { + private final boolean foreign; + public final SqlIdentifier name; + + private static final SqlOperator OPERATOR = + new SqlSpecialOperator("DROP SCHEMA", SqlKind.DROP_SCHEMA); + + /** Creates a SqlDropSchema. */ + SqlDropSchema(SqlParserPos pos, boolean foreign, boolean ifExists, + SqlIdentifier name) { + super(OPERATOR, pos, ifExists); + this.foreign = foreign; + this.name = name; + } + + @Override + public List getOperandList() { + return ImmutableList.of( + SqlLiteral.createBoolean(foreign, SqlParserPos.ZERO), name); + } + + @Override + public void unparse(SqlWriter writer, int leftPrec, int rightPrec) { + writer.keyword("DROP"); + if (foreign) { + writer.keyword("FOREIGN"); + } + writer.keyword("SCHEMA"); + if (ifExists) { + writer.keyword("IF EXISTS"); + } + name.unparse(writer, leftPrec, rightPrec); + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropTable.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropTable.java new file mode 100644 index 000000000..e344c1e29 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropTable.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.baremaps.calcite.ddl;
+
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlSpecialOperator;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+/**
+ * Parse tree for {@code DROP TABLE} statement.
+ */
+public class SqlDropTable extends SqlDropObject {
+  // Shared operator instance; SqlDropObject.unparse emits its name.
+  private static final SqlOperator OPERATOR =
+      new SqlSpecialOperator("DROP TABLE", SqlKind.DROP_TABLE);
+
+  /** Creates a SqlDropTable. */
+  // Package-private: instances are created via SqlDdlNodes.dropTable.
+  SqlDropTable(SqlParserPos pos, boolean ifExists, SqlIdentifier name) {
+    super(OPERATOR, pos, ifExists, name);
+  }
+}
diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropType.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropType.java
new file mode 100644
index 000000000..a9584b716
--- /dev/null
+++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropType.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.baremaps.calcite.ddl;
+
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlSpecialOperator;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+/**
+ * Parse tree for {@code DROP TYPE} statement.
+ */
+public class SqlDropType extends SqlDropObject {
+  // Shared operator instance; SqlDropObject.unparse emits its name.
+  private static final SqlOperator OPERATOR =
+      new SqlSpecialOperator("DROP TYPE", SqlKind.DROP_TYPE);
+
+  /* Creates a SqlDropType; package-private, built via SqlDdlNodes.dropType. */
+  SqlDropType(SqlParserPos pos, boolean ifExists, SqlIdentifier name) {
+    super(OPERATOR, pos, ifExists, name);
+  }
+}
diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropView.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropView.java
new file mode 100644
index 000000000..5f28a156c
--- /dev/null
+++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlDropView.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.baremaps.calcite.ddl;
+
+import org.apache.calcite.sql.SqlIdentifier;
+import org.apache.calcite.sql.SqlKind;
+import org.apache.calcite.sql.SqlOperator;
+import org.apache.calcite.sql.SqlSpecialOperator;
+import org.apache.calcite.sql.parser.SqlParserPos;
+
+/**
+ * Parse tree for {@code DROP VIEW} statement.
+ */
+public class SqlDropView extends SqlDropObject {
+  // Shared operator instance; SqlDropObject.unparse emits its name.
+  private static final SqlOperator OPERATOR =
+      new SqlSpecialOperator("DROP VIEW", SqlKind.DROP_VIEW);
+
+  /** Creates a SqlDropView. */
+  // Package-private: instances are created via SqlDdlNodes.dropView.
+  SqlDropView(SqlParserPos pos, boolean ifExists, SqlIdentifier name) {
+    super(OPERATOR, pos, ifExists, name);
+  }
+}
diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlKeyConstraint.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlKeyConstraint.java
new file mode 100644
index 000000000..0108764e4
--- /dev/null
+++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlKeyConstraint.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.ddl; + +import java.util.List; +import org.apache.calcite.sql.*; +import org.apache.calcite.sql.parser.SqlParserPos; +import org.apache.calcite.util.ImmutableNullableList; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * Parse tree for {@code UNIQUE}, {@code PRIMARY KEY} constraints. + * + *

+ * And {@code FOREIGN KEY}, when we support it. + */ +public class SqlKeyConstraint extends SqlCall { + private static final SqlSpecialOperator UNIQUE = + new SqlSpecialOperator("UNIQUE", SqlKind.UNIQUE); + + protected static final SqlSpecialOperator PRIMARY = + new SqlSpecialOperator("PRIMARY KEY", SqlKind.PRIMARY_KEY); + + private final @Nullable SqlIdentifier name; + private final SqlNodeList columnList; + + /** Creates a SqlKeyConstraint. */ + SqlKeyConstraint(SqlParserPos pos, @Nullable SqlIdentifier name, + SqlNodeList columnList) { + super(pos); + this.name = name; + this.columnList = columnList; + } + + /** Creates a UNIQUE constraint. */ + public static SqlKeyConstraint unique(SqlParserPos pos, SqlIdentifier name, + SqlNodeList columnList) { + return new SqlKeyConstraint(pos, name, columnList); + } + + /** Creates a PRIMARY KEY constraint. */ + public static SqlKeyConstraint primary(SqlParserPos pos, SqlIdentifier name, + SqlNodeList columnList) { + return new SqlKeyConstraint(pos, name, columnList) { + @Override + public SqlOperator getOperator() { + return PRIMARY; + } + }; + } + + @Override + public SqlOperator getOperator() { + return UNIQUE; + } + + @SuppressWarnings("nullness") + @Override + public List getOperandList() { + return ImmutableNullableList.of(name, columnList); + } + + @Override + public void unparse(SqlWriter writer, int leftPrec, int rightPrec) { + if (name != null) { + writer.keyword("CONSTRAINT"); + name.unparse(writer, 0, 0); + } + writer.keyword(getOperator().getName()); // "UNIQUE" or "PRIMARY KEY" + columnList.unparse(writer, 1, 1); + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlTruncateTable.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlTruncateTable.java new file mode 100644 index 000000000..d4a223d3a --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/ddl/SqlTruncateTable.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.ddl; + +import com.google.common.collect.ImmutableList; +import java.util.List; +import org.apache.calcite.sql.*; +import org.apache.calcite.sql.parser.SqlParserPos; + +/** + * Parse tree for {@code TRUNCATE TABLE} statement. + */ +public class SqlTruncateTable extends SqlTruncate { + + private static final SqlOperator OPERATOR = + new SqlSpecialOperator("TRUNCATE TABLE", SqlKind.TRUNCATE_TABLE); + public final SqlIdentifier name; + public final boolean continueIdentify; + + /** + * Creates a SqlTruncateTable. 
+ */ + public SqlTruncateTable(SqlParserPos pos, SqlIdentifier name, boolean continueIdentify) { + super(OPERATOR, pos); + this.name = name; + this.continueIdentify = continueIdentify; + } + + @Override + public List getOperandList() { + return ImmutableList.of(name); + } + + @Override + public void unparse(SqlWriter writer, int leftPrec, int rightPrec) { + writer.keyword("TRUNCATE"); + writer.keyword("TABLE"); + name.unparse(writer, leftPrec, rightPrec); + if (continueIdentify) { + writer.keyword("CONTINUE IDENTITY"); + } else { + writer.keyword("RESTART IDENTITY"); + + } + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/flatgeobuf/FlatGeoBufTableFactory.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/flatgeobuf/FlatGeoBufTableFactory.java new file mode 100644 index 000000000..a244b023e --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/flatgeobuf/FlatGeoBufTableFactory.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.flatgeobuf; + +import java.io.File; +import java.io.IOException; +import java.util.Map; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.TableFactory; + +/** + * A table factory for creating FlatGeoBuf tables. + */ +public class FlatGeoBufTableFactory implements TableFactory

{ + + /** + * Constructor. + */ + public FlatGeoBufTableFactory() {} + + @Override + public Table create( + SchemaPlus schema, + String name, + Map operand, + RelDataType rowType) { + // Get the file path from the operand + String filePath = (String) operand.get("file"); + if (filePath == null) { + throw new IllegalArgumentException("File path must be specified in the 'file' operand"); + } + + try { + File file = new File(filePath); + return new FlatGeoBufTable(file); + } catch (IOException e) { + throw new RuntimeException("Failed to create FlatGeoBufTable from file: " + filePath, e); + } + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/geopackage/GeoPackageTableFactory.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/geopackage/GeoPackageTableFactory.java new file mode 100644 index 000000000..fa544d5af --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/geopackage/GeoPackageTableFactory.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.geopackage; + +import java.io.File; +import java.io.IOException; +import java.util.Map; +import org.apache.calcite.jdbc.JavaTypeFactoryImpl; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.TableFactory; + +/** + * A table factory for creating GeoPackage tables. + */ +public class GeoPackageTableFactory implements TableFactory
{ + + /** + * Constructor. + */ + public GeoPackageTableFactory() {} + + @Override + public Table create( + SchemaPlus schema, + String name, + Map operand, + RelDataType rowType) { + if (operand.size() < 2) { + throw new IllegalArgumentException("Missing file path and table name for GeoPackage table"); + } + try { + String filePath = (String) operand.get("file"); + if (filePath == null) { + throw new IllegalArgumentException("File path must be specified in the 'file' operand"); + } + + String tableName = (String) operand.get("table"); + if (tableName == null) { + throw new IllegalArgumentException("Table name must be specified in the 'table' operand"); + } + + // Create a type factory - Calcite doesn't expose one through SchemaPlus + RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(); + + return new GeoPackageTable(new File(filePath), tableName, typeFactory); + } catch (IOException e) { + throw new RuntimeException("Failed to create GeoPackage table", e); + } + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/geoparquet/GeoParquetTableFactory.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/geoparquet/GeoParquetTableFactory.java new file mode 100644 index 000000000..396d45fad --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/geoparquet/GeoParquetTableFactory.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.geoparquet; + +import java.io.File; +import java.io.IOException; +import java.util.Map; +import org.apache.calcite.jdbc.JavaTypeFactoryImpl; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.TableFactory; + +/** + * A table factory for creating GeoParquet tables. + */ +public class GeoParquetTableFactory implements TableFactory
{ + + /** + * Constructor. + */ + public GeoParquetTableFactory() {} + + @Override + public Table create( + SchemaPlus schema, + String name, + Map operand, + RelDataType rowType) { + if (operand.size() < 2) { + throw new IllegalArgumentException("Missing file path for GeoParquet table"); + } + try { + String filePath = (String) operand.get("file"); + if (filePath == null) { + throw new IllegalArgumentException("File path must be specified in the 'file' operand"); + } + + // Create a type factory - Calcite doesn't expose one through SchemaPlus + RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(); + + return new GeoParquetTable(new File(filePath), typeFactory); + } catch (IOException e) { + throw new RuntimeException("Failed to create GeoParquet table", e); + } + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapSchema.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapSchema.java new file mode 100644 index 000000000..26ab95a64 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapSchema.java @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.openstreetmap; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import org.apache.baremaps.openstreetmap.OpenStreetMapFormat; +import org.apache.baremaps.openstreetmap.model.Entity; +import org.apache.baremaps.openstreetmap.pbf.PbfEntityReader; +import org.apache.baremaps.openstreetmap.xml.XmlEntityReader; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.impl.AbstractSchema; + +/** + * A Calcite schema implementation for OpenStreetMap data. This schema provides access to + * OpenStreetMap files through the Apache Calcite framework for SQL querying. + */ +public class OpenStreetMapSchema extends AbstractSchema { + + private final File directory; + private final Map tableMap; + private final RelDataTypeFactory typeFactory; + + /** + * Constructs an OpenStreetMapSchema with the specified directory. 
+ * + * @param directory the directory containing OpenStreetMap files + * @param typeFactory the type factory to use for creating tables + * @throws IOException if an I/O error occurs + */ + public OpenStreetMapSchema(File directory, RelDataTypeFactory typeFactory) throws IOException { + this.directory = Objects.requireNonNull(directory, "Directory cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Process files in the directory + File[] files = directory.listFiles((dir, name) -> name.toLowerCase().endsWith(".pbf") || + name.toLowerCase().endsWith(".osm.pbf") || + name.toLowerCase().endsWith(".xml") || + name.toLowerCase().endsWith(".osm")); + + if (files != null) { + for (File file : files) { + // Extract the base name without extension (e.g., "sample" from "sample.osm.pbf") + String fileName = file.getName(); + String tableName = fileName; + + // Remove all extensions (e.g., "sample.osm.pbf" -> "sample") + while (tableName.contains(".")) { + int lastDotIndex = tableName.lastIndexOf('.'); + if (lastDotIndex > 0) { + tableName = tableName.substring(0, lastDotIndex); + } else { + break; + } + } + + // Create the table with the file reference + tableMap.put(tableName, createTable(file)); + } + } + } + + /** + * Constructs an OpenStreetMapSchema with a single file. 
+ * + * @param file the OpenStreetMap file + * @param typeFactory the type factory to use for creating tables + * @throws IOException if an I/O error occurs + */ + public OpenStreetMapSchema(File file, RelDataTypeFactory typeFactory, boolean isDirectory) + throws IOException { + if (isDirectory) { + // If isDirectory is true, treat the file as a directory + this.directory = Objects.requireNonNull(file, "Directory cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Process files in the directory + File[] files = file.listFiles((dir, name) -> name.toLowerCase().endsWith(".pbf") || + name.toLowerCase().endsWith(".osm.pbf") || + name.toLowerCase().endsWith(".xml") || + name.toLowerCase().endsWith(".osm")); + + if (files != null) { + for (File osmFile : files) { + // Extract the base name without extension (e.g., "sample" from "sample.osm.pbf") + String fileName = osmFile.getName(); + String tableName = fileName; + + // Remove all extensions (e.g., "sample.osm.pbf" -> "sample") + while (tableName.contains(".")) { + int lastDotIndex = tableName.lastIndexOf('.'); + if (lastDotIndex > 0) { + tableName = tableName.substring(0, lastDotIndex); + } else { + break; + } + } + + // Create the table with the file reference + tableMap.put(tableName, createTable(osmFile)); + } + } + } else { + // If isDirectory is false, treat the file as a single file + this.directory = Objects.requireNonNull(file, "File cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Extract the base name without extension (e.g., "sample" from "sample.osm.pbf") + String fileName = file.getName(); + String tableName = fileName; + + // Remove all extensions (e.g., "sample.osm.pbf" -> "sample") + while (tableName.contains(".")) { + int lastDotIndex = tableName.lastIndexOf('.'); + if (lastDotIndex > 0) { + tableName = 
tableName.substring(0, lastDotIndex); + } else { + break; + } + } + + // Create the table with the file reference + tableMap.put(tableName, createTable(file)); + } + } + + /** + * Creates a table for the given file. + * + * @param file the OpenStreetMap file + * @return the created table + */ + private Table createTable(File file) { + // Determine the appropriate entity reader based on file extension + OpenStreetMapFormat.EntityReader entityReader; + if (file.getName().toLowerCase().endsWith(".pbf") || + file.getName().toLowerCase().endsWith(".osm.pbf")) { + PbfEntityReader pbfReader = new PbfEntityReader(); + pbfReader.setGeometries(true); + pbfReader.setCoordinateMap(new HashMap<>()); + pbfReader.setReferenceMap(new HashMap<>()); + entityReader = pbfReader; + } else { + XmlEntityReader xmlReader = new XmlEntityReader(); + xmlReader.setGeometries(true); + xmlReader.setCoordinateMap(new HashMap<>()); + xmlReader.setReferenceMap(new HashMap<>()); + entityReader = xmlReader; + } + + // Create the table with the file reference + return new OpenStreetMapTable(file, entityReader); + } + + @Override + protected Map getTableMap() { + return tableMap; + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTable.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTable.java index 93cd87d44..6be494888 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTable.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTable.java @@ -17,6 +17,9 @@ package org.apache.baremaps.calcite.openstreetmap; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; import java.io.InputStream; import java.util.*; import java.util.stream.Stream; @@ -40,19 +43,19 @@ */ public class OpenStreetMapTable extends AbstractTable implements ScannableTable { + private final File file; private final 
EntityReader entityReader; - private final InputStream inputStream; private RelDataType rowType; /** * Constructs an OpenStreetMapTable with the specified parameters. * + * @param file the OpenStreetMap file * @param entityReader the EntityReader for parsing the OSM data - * @param inputStream the input stream containing the OSM data */ - public OpenStreetMapTable(EntityReader entityReader, InputStream inputStream) { - this.entityReader = entityReader; - this.inputStream = inputStream; + public OpenStreetMapTable(File file, EntityReader entityReader) { + this.file = Objects.requireNonNull(file, "File cannot be null"); + this.entityReader = Objects.requireNonNull(entityReader, "Entity reader cannot be null"); } @Override @@ -97,7 +100,11 @@ public Enumerable scan(DataContext root) { return new AbstractEnumerable() { @Override public Enumerator enumerator() { - return new OpenStreetMapEnumerator(entityReader, inputStream); + try { + return new OpenStreetMapEnumerator(entityReader, new FileInputStream(file)); + } catch (IOException e) { + throw new RuntimeException("Failed to open input stream", e); + } } }; } diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTableFactory.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTableFactory.java new file mode 100644 index 000000000..0da0b99c6 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTableFactory.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.openstreetmap; + +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Map; +import org.apache.baremaps.openstreetmap.pbf.PbfEntityReader; +import org.apache.baremaps.openstreetmap.xml.XmlEntityReader; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.TableFactory; + +/** + * A table factory for creating OpenStreetMap tables. + */ +public class OpenStreetMapTableFactory implements TableFactory
{ + + /** + * Constructor. + */ + public OpenStreetMapTableFactory() {} + + @Override + public Table create( + SchemaPlus schema, + String name, + Map operand, + RelDataType rowType) { + // Get the file path from the operand + String filePath = (String) operand.get("file"); + if (filePath == null) { + throw new IllegalArgumentException("File path must be specified in the 'file' operand"); + } + + try { + // Create a new input stream from the file + InputStream inputStream = new FileInputStream(filePath); + + // Create an entity reader based on the file extension + if (filePath.endsWith(".pbf") || filePath.endsWith(".osm.pbf")) { + return createTableFromPbf(Paths.get(filePath)); + } else if (filePath.endsWith(".xml") || filePath.endsWith(".osm")) { + return createTableFromXml(Paths.get(filePath)); + } else { + throw new IllegalArgumentException( + "Unsupported file format. Supported formats are .pbf, .osm.pbf, .xml, and .osm"); + } + } catch (IOException e) { + throw new RuntimeException("Failed to create OpenStreetMapTable from file: " + filePath, e); + } + } + + /** + * Create a table from a PBF file. + * + * @param path the path to the PBF file + * @return the table + * @throws IOException if an I/O error occurs + */ + private OpenStreetMapTable createTableFromPbf(Path path) throws IOException { + PbfEntityReader reader = new PbfEntityReader(); + reader.setGeometries(true); + return new OpenStreetMapTable(path.toFile(), reader); + } + + /** + * Create a table from an XML file. 
+ * + * @param path the path to the XML file + * @return the table + * @throws IOException if an I/O error occurs + */ + private OpenStreetMapTable createTableFromXml(Path path) throws IOException { + XmlEntityReader reader = new XmlEntityReader(); + reader.setGeometries(true); + return new OpenStreetMapTable(path.toFile(), reader); + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresDdlExecutor.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresDdlExecutor.java index afb7dbe22..6ff2cb35f 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresDdlExecutor.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresDdlExecutor.java @@ -28,6 +28,8 @@ import java.sql.SQLException; import java.util.*; import javax.sql.DataSource; +import org.apache.baremaps.calcite.ddl.*; +import org.apache.baremaps.calcite.sql.BaremapsSqlDdlParser; import org.apache.calcite.adapter.java.JavaTypeFactory; import org.apache.calcite.adapter.jdbc.JdbcSchema; import org.apache.calcite.avatica.AvaticaUtils; @@ -49,14 +51,12 @@ import org.apache.calcite.server.DdlExecutor; import org.apache.calcite.server.DdlExecutorImpl; import org.apache.calcite.sql.*; -import org.apache.calcite.sql.ddl.*; import org.apache.calcite.sql.dialect.CalciteSqlDialect; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.sql.parser.SqlAbstractParserImpl; import org.apache.calcite.sql.parser.SqlParseException; import org.apache.calcite.sql.parser.SqlParserImplFactory; import org.apache.calcite.sql.parser.SqlParserPos; -import org.apache.calcite.sql.parser.ddl.SqlDdlParserImpl; import org.apache.calcite.sql.pretty.SqlPrettyWriter; import org.apache.calcite.sql.validate.SqlValidator; import org.apache.calcite.tools.*; @@ -89,7 +89,7 @@ public class PostgresDdlExecutor extends DdlExecutorImpl { new SqlParserImplFactory() { @Override public 
SqlAbstractParserImpl getParser(Reader stream) { - return SqlDdlParserImpl.FACTORY.getParser(stream); + return BaremapsSqlDdlParser.FACTORY.getParser(stream); } @Override @@ -98,6 +98,12 @@ public DdlExecutor getDdlExecutor() { } }; + /** + * Record to hold schema information. + */ + private record SchemaInfo(String name, @Nullable CalciteSchema schema) { + } + /** * Default constructor that assumes a DataSource is provided by PostgresSchemaFactory or similar. * Protected only to allow sub-classing; use {@link #INSTANCE} where possible. @@ -138,7 +144,7 @@ public static void clearThreadLocalDataSource() { * Returns the schema in which to create an object; the left part is null if the schema does not * exist. */ - static Pair<@Nullable CalciteSchema, String> schema( + static SchemaInfo schema( CalcitePrepare.Context context, boolean mutable, SqlIdentifier id) { final String name; final List path; @@ -156,11 +162,11 @@ public static void clearThreadLocalDataSource() { @Nullable CalciteSchema subSchema = schema.getSubSchema(p, true); if (subSchema == null) { - return Pair.of(null, name); + return new SchemaInfo(name, null); } schema = subSchema; } - return Pair.of(schema, name); + return new SchemaInfo(name, schema); } /** @@ -217,8 +223,8 @@ private DataSource getDataSource(CalcitePrepare.Context context) { /** Truncate the PostgreSQL table. */ static void truncate(SqlIdentifier name, CalcitePrepare.Context context, DataSource dataSource) { - final Pair<@Nullable CalciteSchema, String> pair = schema(context, true, name); - final String tableName = pair.right; + final SchemaInfo schemaInfo = schema(context, true, name); + final String tableName = schemaInfo.name(); try (Connection connection = dataSource.getConnection(); PreparedStatement stmt = connection.prepareStatement("TRUNCATE TABLE \"" + tableName + "\"")) { @@ -274,13 +280,13 @@ static void populate(SqlIdentifier name, SqlNode query, /** Executes a {@code CREATE FOREIGN SCHEMA} command. 
*/ public void execute(SqlCreateForeignSchema create, CalcitePrepare.Context context) { - final Pair<@Nullable CalciteSchema, String> pair = + final SchemaInfo schemaInfo = schema(context, true, create.name); - requireNonNull(pair.left); // TODO: should not assume parent schema exists - if (pair.left.plus().getSubSchema(pair.right) != null) { + requireNonNull(schemaInfo.schema()); // TODO: should not assume parent schema exists + if (schemaInfo.schema().plus().getSubSchema(schemaInfo.name()) != null) { if (!create.getReplace() && !create.ifNotExists) { throw SqlUtil.newContextException(create.name.getParserPosition(), - RESOURCE.schemaExists(pair.right)); + RESOURCE.schemaExists(schemaInfo.name())); } } final Schema subSchema; @@ -319,8 +325,8 @@ public void execute(SqlCreateForeignSchema create, requireNonNull(value(option.right))); } subSchema = - schemaFactory.create(pair.left.plus(), pair.right, operandMap); - pair.left.add(pair.right, subSchema); + schemaFactory.create(schemaInfo.schema().plus(), schemaInfo.name(), operandMap); + schemaInfo.schema().add(schemaInfo.name(), subSchema); } /** Executes a {@code CREATE FUNCTION} command. 
*/ @@ -335,11 +341,11 @@ public void execute(SqlCreateFunction create, */ public void execute(SqlDropObject drop, CalcitePrepare.Context context) { - final Pair<@Nullable CalciteSchema, String> pair = + final SchemaInfo schemaInfo = schema(context, false, drop.name); final @Nullable CalciteSchema schema = - pair.left; // null if schema does not exist - final String objectName = pair.right; + schemaInfo.schema(); // null if schema does not exist + final String objectName = schemaInfo.name(); boolean existed; switch (drop.getKind()) { @@ -435,12 +441,12 @@ public void execute(SqlDropObject drop, */ public void execute(SqlTruncateTable truncate, CalcitePrepare.Context context) { - final Pair<@Nullable CalciteSchema, String> pair = + final SchemaInfo schemaInfo = schema(context, true, truncate.name); - if (pair.left == null - || pair.left.plus().getTable(pair.right) == null) { + if (schemaInfo.schema() == null + || schemaInfo.schema().plus().getTable(schemaInfo.name()) == null) { throw SqlUtil.newContextException(truncate.name.getParserPosition(), - RESOURCE.tableNotFound(pair.right)); + RESOURCE.tableNotFound(schemaInfo.name())); } if (!truncate.continueIdentify) { @@ -456,14 +462,14 @@ public void execute(SqlTruncateTable truncate, */ public void execute(SqlCreateMaterializedView create, CalcitePrepare.Context context) { - final Pair<@Nullable CalciteSchema, String> pair = + final SchemaInfo schemaInfo = schema(context, true, create.name); - if (pair.left == null) { + if (schemaInfo.schema() == null) { throw new RuntimeException("Schema " + create.name + " not found"); } - final String viewName = pair.right; - final CalciteSchema schema = pair.left; + final String viewName = schemaInfo.name(); + final CalciteSchema schema = schemaInfo.schema(); final SqlNode query = renameColumns(create.columnList, create.query); // Get target schema @@ -507,16 +513,16 @@ public void execute(SqlCreateMaterializedView create, /** Executes a {@code CREATE SCHEMA} command. 
*/ public void execute(SqlCreateSchema create, CalcitePrepare.Context context) { - final Pair<@Nullable CalciteSchema, String> pair = + final SchemaInfo schemaInfo = schema(context, true, create.name); - requireNonNull(pair.left); // TODO: should not assume parent schema exists - if (pair.left.plus().getSubSchema(pair.right) != null) { + requireNonNull(schemaInfo.schema()); // TODO: should not assume parent schema exists + if (schemaInfo.schema().plus().getSubSchema(schemaInfo.name()) != null) { if (create.ifNotExists) { return; } if (!create.getReplace()) { throw SqlUtil.newContextException(create.name.getParserPosition(), - RESOURCE.schemaExists(pair.right)); + RESOURCE.schemaExists(schemaInfo.name())); } } @@ -525,25 +531,25 @@ public void execute(SqlCreateSchema create, try { try (Connection connection = ds.getConnection(); PreparedStatement stmt = connection.prepareStatement( - "CREATE SCHEMA IF NOT EXISTS \"" + pair.right + "\"")) { + "CREATE SCHEMA IF NOT EXISTS \"" + schemaInfo.name() + "\"")) { stmt.executeUpdate(); } } catch (SQLException e) { - throw new RuntimeException("Error creating schema in PostgreSQL: " + pair.right, e); + throw new RuntimeException("Error creating schema in PostgreSQL: " + schemaInfo.name(), e); } final Schema subSchema = new AbstractSchema(); - pair.left.add(pair.right, subSchema); + schemaInfo.schema().add(schemaInfo.name(), subSchema); } /** Executes a {@code DROP SCHEMA} command. 
*/ public void execute(SqlDropSchema drop, CalcitePrepare.Context context) { - final Pair<@Nullable CalciteSchema, String> pair = + final SchemaInfo schemaInfo = schema(context, false, drop.name); - final String name = pair.right; - final boolean existed = pair.left != null - && pair.left.removeSubSchema(name); + final String name = schemaInfo.name(); + final boolean existed = schemaInfo.schema() != null + && schemaInfo.schema().removeSubSchema(name); if (existed) { // Drop PostgreSQL schema @@ -566,9 +572,9 @@ public void execute(SqlDropSchema drop, /** Executes a {@code CREATE TABLE} command. */ public void execute(SqlCreateTable create, CalcitePrepare.Context context) { - final Pair<@Nullable CalciteSchema, String> pair = + final SchemaInfo schemaInfo = schema(context, true, create.name); - requireNonNull(pair.left); // TODO: should not assume parent schema exists + requireNonNull(schemaInfo.schema()); // TODO: should not assume parent schema exists final JavaTypeFactory typeFactory = context.getTypeFactory(); final RelDataType queryRowType; if (create.query != null) { @@ -576,7 +582,7 @@ public void execute(SqlCreateTable create, final String sql = create.query.toSqlString(CalciteSqlDialect.DEFAULT).getSql(); final ViewTableMacro viewTableMacro = - ViewTable.viewMacro(pair.left.plus(), sql, pair.left.path(null), + ViewTable.viewMacro(schemaInfo.schema().plus(), sql, schemaInfo.schema().path(null), context.getObjectPath(), false); final TranslatableTable x = viewTableMacro.apply(ImmutableList.of()); queryRowType = x.getRowType(typeFactory); @@ -612,7 +618,7 @@ public void execute(SqlCreateTable create, if (create.ifNotExists) { createTableSql.append("IF NOT EXISTS "); } - createTableSql.append("\"").append(pair.right).append("\" ("); + createTableSql.append("\"").append(schemaInfo.name()).append("\" ("); boolean first = true; // Process column declarations for the CREATE TABLE statement @@ -666,7 +672,7 @@ public void execute(SqlCreateTable create, 
createTableSql.append(")"); - if (pair.left.plus().getTable(pair.right) != null) { + if (schemaInfo.schema().plus().getTable(schemaInfo.name()) != null) { // Table exists. if (create.ifNotExists) { return; @@ -674,7 +680,7 @@ public void execute(SqlCreateTable create, if (!create.getReplace()) { // They did not specify IF NOT EXISTS, so give error. throw SqlUtil.newContextException(create.name.getParserPosition(), - RESOURCE.tableExists(pair.right)); + RESOURCE.tableExists(schemaInfo.name())); } // Drop existing table @@ -682,11 +688,12 @@ public void execute(SqlCreateTable create, DataSource ds = getDataSource(context); try (Connection connection = ds.getConnection(); PreparedStatement stmt = - connection.prepareStatement("DROP TABLE \"" + pair.right + "\"")) { + connection.prepareStatement("DROP TABLE \"" + schemaInfo.name() + "\"")) { stmt.executeUpdate(); } } catch (SQLException e) { - throw new RuntimeException("Error dropping existing table in PostgreSQL: " + pair.right, e); + throw new RuntimeException( + "Error dropping existing table in PostgreSQL: " + schemaInfo.name(), e); } } @@ -700,32 +707,32 @@ public void execute(SqlCreateTable create, // Create Calcite wrapper for the table PostgresModifiableTable table = - new PostgresModifiableTable(ds, pair.right, context.getTypeFactory()); - pair.left.add(pair.right, table); + new PostgresModifiableTable(ds, schemaInfo.name(), context.getTypeFactory()); + schemaInfo.schema().add(schemaInfo.name(), table); // Populate the table if query is provided if (create.query != null) { populate(create.name, create.query, context); } } catch (SQLException e) { - throw new RuntimeException("Error creating table in PostgreSQL: " + pair.right, e); + throw new RuntimeException("Error creating table in PostgreSQL: " + schemaInfo.name(), e); } } /** Executes a {@code CREATE VIEW} command. 
*/ public void execute(SqlCreateView create, CalcitePrepare.Context context) { - final Pair<@Nullable CalciteSchema, String> pair = + final SchemaInfo schemaInfo = schema(context, true, create.name); - requireNonNull(pair.left); // TODO: should not assume parent schema exists - final SchemaPlus schemaPlus = pair.left.plus(); - for (Function function : schemaPlus.getFunctions(pair.right)) { + requireNonNull(schemaInfo.schema()); // TODO: should not assume parent schema exists + final SchemaPlus schemaPlus = schemaInfo.schema().plus(); + for (Function function : schemaPlus.getFunctions(schemaInfo.name())) { if (function.getParameters().isEmpty()) { if (!create.getReplace()) { throw SqlUtil.newContextException(create.name.getParserPosition(), - RESOURCE.viewExists(pair.right)); + RESOURCE.viewExists(schemaInfo.name())); } - pair.left.removeFunction(pair.right); + schemaInfo.schema().removeFunction(schemaInfo.name()); } } @@ -739,7 +746,7 @@ public void execute(SqlCreateView create, if (create.getReplace()) { createViewSql += " OR REPLACE"; } - createViewSql += " VIEW \"" + pair.right + "\" AS " + sql; + createViewSql += " VIEW \"" + schemaInfo.name() + "\" AS " + sql; try (Connection connection = ds.getConnection(); PreparedStatement stmt = connection.prepareStatement(createViewSql)) { @@ -748,11 +755,11 @@ public void execute(SqlCreateView create, // Create Calcite wrapper for the view final ViewTableMacro viewTableMacro = - ViewTable.viewMacro(schemaPlus, sql, pair.left.path(null), + ViewTable.viewMacro(schemaPlus, sql, schemaInfo.schema().path(null), context.getObjectPath(), false); - schemaPlus.add(pair.right, viewTableMacro); + schemaPlus.add(schemaInfo.name(), viewTableMacro); } catch (SQLException e) { - throw new RuntimeException("Error creating view in PostgreSQL: " + pair.right, e); + throw new RuntimeException("Error creating view in PostgreSQL: " + schemaInfo.name(), e); } } diff --git 
a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresModifiableTable.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresModifiableTable.java index 8502c3dcd..2e437f76f 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresModifiableTable.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresModifiableTable.java @@ -23,7 +23,7 @@ import javax.sql.DataSource; import org.apache.baremaps.calcite.data.DataColumn; import org.apache.baremaps.calcite.data.DataColumnFixed; -import org.apache.baremaps.calcite.data.DataSchema; +import org.apache.baremaps.calcite.data.DataTableSchema; import org.apache.baremaps.postgres.copy.*; import org.apache.baremaps.postgres.metadata.ColumnResult; import org.apache.baremaps.postgres.metadata.DatabaseMetadata; @@ -67,9 +67,10 @@ public class PostgresModifiableTable extends AbstractTable implements ScannableTable, ModifiableTable, QueryableTable { private final DataSource dataSource; + private final String schema; private final String tableName; private final RelDataType rowType; - private final DataSchema dataSchema; + private final DataTableSchema dataTableSchema; /** * Constructs a PostgisTable with the specified data source and table name. 
@@ -79,7 +80,7 @@ public class PostgresModifiableTable extends AbstractTable * @throws SQLException if an SQL error occurs */ public PostgresModifiableTable(DataSource dataSource, String tableName) throws SQLException { - this(dataSource, tableName, new org.apache.calcite.jdbc.JavaTypeFactoryImpl()); + this(dataSource, "public", tableName, new org.apache.calcite.jdbc.JavaTypeFactoryImpl()); } /** @@ -93,10 +94,26 @@ public PostgresModifiableTable(DataSource dataSource, String tableName) throws S public PostgresModifiableTable(DataSource dataSource, String tableName, RelDataTypeFactory typeFactory) throws SQLException { + this(dataSource, "public", tableName, typeFactory); + } + + /** + * Constructs a PostgisTable with the specified data source, schema, table name, and type factory. + * + * @param dataSource the data source for the PostgreSQL connection + * @param schema the schema name + * @param tableName the name of the table to access + * @param typeFactory the type factory + * @throws SQLException if an SQL error occurs + */ + public PostgresModifiableTable(DataSource dataSource, String schema, String tableName, + RelDataTypeFactory typeFactory) + throws SQLException { this.dataSource = dataSource; + this.schema = schema; this.tableName = tableName; - this.dataSchema = discoverSchema(); - this.rowType = PostgresTypeConversion.toRelDataType(typeFactory, dataSchema); + this.dataTableSchema = discoverSchema(); + this.rowType = PostgresTypeConversion.toRelDataType(typeFactory, dataTableSchema); } /** @@ -105,13 +122,13 @@ public PostgresModifiableTable(DataSource dataSource, String tableName, * @return the schema of the table * @throws SQLException if an SQL error occurs */ - private DataSchema discoverSchema() throws SQLException { + private DataTableSchema discoverSchema() throws SQLException { List columns = new ArrayList<>(); // Use DatabaseMetadata to get column information DatabaseMetadata metadata = new DatabaseMetadata(dataSource); var tableMetadata = - 
metadata.getTableMetaData(null, null, tableName, new String[] {"TABLE", "VIEW"}) + metadata.getTableMetaData(schema, null, tableName, new String[] {"TABLE", "VIEW"}) .stream() .filter(meta -> meta.table().tableName().equalsIgnoreCase(tableName)) .findFirst(); @@ -120,8 +137,9 @@ private DataSchema discoverSchema() throws SQLException { if (tableMetadata.isEmpty()) { try (Connection connection = dataSource.getConnection(); PreparedStatement stmt = connection.prepareStatement( - "SELECT EXISTS (SELECT 1 FROM pg_matviews WHERE matviewname = ?)")) { - stmt.setString(1, tableName); + "SELECT EXISTS (SELECT 1 FROM pg_matviews WHERE schemaname = ? AND matviewname = ?)")) { + stmt.setString(1, schema); + stmt.setString(2, tableName); try (ResultSet rs = stmt.executeQuery()) { if (rs.next() && rs.getBoolean(1)) { // It's a materialized view, get column information directly @@ -131,7 +149,7 @@ private DataSchema discoverSchema() throws SQLException { } // If we get here, it's neither a regular table/view nor a materialized view - throw new SQLException("Table not found: " + tableName); + throw new SQLException("Table not found: " + schema + "." + tableName); } // Get geometry column types for the current table. 
@@ -162,7 +180,7 @@ private DataSchema discoverSchema() throws SQLException { columns.add(new DataColumnFixed(columnName, cardinality, relDataType)); } - return new DataSchema(tableName, columns); + return new DataTableSchema(tableName, columns); } /** @@ -172,7 +190,7 @@ private DataSchema discoverSchema() throws SQLException { * @return the schema constructed from direct column query * @throws SQLException if an SQL error occurs */ - private DataSchema getSchemaFromDirectQuery() throws SQLException { + private DataTableSchema getSchemaFromDirectQuery() throws SQLException { List columns = new ArrayList<>(); try (Connection connection = dataSource.getConnection()) { @@ -184,13 +202,15 @@ private DataSchema getSchemaFromDirectQuery() throws SQLException { "FROM pg_catalog.pg_attribute a " + "JOIN pg_catalog.pg_class c ON a.attrelid = c.oid " + "LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace " + - "WHERE c.relname = ? " + + "WHERE n.nspname = ? " + + " AND c.relname = ? " + " AND a.attnum > 0 " + " AND NOT a.attisdropped " + "ORDER BY a.attnum"; try (PreparedStatement stmt = connection.prepareStatement(sql)) { - stmt.setString(1, tableName); + stmt.setString(1, schema); + stmt.setString(2, tableName); try (ResultSet rs = stmt.executeQuery()) { while (rs.next()) { @@ -276,10 +296,10 @@ private DataSchema getSchemaFromDirectQuery() throws SQLException { } if (columns.isEmpty()) { - throw new SQLException("No columns found for table: " + tableName); + throw new SQLException("No columns found for table: " + schema + "." 
+ tableName); } - return new DataSchema(tableName, columns); + return new DataTableSchema(tableName, columns); } /** @@ -350,10 +370,12 @@ private Map getGeometryTypes() throws SQLException { try (Connection connection = dataSource.getConnection()) { // Query to get geometry column information - String sql = "SELECT f_geometry_column, type FROM geometry_columns WHERE f_table_name = ?"; + String sql = + "SELECT f_geometry_column, type FROM geometry_columns WHERE f_table_schema = ? AND f_table_name = ?"; try (PreparedStatement stmt = connection.prepareStatement(sql)) { - stmt.setString(1, tableName); + stmt.setString(1, schema); + stmt.setString(2, tableName); try (ResultSet rs = stmt.executeQuery()) { while (rs.next()) { @@ -378,8 +400,8 @@ public RelDataType getRowType(RelDataTypeFactory typeFactory) { * * @return the schema of the table */ - public DataSchema schema() { - return dataSchema; + public DataTableSchema schema() { + return dataTableSchema; } /** @@ -400,12 +422,21 @@ protected String getTableName() { return tableName; } + /** + * Returns the schema name of this table. 
+ * + * @return the schema name + */ + protected String getSchema() { + return schema; + } + @Override public Enumerable scan(DataContext root) { return new AbstractEnumerable<>() { @Override public Enumerator enumerator() { - return new PostgisEnumerator(dataSource, dataSchema); + return new PostgisEnumerator(dataSource, dataTableSchema, schema, tableName); } }; } @@ -461,7 +492,8 @@ private class PostgisCollectionAdapter extends AbstractCollection { public int size() { try (Connection connection = dataSource.getConnection(); Statement stmt = connection.createStatement(); - ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM \"" + tableName + "\"")) { + ResultSet rs = + stmt.executeQuery("SELECT COUNT(*) FROM \"" + schema + "\".\"" + tableName + "\"")) { if (rs.next()) { return rs.getInt(1); } @@ -490,14 +522,15 @@ public boolean contains(Object o) { Object[] values = (Object[]) o; StringBuilder whereClause = new StringBuilder(); - for (int i = 0; i < dataSchema.columns().size(); i++) { + for (int i = 0; i < dataTableSchema.columns().size(); i++) { if (i > 0) { whereClause.append(" AND "); } - whereClause.append("\"").append(dataSchema.columns().get(i).name()).append("\" = ?"); + whereClause.append("\"").append(dataTableSchema.columns().get(i).name()).append("\" = ?"); } - String sql = "SELECT COUNT(*) FROM \"" + tableName + "\" WHERE " + whereClause; + String sql = + "SELECT COUNT(*) FROM \"" + schema + "\".\"" + tableName + "\" WHERE " + whereClause; try (PreparedStatement statement = connection.prepareStatement(sql)) { for (int i = 0; i < values.length; i++) { statement.setObject(i + 1, values[i]); @@ -517,7 +550,8 @@ public boolean contains(Object o) { @Override public Iterator iterator() { return new Iterator() { - private final PostgisEnumerator enumerator = new PostgisEnumerator(dataSource, dataSchema); + private final PostgisEnumerator enumerator = + new PostgisEnumerator(dataSource, dataTableSchema, schema, tableName); private boolean hasNext = 
enumerator.moveNext(); @Override @@ -542,12 +576,13 @@ public Object[] toArray() { List result = new ArrayList<>(); try (Connection connection = dataSource.getConnection(); Statement stmt = connection.createStatement(); - ResultSet rs = stmt.executeQuery("SELECT * FROM \"" + tableName + "\"")) { + ResultSet rs = + stmt.executeQuery("SELECT * FROM \"" + schema + "\".\"" + tableName + "\"")) { while (rs.next()) { - Object[] row = new Object[dataSchema.columns().size()]; - for (int i = 0; i < dataSchema.columns().size(); i++) { - DataColumn column = dataSchema.columns().get(i); + Object[] row = new Object[dataTableSchema.columns().size()]; + for (int i = 0; i < dataTableSchema.columns().size(); i++) { + DataColumn column = dataTableSchema.columns().get(i); if (column.sqlTypeName() == SqlTypeName.GEOMETRY) { byte[] wkb = rs.getBytes(i + 1); row[i] = deserializeWkb(wkb); @@ -580,8 +615,8 @@ public boolean addAll(Collection c) { try (Connection connection = dataSource.getConnection()) { // Use COPY API for better performance PGConnection pgConnection = connection.unwrap(PGConnection.class); - String copyCommand = "COPY \"" + tableName + "\" (" + - dataSchema.columns().stream() + String copyCommand = "COPY \"" + schema + "\".\"" + tableName + "\" (" + + dataTableSchema.columns().stream() .map(col -> "\"" + col.name() + "\"") .collect(java.util.stream.Collectors.joining(", ")) + @@ -593,16 +628,17 @@ public boolean addAll(Collection c) { for (Object[] objects : c) { Objects.requireNonNull(objects, "Values cannot be null"); - if (objects.length != dataSchema.columns().size()) { + if (objects.length != dataTableSchema.columns().size()) { throw new IllegalArgumentException( - "Expected " + dataSchema.columns().size() + " values, got " + objects.length); + "Expected " + dataTableSchema.columns().size() + " values, got " + + objects.length); } - writer.startRow(dataSchema.columns().size()); + writer.startRow(dataTableSchema.columns().size()); for (int i = 0; i < objects.length; 
i++) { Object value = objects[i]; - DataColumn column = dataSchema.columns().get(i); + DataColumn column = dataTableSchema.columns().get(i); if (value == null) { writer.writeNull(); @@ -647,7 +683,7 @@ public boolean addAll(Collection c) { public void clear() { try (Connection connection = dataSource.getConnection(); Statement stmt = connection.createStatement()) { - stmt.executeUpdate("DELETE FROM \"" + tableName + "\""); + stmt.executeUpdate("DELETE FROM \"" + schema + "\".\"" + tableName + "\""); } catch (SQLException e) { throw new RuntimeException("Error clearing table", e); } @@ -697,7 +733,9 @@ private byte[] serializeWkb(Geometry geometry) { */ private static class PostgisEnumerator implements Enumerator { private final DataSource dataSource; - private final DataSchema schema; + private final DataTableSchema schema; + private final String tableSchema; + private final String tableName; private Connection connection; private Statement statement; private ResultSet resultSet; @@ -710,9 +748,12 @@ private static class PostgisEnumerator implements Enumerator { * @param dataSource the data source * @param schema the schema */ - public PostgisEnumerator(DataSource dataSource, DataSchema schema) { + public PostgisEnumerator(DataSource dataSource, DataTableSchema schema, String tableSchema, + String tableName) { this.dataSource = dataSource; this.schema = schema; + this.tableSchema = tableSchema; + this.tableName = tableName; this.current = null; try { this.connection = dataSource.getConnection(); @@ -742,7 +783,7 @@ private String buildSelectQuery() { } return "SELECT " + String.join(", ", columnProjections) + - " FROM \"" + schema.name() + "\""; + " FROM \"" + tableSchema + "\".\"" + tableName + "\""; } private Object[] convertCurrentRow() throws SQLException { diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresSchema.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresSchema.java new file mode 
100644 index 000000000..072695084 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresSchema.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.postgres; + +import java.sql.SQLException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import javax.sql.DataSource; +import org.apache.baremaps.postgres.metadata.DatabaseMetadata; +import org.apache.baremaps.postgres.metadata.TableMetadata; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.impl.AbstractSchema; + +public class PostgresSchema extends AbstractSchema { + + private final DataSource dataSource; + private final String schemaName; + private final RelDataTypeFactory typeFactory; + + public PostgresSchema(DataSource dataSource, String schemaName, RelDataTypeFactory typeFactory) { + this.dataSource = dataSource; + this.schemaName = schemaName; + this.typeFactory = typeFactory; + } + + @Override + protected Map getTableMap() { + DatabaseMetadata databaseMetadata = new DatabaseMetadata(dataSource); + Map tableMap = new HashMap<>(); + try { + List tables = + 
databaseMetadata.getTableMetaData(null, schemaName, null, new String[] {"TABLE"}); + for (TableMetadata table : tables) { + String tableName = table.table().tableName(); + Table calciteTable = new PostgresModifiableTable(dataSource, tableName, typeFactory); + tableMap.put(tableName, calciteTable); + } + } catch (SQLException e) { + throw new RuntimeException(e); + } + return tableMap; + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresTypeConversion.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresTypeConversion.java index 2c2d79f29..2324a9119 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresTypeConversion.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/postgres/PostgresTypeConversion.java @@ -20,7 +20,7 @@ import java.util.ArrayList; import java.util.List; import org.apache.baremaps.calcite.data.DataColumn; -import org.apache.baremaps.calcite.data.DataSchema; +import org.apache.baremaps.calcite.data.DataTableSchema; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; import org.apache.calcite.sql.type.SqlTypeName; @@ -171,7 +171,7 @@ public static String toPostgresTypeString(RelDataType type) { * @return the corresponding RelDataType */ public static RelDataType toRelDataType(RelDataTypeFactory typeFactory, - DataSchema schema) { + DataTableSchema schema) { List types = new ArrayList<>(); List names = new ArrayList<>(); diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/rpsl/RpslSchema.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/rpsl/RpslSchema.java new file mode 100644 index 000000000..f4780ecad --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/rpsl/RpslSchema.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.rpsl; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.impl.AbstractSchema; + +/** + * A Calcite schema implementation for RPSL data. This schema provides access to RPSL files through + * the Apache Calcite framework for SQL querying. + */ +public class RpslSchema extends AbstractSchema { + + private final File directory; + private final Map tableMap; + private final RelDataTypeFactory typeFactory; + + /** + * Constructs a RpslSchema with the specified directory. 
+ * + * @param directory the directory containing RPSL files + * @param typeFactory the type factory to use for creating tables + * @throws IOException if an I/O error occurs + */ + public RpslSchema(File directory, RelDataTypeFactory typeFactory) throws IOException { + this.directory = Objects.requireNonNull(directory, "Directory cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Process files in the directory + File[] files = directory.listFiles( + (dir, name) -> name.toLowerCase().endsWith(".rpsl") || name.toLowerCase().endsWith(".txt")); + + if (files != null) { + for (File file : files) { + // Extract the base name without extension (e.g., "routing" from "routing.rpsl") + String fileName = file.getName(); + String tableName = fileName; + + // Remove all extensions (e.g., "routing.rpsl" -> "routing") + while (tableName.contains(".")) { + int lastDotIndex = tableName.lastIndexOf('.'); + if (lastDotIndex > 0) { + tableName = tableName.substring(0, lastDotIndex); + } else { + break; + } + } + + // Create the table with the file reference + tableMap.put(tableName, createTable(file)); + } + } + } + + /** + * Constructs a RpslSchema with a single file. 
+ * + * @param file the RPSL file + * @param typeFactory the type factory to use for creating tables + * @throws IOException if an I/O error occurs + */ + public RpslSchema(File file, RelDataTypeFactory typeFactory, boolean isDirectory) + throws IOException { + if (isDirectory) { + // If isDirectory is true, treat the file as a directory + this.directory = Objects.requireNonNull(file, "Directory cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Process files in the directory + File[] files = file.listFiles((dir, name) -> name.toLowerCase().endsWith(".rpsl") + || name.toLowerCase().endsWith(".txt")); + + if (files != null) { + for (File rpslFile : files) { + // Extract the base name without extension (e.g., "routing" from "routing.rpsl") + String fileName = rpslFile.getName(); + String tableName = fileName; + + // Remove all extensions (e.g., "routing.rpsl" -> "routing") + while (tableName.contains(".")) { + int lastDotIndex = tableName.lastIndexOf('.'); + if (lastDotIndex > 0) { + tableName = tableName.substring(0, lastDotIndex); + } else { + break; + } + } + + // Create the table with the file reference + tableMap.put(tableName, createTable(rpslFile)); + } + } + } else { + // If isDirectory is false, treat the file as a single file + this.directory = Objects.requireNonNull(file, "File cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Extract the base name without extension (e.g., "routing" from "routing.rpsl") + String fileName = file.getName(); + String tableName = fileName; + + // Remove all extensions (e.g., "routing.rpsl" -> "routing") + while (tableName.contains(".")) { + int lastDotIndex = tableName.lastIndexOf('.'); + if (lastDotIndex > 0) { + tableName = tableName.substring(0, lastDotIndex); + } else { + break; + } + } + + // Create the table with the file 
reference + tableMap.put(tableName, createTable(file)); + } + } + + /** + * Creates a table for the given file. + * + * @param file the RPSL file + * @return the created table + * @throws IOException if an I/O error occurs + */ + private Table createTable(File file) throws IOException { + return new RpslTable(file); + } + + @Override + protected Map getTableMap() { + return tableMap; + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/rpsl/RpslTableFactory.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/rpsl/RpslTableFactory.java new file mode 100644 index 000000000..698311669 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/rpsl/RpslTableFactory.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.rpsl; + +import java.io.File; +import java.io.IOException; +import java.util.Map; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.TableFactory; + +/** + * A table factory for creating RPSL tables. + */ +public class RpslTableFactory implements TableFactory
{ + + /** + * Constructor. + */ + public RpslTableFactory() {} + + @Override + public Table create( + SchemaPlus schema, + String name, + Map operand, + RelDataType rowType) { + // Get the file path from the operand + String filePath = (String) operand.get("file"); + if (filePath == null) { + throw new IllegalArgumentException("File path must be specified in the 'file' operand"); + } + + try { + File file = new File(filePath); + return new RpslTable(file); + } catch (IOException e) { + throw new RuntimeException("Failed to create RpslTable from file: " + filePath, e); + } + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileSchema.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileSchema.java new file mode 100644 index 000000000..38f615f0e --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileSchema.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.shapefile; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.impl.AbstractSchema; + +/** + * A Calcite schema implementation for shapefile data. This schema provides access to shapefiles + * through the Apache Calcite framework for SQL querying. + */ +public class ShapefileSchema extends AbstractSchema { + + private final File directory; + private final Map tableMap; + private final RelDataTypeFactory typeFactory; + + /** + * Constructs a ShapefileSchema with the specified directory. + * + * @param directory the directory containing shapefiles + * @param typeFactory the type factory to use for creating tables + * @throws IOException if an I/O error occurs + */ + public ShapefileSchema(File directory, RelDataTypeFactory typeFactory) throws IOException { + this.directory = Objects.requireNonNull(directory, "Directory cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Process files in the directory + File[] files = directory.listFiles((dir, name) -> name.toLowerCase().endsWith(".shp")); + + if (files != null) { + for (File file : files) { + // Extract the base name without extension (e.g., "countries" from "countries.shp") + String fileName = file.getName(); + String tableName = fileName; + + // Remove all extensions (e.g., "countries.shp" -> "countries") + while (tableName.contains(".")) { + int lastDotIndex = tableName.lastIndexOf('.'); + if (lastDotIndex > 0) { + tableName = tableName.substring(0, lastDotIndex); + } else { + break; + } + } + + // Create the table with the file reference + tableMap.put(tableName, createTable(file)); + } + } + } + + /** + * Constructs a ShapefileSchema with a single file. 
+ * + * @param file the shapefile + * @param typeFactory the type factory to use for creating tables + * @throws IOException if an I/O error occurs + */ + public ShapefileSchema(File file, RelDataTypeFactory typeFactory, boolean isDirectory) + throws IOException { + if (isDirectory) { + // If isDirectory is true, treat the file as a directory + this.directory = Objects.requireNonNull(file, "Directory cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Process files in the directory + File[] files = file.listFiles((dir, name) -> name.toLowerCase().endsWith(".shp")); + + if (files != null) { + for (File shapeFile : files) { + // Extract the base name without extension (e.g., "countries" from "countries.shp") + String fileName = shapeFile.getName(); + String tableName = fileName; + + // Remove all extensions (e.g., "countries.shp" -> "countries") + while (tableName.contains(".")) { + int lastDotIndex = tableName.lastIndexOf('.'); + if (lastDotIndex > 0) { + tableName = tableName.substring(0, lastDotIndex); + } else { + break; + } + } + + // Create the table with the file reference + tableMap.put(tableName, createTable(shapeFile)); + } + } + } else { + // If isDirectory is false, treat the file as a single file + this.directory = Objects.requireNonNull(file, "File cannot be null"); + this.typeFactory = Objects.requireNonNull(typeFactory, "Type factory cannot be null"); + this.tableMap = new HashMap<>(); + + // Extract the base name without extension (e.g., "countries" from "countries.shp") + String fileName = file.getName(); + String tableName = fileName; + + // Remove all extensions (e.g., "countries.shp" -> "countries") + while (tableName.contains(".")) { + int lastDotIndex = tableName.lastIndexOf('.'); + if (lastDotIndex > 0) { + tableName = tableName.substring(0, lastDotIndex); + } else { + break; + } + } + + // Create the table with the file reference + 
tableMap.put(tableName, createTable(file)); + } + } + + /** + * Creates a table for the given file. + * + * @param file the shapefile + * @return the created table + * @throws IOException if an I/O error occurs + */ + private Table createTable(File file) throws IOException { + return new ShapefileTable(file); + } + + @Override + protected Map getTableMap() { + return tableMap; + } +} diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileTable.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileTable.java index dae1c2428..3382bde3b 100644 --- a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileTable.java +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileTable.java @@ -45,8 +45,6 @@ public class ShapefileTable extends AbstractTable implements ScannableTable { private static final Logger logger = LoggerFactory.getLogger(ShapefileTable.class); private final File file; - private final ShapefileReader shapeFile; - private final String tableName; private final List fieldDescriptors; private RelDataType rowType; @@ -58,9 +56,10 @@ public class ShapefileTable extends AbstractTable implements ScannableTable { */ public ShapefileTable(File file) throws IOException { this.file = file; - this.shapeFile = new ShapefileReader(file.getPath()); - this.tableName = file.getName(); - this.fieldDescriptors = shapeFile.getDatabaseFieldsDescriptors(); + // Create a ShapefileReader to get field descriptors + try (ShapefileReader shapeFile = new ShapefileReader(file.getPath())) { + this.fieldDescriptors = shapeFile.getDatabaseFieldsDescriptors(); + } } @Override @@ -138,6 +137,7 @@ public Enumerator enumerator() { */ private static class ShapefileEnumerator implements Enumerator { private final File file; + private ShapefileReader shapeFile; private ShapefileInputStream shapefileInputStream; private List current; @@ -148,7 +148,7 @@ public 
ShapefileEnumerator(File file) { private void initialize() { try { - var shapeFile = new ShapefileReader(file.getPath()); + this.shapeFile = new ShapefileReader(file.getPath()); this.shapefileInputStream = shapeFile.read(); } catch (IOException e) { throw new RuntimeException("Failed to initialize shapefile iterator", e); @@ -177,7 +177,7 @@ public boolean moveNext() { @Override public void reset() { try { - shapefileInputStream.close(); + closeResources(); initialize(); } catch (IOException e) { throw new RuntimeException("Failed to reset shapefile iterator", e); @@ -187,12 +187,22 @@ public void reset() { @Override public void close() { try { - if (shapefileInputStream != null) { - shapefileInputStream.close(); - } + closeResources(); } catch (IOException e) { - // Ignore + logger.error("Error closing shapefile resources", e); + } + } + + private void closeResources() throws IOException { + if (shapefileInputStream != null) { + shapefileInputStream.close(); + shapefileInputStream = null; + } + if (shapeFile != null) { + shapeFile.close(); + shapeFile = null; } + current = null; } } } diff --git a/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileTableFactory.java b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileTableFactory.java new file mode 100644 index 000000000..5139122d3 --- /dev/null +++ b/baremaps-calcite/src/main/java/org/apache/baremaps/calcite/shapefile/ShapefileTableFactory.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.shapefile; + +import java.io.File; +import java.io.IOException; +import java.util.Map; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.apache.calcite.schema.TableFactory; + +/** + * A table factory for creating Shapefile tables. + */ +public class ShapefileTableFactory implements TableFactory
{ + + /** + * Constructor. + */ + public ShapefileTableFactory() {} + + @Override + public Table create( + SchemaPlus schema, + String name, + Map operand, + RelDataType rowType) { + // Get the file path from the operand + String filePath = (String) operand.get("file"); + if (filePath == null) { + throw new IllegalArgumentException("File path must be specified in the 'file' operand"); + } + + try { + File file = new File(filePath); + return new ShapefileTable(file); + } catch (IOException e) { + throw new RuntimeException("Failed to create ShapefileTable from file: " + filePath, e); + } + } +} diff --git a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/BaremapsDdlExecutorTest.java b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/BaremapsDdlExecutorTest.java index af177ee38..562be04ac 100644 --- a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/BaremapsDdlExecutorTest.java +++ b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/BaremapsDdlExecutorTest.java @@ -40,47 +40,78 @@ import org.locationtech.jts.geom.GeometryFactory; import org.locationtech.jts.geom.Point; +/** + * Tests for the BaremapsDdlExecutor class, which provides DDL execution abilities for Baremaps + * tables through Calcite. 
+ */ public class BaremapsDdlExecutorTest { private static final String CITY_DATA_DIR = "city_data"; private static final String CITY_POPULATION_DIR = "city_population"; private static final String POPULATION_DATA_DIR = "population_data"; + private static final String TEST_TABLE_DATA_DIR = "test_table_data"; + private DataCollection cityCollection; private DataCollection populationCollection; + private DataCollection testTableCollection; + private DataTableSchema citySchema; + private DataTableSchema populationSchema; + private DataTableSchema testTableSchema; + private RelDataTypeFactory typeFactory; @BeforeEach void setUp() throws IOException { - // Create and initialize city collection + // Initialize type factory + typeFactory = new JavaTypeFactoryImpl(); + + // Create schemas + citySchema = createCitySchema(); + populationSchema = createPopulationSchema(); + testTableSchema = createTestTableSchema(); + + // Create and initialize collections MemoryMappedDirectory cityMemory = new MemoryMappedDirectory(Paths.get(CITY_DATA_DIR)); - DataSchema citySchema = createCitySchema(); DataRowType cityRowType = new DataRowType(citySchema); cityCollection = AppendOnlyLog.builder() .dataType(cityRowType) .memory(cityMemory) .build(); - // Create and initialize population collection MemoryMappedDirectory populationMemory = new MemoryMappedDirectory(Paths.get(POPULATION_DATA_DIR)); - DataSchema populationSchema = createPopulationSchema(); DataRowType populationRowType = new DataRowType(populationSchema); populationCollection = AppendOnlyLog.builder() .dataType(populationRowType) .memory(populationMemory) .build(); + + MemoryMappedDirectory testTableMemory = + new MemoryMappedDirectory(Paths.get(TEST_TABLE_DATA_DIR)); + DataRowType testTableRowType = new DataRowType(testTableSchema); + testTableCollection = AppendOnlyLog.builder() + .dataType(testTableRowType) + .memory(testTableMemory) + .build(); } @AfterEach - void tearDown() throws IOException { + void tearDown() throws 
Exception { // Clean up directories FileUtils.deleteRecursively(Paths.get(CITY_DATA_DIR).toFile()); FileUtils.deleteRecursively(Paths.get(POPULATION_DATA_DIR).toFile()); FileUtils.deleteRecursively(Paths.get(CITY_POPULATION_DIR).toFile()); + FileUtils.deleteRecursively(Paths.get(TEST_TABLE_DATA_DIR).toFile()); + + // Clean up any additional directories created during test execution + FileUtils.deleteRecursively(Paths.get("options_table").toFile()); + FileUtils.deleteRecursively(Paths.get("options_table_as").toFile()); + FileUtils.deleteRecursively(Paths.get("new_table").toFile()); + FileUtils.deleteRecursively(Paths.get("city_view").toFile()); + FileUtils.deleteRecursively(Paths.get("test_table").toFile()); } - private DataSchema createCitySchema() { - RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(); - return new DataSchema("city", List.of( + private DataTableSchema createCitySchema() { + return new DataTableSchema("city", List.of( new DataColumnFixed("id", DataColumn.Cardinality.REQUIRED, typeFactory.createSqlType(org.apache.calcite.sql.type.SqlTypeName.INTEGER)), new DataColumnFixed("name", DataColumn.Cardinality.OPTIONAL, @@ -89,21 +120,26 @@ private DataSchema createCitySchema() { typeFactory.createSqlType(org.apache.calcite.sql.type.SqlTypeName.GEOMETRY)))); } - private DataSchema createPopulationSchema() { - RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(); - return new DataSchema("population", List.of( + private DataTableSchema createPopulationSchema() { + return new DataTableSchema("population", List.of( new DataColumnFixed("city_id", DataColumn.Cardinality.REQUIRED, typeFactory.createSqlType(org.apache.calcite.sql.type.SqlTypeName.INTEGER)), new DataColumnFixed("population", DataColumn.Cardinality.OPTIONAL, typeFactory.createSqlType(org.apache.calcite.sql.type.SqlTypeName.INTEGER)))); } - @Test - void testMaterializedView() throws SQLException { - GeometryFactory geometryFactory = new GeometryFactory(); - RelDataTypeFactory typeFactory 
= new JavaTypeFactoryImpl(); + private DataTableSchema createTestTableSchema() { + return new DataTableSchema("test_table", List.of( + new DataColumnFixed("id", DataColumn.Cardinality.REQUIRED, + typeFactory.createSqlType(org.apache.calcite.sql.type.SqlTypeName.INTEGER)), + new DataColumnFixed("name", DataColumn.Cardinality.OPTIONAL, + typeFactory.createSqlType(org.apache.calcite.sql.type.SqlTypeName.VARCHAR)))); + } - // Configure Calcite connection properties + /** + * Helper method to set up Calcite connection with BaremapsDdlExecutor + */ + private Connection createCalciteConnection() throws SQLException { Properties info = new Properties(); info.setProperty("lex", "MYSQL"); info.setProperty("caseSensitive", "false"); @@ -112,38 +148,56 @@ void testMaterializedView() throws SQLException { info.setProperty("parserFactory", BaremapsDdlExecutor.class.getName() + "#PARSER_FACTORY"); info.setProperty("materializationsEnabled", "true"); - try (Connection connection = DriverManager.getConnection("jdbc:calcite:", info)) { + return DriverManager.getConnection("jdbc:calcite:", info); + } + + /** + * Helper method to populate test data + */ + private void populateTestData() { + GeometryFactory geometryFactory = new GeometryFactory(); + + // Add data to the city table + Point parisPoint = geometryFactory.createPoint(new Coordinate(2.3522, 48.8566)); + Point nyPoint = geometryFactory.createPoint(new Coordinate(-74.0060, 40.7128)); + + cityCollection.add(new DataRow(citySchema, List.of(1, "Paris", parisPoint))); + cityCollection.add(new DataRow(citySchema, List.of(2, "New York", nyPoint))); + + // Add data to the population table + populationCollection.add(new DataRow(populationSchema, List.of(1, 2_161_000))); + populationCollection.add(new DataRow(populationSchema, List.of(2, 8_336_000))); + + // Add data to the test table + testTableCollection.add(new DataRow(testTableSchema, List.of(1, "Test Name"))); + } + + @Test + void testMaterializedView() throws SQLException { + // 
Set up test data + populateTestData(); + + try (Connection connection = createCalciteConnection()) { CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); SchemaPlus rootSchema = calciteConnection.getRootSchema(); // Create the city table DataModifiableTable cityTable = new DataModifiableTable( "city", - createCitySchema(), + citySchema, cityCollection, typeFactory); - // Add data to the city table - Point parisPoint = geometryFactory.createPoint(new Coordinate(2.3522, 48.8566)); - Point nyPoint = geometryFactory.createPoint(new Coordinate(-74.0060, 40.7128)); - - cityCollection.add(new DataRow(createCitySchema(), List.of(1, "Paris", parisPoint))); - cityCollection.add(new DataRow(createCitySchema(), List.of(2, "New York", nyPoint))); - // Add city table to the schema rootSchema.add("city", cityTable); // Create the population table DataModifiableTable populationTable = new DataModifiableTable( "population", - createPopulationSchema(), + populationSchema, populationCollection, typeFactory); - // Add data to the population table - populationCollection.add(new DataRow(createPopulationSchema(), List.of(1, 2_161_000))); - populationCollection.add(new DataRow(createPopulationSchema(), List.of(2, 8_336_000))); - // Add population table to the schema rootSchema.add("population", populationTable); @@ -181,4 +235,227 @@ void testMaterializedView() throws SQLException { } } } + + @Test + void testCreateAndDropTable() throws SQLException { + // Set up test data + populateTestData(); + + try (Connection connection = createCalciteConnection()) { + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Create the test table + DataModifiableTable testTable = new DataModifiableTable( + "test_table", + testTableSchema, + testTableCollection, + typeFactory); + + // Add test table to the schema + rootSchema.add("test_table", testTable); + + // Test CREATE TABLE + 
try (Statement statement = connection.createStatement()) { + statement.execute("CREATE TABLE new_table (id INTEGER, name VARCHAR)"); + } + + // Add data to the new table + try (Statement statement = connection.createStatement()) { + statement.execute("INSERT INTO new_table VALUES (1, 'New Table Name')"); + } + + // Query the new table + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery("SELECT * FROM new_table")) { + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getInt("id")); + assertEquals("New Table Name", resultSet.getString("name")); + } + + // Test DROP TABLE + try (Statement statement = connection.createStatement()) { + statement.execute("DROP TABLE new_table"); + } + + // Verify table no longer exists + try (Statement statement = connection.createStatement()) { + statement.executeQuery("SELECT * FROM new_table"); + fail("Table should have been dropped"); + } catch (SQLException e) { + // Expected exception + assertTrue(e.getMessage().contains("not found")); + } + } + } + + @Test + void testCreateAndDropView() throws SQLException { + // Set up test data + populateTestData(); + + try (Connection connection = createCalciteConnection()) { + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Create the city table + DataModifiableTable cityTable = new DataModifiableTable( + "city", + citySchema, + cityCollection, + typeFactory); + + // Add city table to the schema + rootSchema.add("city", cityTable); + + // Create the population table + DataModifiableTable populationTable = new DataModifiableTable( + "population", + populationSchema, + populationCollection, + typeFactory); + + // Add population table to the schema + rootSchema.add("population", populationTable); + + // Test CREATE VIEW + try (Statement statement = connection.createStatement()) { + statement.execute("CREATE VIEW city_view AS " + + 
"SELECT c.id, c.name, p.population " + + "FROM city c JOIN population p ON c.id = p.city_id"); + } + + // Query the view + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery("SELECT * FROM city_view ORDER BY id")) { + + // Verify the first row (Paris) + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getInt("id")); + assertEquals("Paris", resultSet.getString("name")); + assertEquals(2_161_000, resultSet.getInt("population")); + + // Verify the second row (New York) + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getInt("id")); + assertEquals("New York", resultSet.getString("name")); + assertEquals(8_336_000, resultSet.getInt("population")); + } + + // Test DROP VIEW + try (Statement statement = connection.createStatement()) { + statement.execute("DROP VIEW city_view"); + } + + // Verify view no longer exists + try (Statement statement = connection.createStatement()) { + statement.executeQuery("SELECT * FROM city_view"); + fail("View should have been dropped"); + } catch (SQLException e) { + // Expected exception + assertTrue(e.getMessage().contains("not found")); + } + } + } + + @Test + void testTruncateTable() throws SQLException { + // Set up test data + populateTestData(); + + try (Connection connection = createCalciteConnection()) { + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Create the test table + DataModifiableTable testTable = new DataModifiableTable( + "test_table", + testTableSchema, + testTableCollection, + typeFactory); + + // Add test table to the schema + rootSchema.add("test_table", testTable); + + // Force data to be persisted by accessing it + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery("SELECT * FROM test_table")) { + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getInt("id")); + 
assertEquals("Test Name", resultSet.getString("name")); + } + + // Test TRUNCATE TABLE + try (Statement statement = connection.createStatement()) { + statement.execute("TRUNCATE TABLE test_table"); + } + + // Verify table is empty + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery("SELECT * FROM test_table")) { + assertFalse(resultSet.next()); + } + } + } + + @Test + void testCreateTableWithOptions() throws SQLException { + // Set up test data + populateTestData(); + + try (Connection connection = createCalciteConnection()) { + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Create the test table + DataModifiableTable testTable = new DataModifiableTable( + "test_table", + testTableSchema, + testTableCollection, + typeFactory); + + // Add test table to the schema + rootSchema.add("test_table", testTable); + + // Test CREATE TABLE with WITH options + try (Statement statement = connection.createStatement()) { + statement.execute("CREATE TABLE options_table (id INTEGER, name VARCHAR) " + + "WITH (option1 = 'value1', option2 = 'value2')"); + } + + // Add data to the new table + try (Statement statement = connection.createStatement()) { + statement.execute("INSERT INTO options_table VALUES (1, 'Options Table Name')"); + } + + // Query the new table + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery("SELECT * FROM options_table")) { + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getInt("id")); + assertEquals("Options Table Name", resultSet.getString("name")); + } + + // Test CREATE TABLE AS with WITH options + try (Statement statement = connection.createStatement()) { + statement.execute("CREATE TABLE options_table_as AS " + + "SELECT id, name FROM test_table " + + "WITH (option3 = 'value3', option4 = 'value4')"); + } + + // Query the new table 
created with AS + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery("SELECT * FROM options_table_as")) { + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getInt("id")); + assertEquals("Test Name", resultSet.getString("name")); + } + + // Clean up + try (Statement statement = connection.createStatement()) { + statement.execute("DROP TABLE options_table"); + statement.execute("DROP TABLE options_table_as"); + } + } + } } diff --git a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/data/DataSchemaTest.java b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/data/DataSchemaTest.java new file mode 100644 index 000000000..299562b30 --- /dev/null +++ b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/data/DataSchemaTest.java @@ -0,0 +1,259 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.data; + +import static org.junit.jupiter.api.Assertions.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.Path; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import org.apache.calcite.config.CalciteConnectionConfig; +import org.apache.calcite.config.CalciteConnectionConfigImpl; +import org.apache.calcite.config.CalciteConnectionProperty; +import org.apache.calcite.jdbc.CalciteConnection; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +class DataSchemaTest { + + @TempDir + Path tempDir; + + private File sampleDataDir; + private File citiesDir; + private File countriesDir; + private RelDataTypeFactory typeFactory; + private Connection connection; + private DataSchema schema; + + @BeforeEach + void setup() throws IOException, SQLException { + // Create the test directory structure + sampleDataDir = tempDir.resolve("data").toFile(); + citiesDir = new File(sampleDataDir, "cities"); + countriesDir = new File(sampleDataDir, "countries"); + + sampleDataDir.mkdirs(); + citiesDir.mkdirs(); + countriesDir.mkdirs(); + + // Create schema files + createCitiesSchema(); + createCountriesSchema(); + + // Initialize the type factory + Properties props = new Properties(); + props.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(), "false"); + CalciteConnectionConfig config = new CalciteConnectionConfigImpl(props); + + // Create a connection to 
get the type factory + connection = DriverManager.getConnection("jdbc:calcite:", props); + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + typeFactory = calciteConnection.getTypeFactory(); + } + + @AfterEach + void cleanup() throws SQLException { + if (connection != null && !connection.isClosed()) { + connection.close(); + connection = null; + } + + // Force garbage collection to release file handles + System.gc(); + } + + private void createCitiesSchema() throws IOException { + // Create a schema for cities + Map schemaMap = new HashMap<>(); + schemaMap.put("name", "cities"); + + // Define columns + Map[] columns = new Map[3]; + + // city column + Map cityColumn = new HashMap<>(); + cityColumn.put("name", "city"); + cityColumn.put("cardinality", "REQUIRED"); + cityColumn.put("sqlTypeName", "VARCHAR"); + columns[0] = cityColumn; + + // country column + Map countryColumn = new HashMap<>(); + countryColumn.put("name", "country"); + countryColumn.put("cardinality", "REQUIRED"); + countryColumn.put("sqlTypeName", "VARCHAR"); + columns[1] = countryColumn; + + // population column + Map populationColumn = new HashMap<>(); + populationColumn.put("name", "population"); + populationColumn.put("cardinality", "REQUIRED"); + populationColumn.put("sqlTypeName", "INTEGER"); + columns[2] = populationColumn; + + schemaMap.put("columns", columns); + + // Write schema to file + ObjectMapper mapper = new ObjectMapper(); + try (FileOutputStream fos = new FileOutputStream(new File(citiesDir, "schema.json"))) { + mapper.writeValue(fos, schemaMap); + } + } + + private void createCountriesSchema() throws IOException { + // Create a schema for countries + Map schemaMap = new HashMap<>(); + schemaMap.put("name", "countries"); + + // Define columns + Map[] columns = new Map[3]; + + // country column + Map countryColumn = new HashMap<>(); + countryColumn.put("name", "country"); + countryColumn.put("cardinality", "REQUIRED"); + 
countryColumn.put("sqlTypeName", "VARCHAR"); + columns[0] = countryColumn; + + // continent column + Map continentColumn = new HashMap<>(); + continentColumn.put("name", "continent"); + continentColumn.put("cardinality", "REQUIRED"); + continentColumn.put("sqlTypeName", "VARCHAR"); + columns[1] = continentColumn; + + // population column + Map populationColumn = new HashMap<>(); + populationColumn.put("name", "population"); + populationColumn.put("cardinality", "REQUIRED"); + populationColumn.put("sqlTypeName", "INTEGER"); + columns[2] = populationColumn; + + schemaMap.put("columns", columns); + + // Write schema to file + ObjectMapper mapper = new ObjectMapper(); + try (FileOutputStream fos = new FileOutputStream(new File(countriesDir, "schema.json"))) { + mapper.writeValue(fos, schemaMap); + } + } + + @Test + @Tag("integration") + void testSchemaCreation() throws IOException { + // Create a DataSchema instance + schema = new DataSchema(sampleDataDir, typeFactory); + + // Get the table map + Map tableMap = schema.getTableMap(); + + // Verify that the schema has tables + assertNotNull(tableMap); + assertFalse(tableMap.isEmpty(), "Schema should have at least one table"); + + // Verify that both test tables exist + assertTrue(tableMap.containsKey("cities"), "Schema should contain the 'cities' table"); + assertTrue(tableMap.containsKey("countries"), "Schema should contain the 'countries' table"); + assertNotNull(tableMap.get("cities"), "Cities table should not be null"); + assertNotNull(tableMap.get("countries"), "Countries table should not be null"); + } + + @Test + @Tag("integration") + void testSqlQueryWithSchema() throws Exception { + // Create a DataSchema instance + schema = new DataSchema(sampleDataDir, typeFactory); + + // Configure Calcite connection properties + Properties info = new Properties(); + info.setProperty("lex", "MYSQL"); + + // Set up a connection and register our schema + connection = DriverManager.getConnection("jdbc:calcite:", info); + 
CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Register the schema + rootSchema.add("data", schema); + + // Execute a simple query + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery( + "SELECT * FROM data.cities WHERE country = 'France'"); + + // Since we don't have actual data in the tables, we just verify the query executes + // In a real test, we would add data to the tables and verify the results + assertNotNull(resultSet, "ResultSet should not be null"); + + // Close resources + resultSet.close(); + statement.close(); + } + + @Test + @Tag("integration") + void testJoinQuery() throws Exception { + // Create a DataSchema instance + schema = new DataSchema(sampleDataDir, typeFactory); + + // Configure Calcite connection properties + Properties info = new Properties(); + info.setProperty("lex", "MYSQL"); + + // Set up a connection and register our schema + connection = DriverManager.getConnection("jdbc:calcite:", info); + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Register the schema + rootSchema.add("data", schema); + + // Execute a join query + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery( + "SELECT c.city, c.country, co.continent " + + "FROM data.cities c " + + "JOIN data.countries co ON c.country = co.country " + + "WHERE co.continent = 'Europe'"); + + // Since we don't have actual data in the tables, we just verify the query executes + // In a real test, we would add data to the tables and verify the results + assertNotNull(resultSet, "ResultSet should not be null"); + + // Close resources + resultSet.close(); + statement.close(); + } +} diff --git a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapSchemaTest.java 
b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapSchemaTest.java new file mode 100644 index 000000000..34a1eed06 --- /dev/null +++ b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapSchemaTest.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.openstreetmap; + +import static org.junit.jupiter.api.Assertions.*; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Properties; +import org.apache.baremaps.openstreetmap.pbf.PbfEntityReader; +import org.apache.baremaps.testing.TestFiles; +import org.apache.calcite.jdbc.CalciteConnection; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.SchemaPlus; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +public class OpenStreetMapSchemaTest { + + @TempDir + Path tempDir; + + private Path sampleDataDir; + private RelDataTypeFactory typeFactory; + + @BeforeEach + void setup() throws IOException, SQLException { + // Create a temporary directory for test files + sampleDataDir = tempDir.resolve("osm-data"); + Files.createDirectories(sampleDataDir); + + // Get the absolute paths to the sample files + Path pbfSourcePath = TestFiles.SAMPLE_OSM_PBF.toAbsolutePath(); + Path xmlSourcePath = TestFiles.SAMPLE_OSM_XML.toAbsolutePath(); + + // Copy sample OSM files to the test directory + Path pbfPath = sampleDataDir.resolve("sample.osm.pbf"); + Path xmlPath = sampleDataDir.resolve("sample.osm.xml"); + + // Check if source files exist + if (!Files.exists(pbfSourcePath)) { + throw new IOException("Sample PBF file not found: " + pbfSourcePath); + } + if (!Files.exists(xmlSourcePath)) { + throw new IOException("Sample XML file not found: " + xmlSourcePath); + } + + Files.copy(pbfSourcePath, pbfPath); + Files.copy(xmlSourcePath, xmlPath); + + // Set up Calcite connection to get a RelDataTypeFactory + Properties info = new Properties(); + info.setProperty("lex", "MYSQL"); + + try 
(Connection connection = DriverManager.getConnection("jdbc:calcite:", info)) { + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + typeFactory = calciteConnection.getTypeFactory(); + } + } + + @Test + void testSchemaCreation() throws IOException { + // Create an OpenStreetMapSchema with the test directory + OpenStreetMapSchema schema = new OpenStreetMapSchema(sampleDataDir.toFile(), typeFactory); + + // Verify that the schema contains the expected tables + // The table name is based on the filename without extension + assertTrue(schema.getTableMap().containsKey("sample"), "Schema should contain 'sample' table"); + + // Verify that the table has the expected structure + OpenStreetMapTable table = (OpenStreetMapTable) schema.getTableMap().get("sample"); + assertNotNull(table, "Table should not be null"); + + // Verify the schema structure + int fieldCount = table.getRowType(typeFactory).getFieldCount(); + assertEquals(9, fieldCount, "Schema should have 9 columns"); + } + + @Test + void testSqlQueryWithDirectory() throws Exception { + // Create an OpenStreetMapSchema with the test directory + OpenStreetMapSchema schema = new OpenStreetMapSchema(sampleDataDir.toFile(), typeFactory); + + // Configure Calcite connection properties + Properties info = new Properties(); + info.setProperty("lex", "MYSQL"); + + // Set up a connection and register our schema + try (Connection connection = DriverManager.getConnection("jdbc:calcite:", info)) { + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Add the schema to the root schema + rootSchema.add("osm", schema); + + // Test a simple query to select a limited number of entities + // The table name is based on the filename without extension + try (Statement statement = connection.createStatement(); + ResultSet resultSet = + statement.executeQuery("SELECT id, type FROM osm.sample LIMIT 10")) { + int 
rowCount = 0; + + while (resultSet.next()) { + rowCount++; + long id = resultSet.getLong("id"); + String type = resultSet.getString("type"); + + // Verify basic properties + assertTrue(id != 0, "Entity should have non-zero ID"); + assertNotNull(type, "Entity should have a type"); + } + + // Verify that we got some rows + assertTrue(rowCount > 0, "Should have retrieved at least one entity"); + } + } + } + + @Test + void testSqlQueryWithSingleFile() throws Exception { + // Create a properly configured PbfEntityReader + PbfEntityReader entityReader = new PbfEntityReader(); + entityReader.setGeometries(true); + entityReader.setCoordinateMap(new HashMap<>()); + entityReader.setReferenceMap(new HashMap<>()); + + // Create an OpenStreetMapSchema with a single file + File pbfFile = sampleDataDir.resolve("sample.osm.pbf").toFile(); + OpenStreetMapSchema schema = new OpenStreetMapSchema(pbfFile, typeFactory, false); + + // Configure Calcite connection properties + Properties info = new Properties(); + info.setProperty("lex", "MYSQL"); + + // Set up a connection and register our schema + try (Connection connection = DriverManager.getConnection("jdbc:calcite:", info)) { + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Add the schema to the root schema + rootSchema.add("osm", schema); + + // Test a simple query to select a limited number of entities + // For a single file, the table name is "sample" (not "osm") + try (Statement statement = connection.createStatement(); + ResultSet resultSet = + statement.executeQuery("SELECT id, type FROM osm.sample LIMIT 10")) { + int rowCount = 0; + + while (resultSet.next()) { + rowCount++; + long id = resultSet.getLong("id"); + String type = resultSet.getString("type"); + + // Verify basic properties + assertTrue(id != 0, "Entity should have non-zero ID"); + assertNotNull(type, "Entity should have a type"); + } + + // Verify that we got 
some rows + assertTrue(rowCount > 0, "Should have retrieved at least one entity"); + } + } + } +} diff --git a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTableTest.java b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTableTest.java index 73aa021ae..74e2f1eaa 100644 --- a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTableTest.java +++ b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/openstreetmap/OpenStreetMapTableTest.java @@ -51,7 +51,7 @@ void testSchemaVerification() throws Exception { entityReader.setGeometries(false); // Don't generate geometries to avoid errors // Create the OpenStreetMapTable - OpenStreetMapTable osmTable = new OpenStreetMapTable(entityReader, inputStream); + OpenStreetMapTable osmTable = new OpenStreetMapTable(SAMPLE_OSM_PATH.toFile(), entityReader); // Verify the schema structure RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(); @@ -91,7 +91,7 @@ void testSqlQueryWithRealPbfFile() throws Exception { try (var inputStream = new FileInputStream(SAMPLE_OSM_PATH.toFile())) { // Create the table with our configured reader - OpenStreetMapTable osmTable = new OpenStreetMapTable(entityReader, inputStream); + OpenStreetMapTable osmTable = new OpenStreetMapTable(SAMPLE_OSM_PATH.toFile(), entityReader); // Configure Calcite connection properties Properties info = new Properties(); diff --git a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/postgres/PostgresModifiableTableTest.java b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/postgres/PostgresModifiableTableTest.java index d2662440f..6f24ca826 100644 --- a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/postgres/PostgresModifiableTableTest.java +++ b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/postgres/PostgresModifiableTableTest.java @@ -25,7 +25,7 @@ import java.util.List; import java.util.Properties; 
import javax.sql.DataSource; -import org.apache.baremaps.calcite.data.DataSchema; +import org.apache.baremaps.calcite.data.DataTableSchema; import org.apache.baremaps.testing.PostgresContainerTest; import org.apache.calcite.jdbc.CalciteConnection; import org.apache.calcite.jdbc.JavaTypeFactoryImpl; @@ -89,7 +89,7 @@ class SchemaTests { @Tag("integration") void schemaContainsExpectedColumns() throws Exception { PostgresModifiableTable table = new PostgresModifiableTable(dataSource(), TEST_TABLE); - DataSchema schema = table.schema(); + DataTableSchema schema = table.schema(); assertNotNull(schema, "Schema should not be null"); assertEquals(TEST_TABLE, schema.name(), "Schema should have correct name"); diff --git a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/postgres/PostgresSchemaTest.java b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/postgres/PostgresSchemaTest.java new file mode 100644 index 000000000..37a2a2c13 --- /dev/null +++ b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/postgres/PostgresSchemaTest.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.postgres; + +import static org.junit.jupiter.api.Assertions.*; + +import java.sql.*; +import java.util.Map; +import java.util.Properties; +import javax.sql.DataSource; +import org.apache.baremaps.testing.PostgresContainerTest; +import org.apache.calcite.jdbc.CalciteConnection; +import org.apache.calcite.jdbc.JavaTypeFactoryImpl; +import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.schema.SchemaPlus; +import org.apache.calcite.schema.Table; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +/** + * Tests for the PostgresSchema class, which provides access to PostgreSQL tables through the + * Calcite framework. + */ +class PostgresSchemaTest extends PostgresContainerTest { + + private static final String TEST_TABLE = "test_table"; + private static final String TEST_SCHEMA = "public"; + private DataSource dataSource; + private RelDataTypeFactory typeFactory; + + @BeforeEach + void setUp() throws SQLException { + dataSource = dataSource(); + typeFactory = new JavaTypeFactoryImpl(); + + // Create a test table + try (Connection connection = dataSource.getConnection(); + Statement statement = connection.createStatement()) { + // Ensure PostGIS extension is available + statement.execute("CREATE EXTENSION IF NOT EXISTS postgis"); + + statement.execute("DROP TABLE IF EXISTS " + TEST_TABLE); + statement.execute("CREATE TABLE " + TEST_TABLE + " (" + + "id INTEGER PRIMARY KEY, " + + "name VARCHAR(100), " + + "description TEXT" + + ")"); + + // Insert some test data + statement.execute("INSERT INTO " + TEST_TABLE + " VALUES (1, 'Test 1', 'Description 1')"); + statement.execute("INSERT INTO " + TEST_TABLE + " VALUES (2, 'Test 2', 'Description 2')"); + } + } + + @Test + @Tag("integration") + void testGetTableMap() throws SQLException { + // Create a PostgresSchema instance + PostgresSchema schema = new PostgresSchema(dataSource, TEST_SCHEMA, 
typeFactory); + + // Get the table map + Map tableMap = schema.getTableMap(); + + // Verify that our test table is in the map + assertTrue(tableMap.containsKey(TEST_TABLE), "Table map should contain test table"); + + // Verify that we can query the table through Calcite + Properties info = new Properties(); + info.setProperty("lex", "MYSQL"); + info.setProperty("caseSensitive", "false"); + + try (Connection connection = DriverManager.getConnection("jdbc:calcite:", info)) { + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Add our schema to the root schema + rootSchema.add(TEST_SCHEMA, schema); + + // Query the table + try (Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery( + "SELECT * FROM " + TEST_SCHEMA + "." + TEST_TABLE + " ORDER BY id")) { + + // Verify first row + assertTrue(resultSet.next()); + assertEquals(1, resultSet.getInt("id")); + assertEquals("Test 1", resultSet.getString("name")); + assertEquals("Description 1", resultSet.getString("description")); + + // Verify second row + assertTrue(resultSet.next()); + assertEquals(2, resultSet.getInt("id")); + assertEquals("Test 2", resultSet.getString("name")); + assertEquals("Description 2", resultSet.getString("description")); + + // Verify no more rows + assertFalse(resultSet.next()); + } + } + } +} diff --git a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/rpsl/RpslSchemaTest.java b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/rpsl/RpslSchemaTest.java new file mode 100644 index 000000000..49c5424be --- /dev/null +++ b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/rpsl/RpslSchemaTest.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.baremaps.calcite.rpsl; + +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import org.apache.baremaps.testing.TestFiles; +import org.apache.calcite.jdbc.CalciteConnection; +import org.apache.calcite.schema.SchemaPlus; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +/** + * Test class for the RpslSchema implementation. 
+ */ +public class RpslSchemaTest { + + @TempDir + Path tempDir; + + @BeforeEach + public void setup() throws SQLException, IOException { + // Create a temporary directory for test files + Path testDir = tempDir.resolve("rpsl-test"); + Files.createDirectories(testDir); + + // Copy the sample RPSL file from TestFiles + Path sourceFile = TestFiles.RPSL_TXT; + Path targetFile = testDir.resolve("sample.txt"); + Files.copy(sourceFile, targetFile, StandardCopyOption.REPLACE_EXISTING); + } + + @Test + public void testSchemaCreation() throws SQLException, IOException { + // Create a connection to Calcite + Connection connection = DriverManager.getConnection("jdbc:calcite:"); + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Create and register the RPSL schema + RpslSchema schema = + new RpslSchema(tempDir.resolve("rpsl-test").toFile(), calciteConnection.getTypeFactory()); + rootSchema.add("rpsl", schema); + + // Verify that the schema contains the expected table + assertTrue(rootSchema.getSubSchemaNames().contains("rpsl")); + assertTrue(rootSchema.getSubSchema("rpsl").getTableNames().contains("sample")); + + connection.close(); + } + + @Test + public void testSqlQuery() throws SQLException, IOException { + // Create a connection to Calcite + Connection connection = DriverManager.getConnection("jdbc:calcite:"); + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Create and register the RPSL schema + RpslSchema schema = + new RpslSchema(tempDir.resolve("rpsl-test").toFile(), calciteConnection.getTypeFactory()); + rootSchema.add("rpsl", schema); + + // Execute a simple SQL query - use lowercase for schema and table names + try (Statement statement = connection.createStatement()) { + ResultSet resultSet = statement.executeQuery( + "SELECT * FROM \"rpsl\".\"sample\""); + + // 
Verify that we get results + assertTrue(resultSet.next()); + + // Verify that the result set has the expected columns + assertNotNull(resultSet.getMetaData()); + assertTrue(resultSet.getMetaData().getColumnCount() > 0); + + // Verify that we can access the data + String inetnum = resultSet.getString("inetnum"); + assertNotNull(inetnum); + } + + connection.close(); + } + + @Test + public void testSingleFileSchema() throws SQLException, IOException { + // Create a connection to Calcite + Connection connection = DriverManager.getConnection("jdbc:calcite:"); + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Get the sample RPSL file + File sampleFile = tempDir.resolve("rpsl-test").resolve("sample.txt").toFile(); + + // Create and register the RPSL schema with a single file + RpslSchema schema = new RpslSchema(sampleFile, calciteConnection.getTypeFactory(), false); + rootSchema.add("single", schema); + + // Verify that the schema contains the expected table + assertTrue(rootSchema.getSubSchemaNames().contains("single")); + assertTrue(rootSchema.getSubSchema("single").getTableNames().contains("sample")); + + // Execute a simple SQL query - use lowercase for schema and table names + try (Statement statement = connection.createStatement()) { + ResultSet resultSet = statement.executeQuery( + "SELECT * FROM \"single\".\"sample\""); + + // Verify that we get results + assertTrue(resultSet.next()); + + // Verify that the result set has the expected columns + assertNotNull(resultSet.getMetaData()); + assertTrue(resultSet.getMetaData().getColumnCount() > 0); + + // Verify that we can access the data + String inetnum = resultSet.getString("inetnum"); + assertNotNull(inetnum); + } + + connection.close(); + } +} diff --git a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/rpsl/RpslTableTest.java 
b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/rpsl/RpslTableTest.java index 846ce1e08..be7558c34 100644 --- a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/rpsl/RpslTableTest.java +++ b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/rpsl/RpslTableTest.java @@ -37,7 +37,7 @@ class RpslTableTest { private static final File SAMPLE_RPSL_FILE = - TestFiles.RIPE_TXT.toFile(); + TestFiles.RPSL_TXT.toFile(); @Test void testSchemaVerification() throws IOException { diff --git a/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/shapefile/ShapefileSchemaTest.java b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/shapefile/ShapefileSchemaTest.java new file mode 100644 index 000000000..02376fc73 --- /dev/null +++ b/baremaps-calcite/src/test/java/org/apache/baremaps/calcite/shapefile/ShapefileSchemaTest.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to you under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.baremaps.calcite.shapefile; + +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import org.apache.baremaps.testing.TestFiles; +import org.apache.calcite.jdbc.CalciteConnection; +import org.apache.calcite.schema.SchemaPlus; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +/** + * Test class for the ShapefileSchema implementation. + */ +public class ShapefileSchemaTest { + + @TempDir + Path tempDir; + + private Connection connection; + private ShapefileSchema schema; + + @BeforeEach + public void setup() throws SQLException, IOException { + // Create a temporary directory for test files + Path testDir = tempDir.resolve("shapefile-test"); + Files.createDirectories(testDir); + + // Copy the sample shapefile to the test directory + Path sourceFile = TestFiles.POINT_SHP; + Path targetFile = testDir.resolve("point.shp"); + Files.copy(sourceFile, targetFile, StandardCopyOption.REPLACE_EXISTING); + + // Copy the associated .dbf file + Path sourceDbf = sourceFile.resolveSibling("point.dbf"); + Path targetDbf = targetFile.resolveSibling("point.dbf"); + Files.copy(sourceDbf, targetDbf, StandardCopyOption.REPLACE_EXISTING); + + // Copy the associated .shx file + Path sourceShx = sourceFile.resolveSibling("point.shx"); + Path targetShx = targetFile.resolveSibling("point.shx"); + Files.copy(sourceShx, targetShx, StandardCopyOption.REPLACE_EXISTING); + } + + @AfterEach + public void cleanup() throws SQLException { + if (connection != null && !connection.isClosed()) { + 
connection.close(); + connection = null; + } + + // Force garbage collection to release file handles + System.gc(); + } + + @Test + public void testSchemaCreation() throws SQLException, IOException { + // Create a connection to Calcite + connection = DriverManager.getConnection("jdbc:calcite:"); + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Create and register the shapefile schema + schema = new ShapefileSchema(tempDir.resolve("shapefile-test").toFile(), + calciteConnection.getTypeFactory()); + rootSchema.add("shapefile", schema); + + // Verify that the schema contains the expected table + assertTrue(rootSchema.getSubSchemaNames().contains("shapefile")); + assertTrue(rootSchema.getSubSchema("shapefile").getTableNames().contains("point")); + } + + @Test + public void testSqlQuery() throws SQLException, IOException { + // Create a connection to Calcite + connection = DriverManager.getConnection("jdbc:calcite:"); + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Create and register the shapefile schema + schema = new ShapefileSchema(tempDir.resolve("shapefile-test").toFile(), + calciteConnection.getTypeFactory()); + rootSchema.add("shapefile", schema); + + // Execute a simple SQL query - use lowercase for schema and table names + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery( + "SELECT * FROM \"shapefile\".\"point\""); + + // Verify that we get results + assertTrue(resultSet.next()); + + // Verify that the result set has the expected columns + // Note: The actual column names will depend on the sample shapefile + assertNotNull(resultSet.getMetaData()); + assertTrue(resultSet.getMetaData().getColumnCount() > 0); + + // Close resources + resultSet.close(); + statement.close(); + } + + @Test + public void 
testSingleFileSchema() throws SQLException, IOException { + // Create a connection to Calcite + connection = DriverManager.getConnection("jdbc:calcite:"); + CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class); + SchemaPlus rootSchema = calciteConnection.getRootSchema(); + + // Get the sample shapefile + File sampleFile = tempDir.resolve("shapefile-test").resolve("point.shp").toFile(); + + // Create and register the shapefile schema with a single file + schema = + new ShapefileSchema(sampleFile, calciteConnection.getTypeFactory(), false); + rootSchema.add("single", schema); + + // Verify that the schema contains the expected table + assertTrue(rootSchema.getSubSchemaNames().contains("single")); + assertTrue(rootSchema.getSubSchema("single").getTableNames().contains("point")); + + // Execute a simple SQL query - use lowercase for schema and table names + Statement statement = connection.createStatement(); + ResultSet resultSet = statement.executeQuery( + "SELECT * FROM \"single\".\"point\""); + + // Verify that we get results + assertTrue(resultSet.next()); + + // Verify that the result set has the expected columns + assertNotNull(resultSet.getMetaData()); + assertTrue(resultSet.getMetaData().getColumnCount() > 0); + + // Close resources + resultSet.close(); + statement.close(); + } +} diff --git a/baremaps-core/src/test/java/org/apache/baremaps/integration/GeoParquetToPostgresTest.java b/baremaps-core/src/test/java/org/apache/baremaps/integration/GeoParquetToPostgresTest.java index ef9952155..c9a6bfb14 100644 --- a/baremaps-core/src/test/java/org/apache/baremaps/integration/GeoParquetToPostgresTest.java +++ b/baremaps-core/src/test/java/org/apache/baremaps/integration/GeoParquetToPostgresTest.java @@ -41,7 +41,7 @@ class GeoParquetToPostgresTest extends PostgresContainerTest { @Tag("integration") void copyGeoParquetToPostgres() throws Exception { // Open the GeoParquet - var uri = 
TestFiles.resolve("baremaps-testing/data/samples/example.parquet").toUri(); + var uri = TestFiles.GEOPARQUET.toUri(); // Set ThreadLocal DataSource for PostgresDdlExecutor to use PostgresDdlExecutor.setThreadLocalDataSource(dataSource()); diff --git a/baremaps-core/src/test/java/org/apache/baremaps/iploc/IpLocObjectTest.java b/baremaps-core/src/test/java/org/apache/baremaps/iploc/IpLocObjectTest.java index 713b6a37f..5d2c58ff9 100644 --- a/baremaps-core/src/test/java/org/apache/baremaps/iploc/IpLocObjectTest.java +++ b/baremaps-core/src/test/java/org/apache/baremaps/iploc/IpLocObjectTest.java @@ -61,7 +61,7 @@ class IpLocObjectTest { @BeforeAll public static void beforeAll() throws Exception { // Load the NIC sample objects - var file = TestFiles.resolve("baremaps-testing/data/ripe/sample.txt"); + var file = TestFiles.RPSL_TXT; try (var input = Files.newInputStream(file)) { rpslObjects = new RpslReader().read(input).toList(); } diff --git a/baremaps-core/src/test/java/org/apache/baremaps/tilestore/TileDataSchemaTest.java b/baremaps-core/src/test/java/org/apache/baremaps/tilestore/TileDataTableSchemaTest.java similarity index 97% rename from baremaps-core/src/test/java/org/apache/baremaps/tilestore/TileDataSchemaTest.java rename to baremaps-core/src/test/java/org/apache/baremaps/tilestore/TileDataTableSchemaTest.java index 0fea6925f..81a5296f4 100644 --- a/baremaps-core/src/test/java/org/apache/baremaps/tilestore/TileDataSchemaTest.java +++ b/baremaps-core/src/test/java/org/apache/baremaps/tilestore/TileDataTableSchemaTest.java @@ -24,7 +24,7 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -public abstract class TileDataSchemaTest { +public abstract class TileDataTableSchemaTest { // TODO: try to move this in the testing module diff --git a/baremaps-core/src/test/java/org/apache/baremaps/tilestore/file/FileTileStoreTest.java b/baremaps-core/src/test/java/org/apache/baremaps/tilestore/file/FileTileStoreTest.java index d229899ba..a96d95235 
100644 --- a/baremaps-core/src/test/java/org/apache/baremaps/tilestore/file/FileTileStoreTest.java +++ b/baremaps-core/src/test/java/org/apache/baremaps/tilestore/file/FileTileStoreTest.java @@ -24,12 +24,12 @@ import java.nio.file.Path; import java.nio.file.Paths; import org.apache.baremaps.data.util.FileUtils; -import org.apache.baremaps.tilestore.TileDataSchemaTest; +import org.apache.baremaps.tilestore.TileDataTableSchemaTest; import org.apache.baremaps.tilestore.TileStore; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; -class FileTileStoreTest extends TileDataSchemaTest { +class FileTileStoreTest extends TileDataTableSchemaTest { Path directory; diff --git a/baremaps-core/src/test/java/org/apache/baremaps/tilestore/mbtiles/MBTilesStoreTest.java b/baremaps-core/src/test/java/org/apache/baremaps/tilestore/mbtiles/MBTilesStoreTest.java index 7ef9f9887..33e23e9d9 100644 --- a/baremaps-core/src/test/java/org/apache/baremaps/tilestore/mbtiles/MBTilesStoreTest.java +++ b/baremaps-core/src/test/java/org/apache/baremaps/tilestore/mbtiles/MBTilesStoreTest.java @@ -26,13 +26,13 @@ import java.util.HashMap; import java.util.Map; import org.apache.baremaps.data.util.FileUtils; -import org.apache.baremaps.tilestore.TileDataSchemaTest; +import org.apache.baremaps.tilestore.TileDataTableSchemaTest; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.sqlite.SQLiteDataSource; -class MBTilesStoreTest extends TileDataSchemaTest { +class MBTilesStoreTest extends TileDataTableSchemaTest { Path file; diff --git a/baremaps-data/pom.xml b/baremaps-data/pom.xml index e068a4822..b392c8fc9 100644 --- a/baremaps-data/pom.xml +++ b/baremaps-data/pom.xml @@ -34,10 +34,6 @@ limitations under the License. 
org.apache.calcite calcite-core - - org.apache.calcite - calcite-server - org.roaringbitmap RoaringBitmap diff --git a/baremaps-data/src/main/java/org/apache/baremaps/data/collection/AppendOnlyLog.java b/baremaps-data/src/main/java/org/apache/baremaps/data/collection/AppendOnlyLog.java index da647e5c9..eb0b6203f 100644 --- a/baremaps-data/src/main/java/org/apache/baremaps/data/collection/AppendOnlyLog.java +++ b/baremaps-data/src/main/java/org/apache/baremaps/data/collection/AppendOnlyLog.java @@ -199,9 +199,8 @@ public long size() { /** {@inheritDoc} */ public void clear() { try { - memory.clear(); this.size = 0; - persistSize(); + memory.clear(); } catch (IOException e) { throw new DataCollectionException(e); } diff --git a/baremaps-data/src/main/java/org/apache/baremaps/data/memory/MemoryMappedDirectory.java b/baremaps-data/src/main/java/org/apache/baremaps/data/memory/MemoryMappedDirectory.java index 7b2c16b58..48e2148ee 100644 --- a/baremaps-data/src/main/java/org/apache/baremaps/data/memory/MemoryMappedDirectory.java +++ b/baremaps-data/src/main/java/org/apache/baremaps/data/memory/MemoryMappedDirectory.java @@ -116,7 +116,9 @@ public synchronized void clear() throws IOException { header.clear(); segments.clear(); - // Delete the directory and all files in it - FileUtils.deleteRecursively(directory); + // Delete the directory and all files in it if it exists + if (Files.exists(directory)) { + FileUtils.deleteRecursively(directory); + } } } diff --git a/baremaps-rpsl/src/test/java/org/apache/baremaps/rpsl/RpslObjectTest.java b/baremaps-rpsl/src/test/java/org/apache/baremaps/rpsl/RpslObjectTest.java index a3cfb75b5..80d0a3a84 100644 --- a/baremaps-rpsl/src/test/java/org/apache/baremaps/rpsl/RpslObjectTest.java +++ b/baremaps-rpsl/src/test/java/org/apache/baremaps/rpsl/RpslObjectTest.java @@ -32,7 +32,7 @@ class RpslObjectTest { @BeforeEach public void before() throws IOException { - var file = TestFiles.resolve("baremaps-testing/data/ripe/sample.txt"); + var 
file = TestFiles.RPSL_TXT; try (var input = Files.newInputStream(file)) { objects = new RpslReader().read(input).toList(); } diff --git a/baremaps-rpsl/src/test/java/org/apache/baremaps/rpsl/RpslParserTest.java b/baremaps-rpsl/src/test/java/org/apache/baremaps/rpsl/RpslParserTest.java index cd795c622..48e8feae9 100644 --- a/baremaps-rpsl/src/test/java/org/apache/baremaps/rpsl/RpslParserTest.java +++ b/baremaps-rpsl/src/test/java/org/apache/baremaps/rpsl/RpslParserTest.java @@ -30,7 +30,7 @@ class RpslParserTest { @Test void parseObjects() throws IOException { - var file = TestFiles.resolve("baremaps-testing/data/ripe/sample.txt"); + var file = TestFiles.RPSL_TXT; try (var input = Files.newInputStream(file)) { List objects = new RpslReader().read(input).toList(); assertEquals(10, objects.size()); @@ -39,7 +39,7 @@ void parseObjects() throws IOException { @Test void parseAttributes() throws IOException { - var file = TestFiles.resolve("baremaps-testing/data/ripe/sample.txt"); + var file = TestFiles.RPSL_TXT; try (var input = Files.newInputStream(file)) { List objects = new RpslReader().read(input).toList(); List attributes = objects.stream() diff --git a/baremaps-shapefile/src/main/java/org/apache/baremaps/shapefile/ShapefileInputStream.java b/baremaps-shapefile/src/main/java/org/apache/baremaps/shapefile/ShapefileInputStream.java index d83d1306b..4b3c269e3 100644 --- a/baremaps-shapefile/src/main/java/org/apache/baremaps/shapefile/ShapefileInputStream.java +++ b/baremaps-shapefile/src/main/java/org/apache/baremaps/shapefile/ShapefileInputStream.java @@ -108,11 +108,32 @@ public int available() { /** @see java.io.InputStream#close() */ @Override public void close() throws IOException { - if (this.dbaseReader != null) { - this.dbaseReader.close(); + IOException firstException = null; + + try { + if (this.dbaseReader != null) { + this.dbaseReader.close(); + this.dbaseReader = null; + } + } catch (IOException e) { + firstException = e; } - if (this.shapefileReader != 
null) { - this.shapefileReader.close(); + + try { + if (this.shapefileReader != null) { + this.shapefileReader.close(); + this.shapefileReader = null; + } + } catch (IOException e) { + if (firstException == null) { + firstException = e; + } else { + firstException.addSuppressed(e); + } + } + + if (firstException != null) { + throw firstException; } } diff --git a/baremaps-shapefile/src/main/java/org/apache/baremaps/shapefile/ShapefileReader.java b/baremaps-shapefile/src/main/java/org/apache/baremaps/shapefile/ShapefileReader.java index b9d994b2a..b9f0ac2d5 100644 --- a/baremaps-shapefile/src/main/java/org/apache/baremaps/shapefile/ShapefileReader.java +++ b/baremaps-shapefile/src/main/java/org/apache/baremaps/shapefile/ShapefileReader.java @@ -37,7 +37,7 @@ * @see dBASE III File * Structure */ -public class ShapefileReader { +public class ShapefileReader implements AutoCloseable { /** Shapefile. */ private File shapefile; @@ -53,6 +53,9 @@ public class ShapefileReader { /** Database field descriptors. */ private List databaseFieldsDescriptors; + /** The underlying input stream, if open */ + private ShapefileInputStream inputStream; + /** * Construct a Shapefile from a file. * @@ -170,11 +173,10 @@ public File getFileShapefileIndex() { * @return Features */ public ShapefileInputStream read() throws IOException { - ShapefileInputStream is = - new ShapefileInputStream(this.shapefile, this.databaseFile, this.shapeFileIndex); - this.shapefileDescriptor = is.getShapefileDescriptor(); - this.databaseFieldsDescriptors = is.getDatabaseFieldsDescriptors(); - return is; + inputStream = new ShapefileInputStream(this.shapefile, this.databaseFile, this.shapeFileIndex); + this.shapefileDescriptor = inputStream.getShapefileDescriptor(); + this.databaseFieldsDescriptors = inputStream.getDatabaseFieldsDescriptors(); + return inputStream; } /** @@ -186,4 +188,18 @@ public void loadDescriptors() throws IOException { // Doing a read is sufficient to initialize the internal descriptors. 
} } + + /** + * Closes this reader and releases any system resources associated with it. If the reader is + * already closed then invoking this method has no effect. + * + * @throws IOException if an I/O error occurs + */ + @Override + public void close() throws IOException { + if (inputStream != null) { + inputStream.close(); + inputStream = null; + } + } } diff --git a/baremaps-testing/data/samples/README.md b/baremaps-testing/data/README.md similarity index 100% rename from baremaps-testing/data/samples/README.md rename to baremaps-testing/data/README.md diff --git a/baremaps-testing/data/samples/countries.gpkg b/baremaps-testing/data/geopackage/countries.gpkg similarity index 100% rename from baremaps-testing/data/samples/countries.gpkg rename to baremaps-testing/data/geopackage/countries.gpkg diff --git a/baremaps-testing/data/samples/data.gpkg b/baremaps-testing/data/geopackage/data.gpkg similarity index 100% rename from baremaps-testing/data/samples/data.gpkg rename to baremaps-testing/data/geopackage/data.gpkg diff --git a/baremaps-testing/data/samples/example.parquet b/baremaps-testing/data/geoparquet/example.parquet similarity index 100% rename from baremaps-testing/data/samples/example.parquet rename to baremaps-testing/data/geoparquet/example.parquet diff --git a/baremaps-testing/data/ripe/sample.txt b/baremaps-testing/data/rpsl/sample.txt similarity index 100% rename from baremaps-testing/data/ripe/sample.txt rename to baremaps-testing/data/rpsl/sample.txt diff --git a/baremaps-testing/src/main/java/org/apache/baremaps/testing/TestFiles.java b/baremaps-testing/src/main/java/org/apache/baremaps/testing/TestFiles.java index 445bcbced..372983fd4 100644 --- a/baremaps-testing/src/main/java/org/apache/baremaps/testing/TestFiles.java +++ b/baremaps-testing/src/main/java/org/apache/baremaps/testing/TestFiles.java @@ -68,10 +68,10 @@ private TestFiles() { resolve("baremaps-testing/data/archives/file.zip"); public static final Path GEOPARQUET = - 
resolve("baremaps-testing/data/samples/example.parquet"); + resolve("baremaps-testing/data/geoparquet/example.parquet"); public static final Path GEOPACKAGE = - resolve("baremaps-testing/data/samples/countries.gpkg"); + resolve("baremaps-testing/data/geopackage/countries.gpkg"); public static final Path TILESET_JSON = resolve("baremaps-testing/data/tilesets/tileset.json"); @@ -101,8 +101,8 @@ private TestFiles() { public static final Path POINT_FLATGEOBUF = resolve("baremaps-testing/data/flatgeobuf/countries.fgb"); - public static final Path RIPE_TXT = - resolve("baremaps-testing/data/ripe/sample.txt"); + public static final Path RPSL_TXT = + resolve("baremaps-testing/data/rpsl/sample.txt"); /* The geometries of the osm-sample/sample.osm.xml file */ diff --git a/pom.xml b/pom.xml index 72b39891c..f3ad159af 100644 --- a/pom.xml +++ b/pom.xml @@ -84,7 +84,7 @@ limitations under the License. 2.27.17 1.16.0 3.1.8 - 1.38.0 + 1.39.0 1.27.1 8.5.14 3.26.2 @@ -334,11 +334,6 @@ limitations under the License. calcite-core ${version.lib.calcite} - - org.apache.calcite - calcite-server - ${version.lib.calcite} - org.apache.commons commons-compress @@ -641,7 +636,6 @@ limitations under the License. true 2 groupId,artifactId - groupId,artifactId true true