sqlglot.dialects.redshift
```python
from __future__ import annotations

import typing as t

from sqlglot import exp, transforms
from sqlglot.dialects.dialect import (
    NormalizationStrategy,
    concat_to_dpipe_sql,
    concat_ws_to_dpipe_sql,
    date_delta_sql,
    generatedasidentitycolumnconstraint_sql,
    json_extract_segments,
    no_tablesample_sql,
    rename_func,
)
from sqlglot.dialects.postgres import Postgres
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType

if t.TYPE_CHECKING:
    from sqlglot._typing import E


def _build_date_delta(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
    def _builder(args: t.List) -> E:
        expr = expr_type(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0))
        if expr_type is exp.TsOrDsAdd:
            expr.set("return_type", exp.DataType.build("TIMESTAMP"))

        return expr

    return _builder


class Redshift(Postgres):
    # https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    SUPPORTS_USER_DEFINED_TYPES = False
    INDEX_OFFSET = 0
    COPY_PARAMS_ARE_CSV = False

    TIME_FORMAT = "'YYYY-MM-DD HH:MI:SS'"
    TIME_MAPPING = {
        **Postgres.TIME_MAPPING,
        "MON": "%b",
        "HH": "%H",
    }

    class Parser(Postgres.Parser):
        FUNCTIONS = {
            **Postgres.Parser.FUNCTIONS,
            "ADD_MONTHS": lambda args: exp.TsOrDsAdd(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                unit=exp.var("month"),
                return_type=exp.DataType.build("TIMESTAMP"),
            ),
            "DATEADD": _build_date_delta(exp.TsOrDsAdd),
            "DATE_ADD": _build_date_delta(exp.TsOrDsAdd),
            "DATEDIFF": _build_date_delta(exp.TsOrDsDiff),
            "DATE_DIFF": _build_date_delta(exp.TsOrDsDiff),
            "GETDATE": exp.CurrentTimestamp.from_arg_list,
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "STRTOL": exp.FromBase.from_arg_list,
        }

        NO_PAREN_FUNCTION_PARSERS = {
            **Postgres.Parser.NO_PAREN_FUNCTION_PARSERS,
            "APPROXIMATE": lambda self: self._parse_approximate_count(),
            "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, transaction=True),
        }

        SUPPORTS_IMPLICIT_UNNEST = True

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
        ) -> t.Optional[exp.Expression]:
            # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
            unpivot = self._match(TokenType.UNPIVOT)
            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
            )

            return self.expression(exp.Pivot, this=table, unpivot=True) if unpivot else table

        def _parse_convert(
            self, strict: bool, safe: t.Optional[bool] = None
        ) -> t.Optional[exp.Expression]:
            to = self._parse_types()
            self._match(TokenType.COMMA)
            this = self._parse_bitwise()
            return self.expression(exp.TryCast, this=this, to=to, safe=safe)

        def _parse_approximate_count(self) -> t.Optional[exp.ApproxDistinct]:
            index = self._index - 1
            func = self._parse_function()

            if isinstance(func, exp.Count) and isinstance(func.this, exp.Distinct):
                return self.expression(exp.ApproxDistinct, this=seq_get(func.this.expressions, 0))
            self._retreat(index)
            return None

    class Tokenizer(Postgres.Tokenizer):
        BIT_STRINGS = []
        HEX_STRINGS = []
        STRING_ESCAPES = ["\\", "'"]

        KEYWORDS = {
            **Postgres.Tokenizer.KEYWORDS,
            "HLLSKETCH": TokenType.HLLSKETCH,
            "SUPER": TokenType.SUPER,
            "TOP": TokenType.TOP,
            "UNLOAD": TokenType.COMMAND,
            "VARBYTE": TokenType.VARBINARY,
        }
        KEYWORDS.pop("VALUES")

        # Redshift allows # to appear as a table identifier prefix
        SINGLE_TOKENS = Postgres.Tokenizer.SINGLE_TOKENS.copy()
        SINGLE_TOKENS.pop("#")

    class Generator(Postgres.Generator):
        LOCKING_READS_SUPPORTED = False
        QUERY_HINTS = False
        VALUES_AS_TABLE = False
        TZ_TO_WITH_TIME_ZONE = True
        NVL2_SUPPORTED = True
        LAST_DAY_SUPPORTS_DATE_PART = False
        CAN_IMPLEMENT_ARRAY_ANY = False
        MULTI_ARG_DISTINCT = True
        COPY_PARAMS_ARE_WRAPPED = False

        TYPE_MAPPING = {
            **Postgres.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "VARBYTE",
            exp.DataType.Type.INT: "INTEGER",
            exp.DataType.Type.TIMETZ: "TIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.VARBINARY: "VARBYTE",
            exp.DataType.Type.ROWVERSION: "VARBYTE",
        }

        TRANSFORMS = {
            **Postgres.Generator.TRANSFORMS,
            exp.Concat: concat_to_dpipe_sql,
            exp.ConcatWs: concat_ws_to_dpipe_sql,
            exp.ApproxDistinct: lambda self, e: f"APPROXIMATE COUNT(DISTINCT {self.sql(e, 'this')})",
            exp.CurrentTimestamp: lambda self, e: (
                "SYSDATE" if e.args.get("transaction") else "GETDATE()"
            ),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DistKeyProperty: lambda self, e: self.func("DISTKEY", e.this),
            exp.DistStyleProperty: lambda self, e: self.naked_property(e),
            exp.FromBase: rename_func("STRTOL"),
            exp.GeneratedAsIdentityColumnConstraint: generatedasidentitycolumnconstraint_sql,
            exp.JSONExtract: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.JSONExtractScalar: json_extract_segments("JSON_EXTRACT_PATH_TEXT"),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.ParseJSON: rename_func("JSON_PARSE"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.eliminate_semi_and_anti_joins,
                    transforms.unqualify_unnest,
                ]
            ),
            exp.SortKeyProperty: lambda self, e: f"{'COMPOUND ' if e.args['compound'] else ''}SORTKEY({self.format_args(*e.this)})",
            exp.StartsWith: lambda self, e: f"{self.sql(e.this)} LIKE {self.sql(e.expression)} || '%'",
            exp.TableSample: no_tablesample_sql,
            exp.TsOrDsAdd: date_delta_sql("DATEADD"),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: lambda self, e: f"(TIMESTAMP 'epoch' + {self.sql(e.this)} * INTERVAL '1 SECOND')",
        }

        # Postgres maps exp.Pivot to no_pivot_sql, but Redshift supports pivots
        TRANSFORMS.pop(exp.Pivot)

        # Redshift uses the POW | POWER (expr1, expr2) syntax instead of expr1 ^ expr2 (Postgres)
        TRANSFORMS.pop(exp.Pow)

        # Redshift supports ANY_VALUE(..)
        TRANSFORMS.pop(exp.AnyValue)

        # Redshift supports LAST_DAY(..)
        TRANSFORMS.pop(exp.LastDay)

        RESERVED_KEYWORDS = {
            "aes128", "aes256", "all", "allowoverwrite", "analyse", "analyze", "and", "any",
            "array", "as", "asc", "authorization", "az64", "backup", "between", "binary",
            "blanksasnull", "both", "bytedict", "bzip2", "case", "cast", "check", "collate",
            "column", "constraint", "create", "credentials", "cross", "current_date",
            "current_time", "current_timestamp", "current_user", "current_user_id", "default",
            "deferrable", "deflate", "defrag", "delta", "delta32k", "desc", "disable",
            "distinct", "do", "else", "emptyasnull", "enable", "encode", "encrypt",
            "encryption", "end", "except", "explicit", "false", "for", "foreign", "freeze",
            "from", "full", "globaldict256", "globaldict64k", "grant", "group", "gzip",
            "having", "identity", "ignore", "ilike", "in", "initially", "inner", "intersect",
            "interval", "into", "is", "isnull", "join", "leading", "left", "like", "limit",
            "localtime", "localtimestamp", "lun", "luns", "lzo", "lzop", "minus", "mostly16",
            "mostly32", "mostly8", "natural", "new", "not", "notnull", "null", "nulls", "off",
            "offline", "offset", "oid", "old", "on", "only", "open", "or", "order", "outer",
            "overlaps", "parallel", "partition", "percent", "permissions", "pivot", "placing",
            "primary", "raw", "readratio", "recover", "references", "rejectlog", "resort",
            "respect", "restore", "right", "select", "session_user", "similar", "snapshot",
            "some", "sysdate", "system", "table", "tag", "tdes", "text255", "text32k", "then",
            "timestamp", "to", "top", "trailing", "true", "truncatecolumns", "type", "union",
            "unique", "unnest", "unpivot", "user", "using", "verbose", "wallet", "when",
            "where", "with", "without",
        }

        def unnest_sql(self, expression: exp.Unnest) -> str:
            args = expression.expressions
            num_args = len(args)

            if num_args > 1:
                self.unsupported(f"Unsupported number of arguments in UNNEST: {num_args}")
                return ""

            arg = self.sql(seq_get(args, 0))
            alias = self.expressions(expression.args.get("alias"), key="columns", flat=True)
            return f"{arg} AS {alias}" if alias else arg

        def with_properties(self, properties: exp.Properties) -> str:
            """Redshift doesn't have `WITH` as part of its with_properties, so we remove it"""
            return self.properties(properties, prefix=" ", suffix="")

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            if expression.is_type(exp.DataType.Type.JSON):
                # Redshift doesn't support a JSON type, so casting to it is treated as a noop
                return self.sql(expression, "this")

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """
            Redshift converts the `TEXT` data type to `VARCHAR(255)` by default, when what is
            usually meant is a VARCHAR of max length, which is `VARCHAR(MAX)` in Redshift.
            Therefore, if we get a `TEXT` data type without precision, we convert it to
            `VARCHAR(MAX)`; if it does have precision, we just convert `TEXT` to `VARCHAR`.
            """
            if expression.is_type("text"):
                expression.set("this", exp.DataType.Type.VARCHAR)
                precision = expression.args.get("expressions")

                if not precision:
                    expression.append("expressions", exp.var("MAX"))

            return super().datatype_sql(expression)
```
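A quick sketch of how these parser and generator hooks surface through sqlglot's public API. This is illustrative only; the exact output strings may vary between sqlglot versions.

```python
import sqlglot

# SYSDATE parses to exp.CurrentTimestamp with transaction=True, so it
# round-trips as SYSDATE rather than being rewritten to GETDATE().
print(sqlglot.transpile("SELECT SYSDATE", read="redshift", write="redshift")[0])
# SELECT SYSDATE

# DATEADD parses via _build_date_delta into exp.TsOrDsAdd, which makes it
# portable to other dialects.
print(
    sqlglot.transpile(
        "SELECT DATEADD(MONTH, 1, created_at) FROM t", read="redshift", write="postgres"
    )[0]
)
```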
class Redshift(Postgres):
NORMALIZATION_STRATEGY: Specifies the strategy according to which identifiers should be normalized.
TIME_MAPPING: Associates this dialect's time formats with their equivalent Python strftime formats.
Mapping of an escaped sequence (`\\n`) to its unescaped version (`\n`).
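A small sketch of the case-insensitive normalization strategy in action, using the `normalize_identifiers` optimizer pass (output is indicative):

```python
import sqlglot
from sqlglot.optimizer.normalize_identifiers import normalize_identifiers

# Unquoted identifiers are lowercased, mirroring Redshift's rules for names.
expr = sqlglot.parse_one("SELECT MyCol FROM MyTable", read="redshift")
print(normalize_identifiers(expr, dialect="redshift").sql(dialect="redshift"))
# expected along the lines of: SELECT mycol FROM mytable
```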
Inherited Members
- sqlglot.dialects.dialect.Dialect
- Dialect
- WEEK_OFFSET
- UNNEST_COLUMN_ONLY
- ALIAS_POST_TABLESAMPLE
- TABLESAMPLE_SIZE_IS_PERCENT
- IDENTIFIERS_CAN_START_WITH_DIGIT
- DPIPE_IS_STRING_CONCAT
- STRICT_STRING_CONCAT
- SUPPORTS_SEMI_ANTI_JOIN
- NORMALIZE_FUNCTIONS
- LOG_BASE_FIRST
- SAFE_DIVISION
- DATE_FORMAT
- DATEINT_FORMAT
- FORMAT_MAPPING
- PSEUDOCOLUMNS
- PREFER_CTE_ALIAS_COLUMN
- get_or_raise
- format_time
- normalize_identifier
- case_sensitive
- can_identify
- quote_identifier
- to_json_path
- parse
- parse_into
- generate
- transpile
- tokenize
- tokenizer
- parser
- generator
class Redshift.Parser(Postgres.Parser):
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
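Two Redshift-specific parses handled by the overrides above, as a hedged sketch:

```python
import sqlglot
from sqlglot import exp

# APPROXIMATE COUNT(DISTINCT ...) is folded into a single exp.ApproxDistinct node.
approx = sqlglot.parse_one("SELECT APPROXIMATE COUNT(DISTINCT user_id) FROM t", read="redshift")
assert approx.find(exp.ApproxDistinct) is not None

# CONVERT takes the target type first and is parsed as a TryCast.
conv = sqlglot.parse_one("SELECT CONVERT(INTEGER, col) FROM t", read="redshift")
assert conv.find(exp.TryCast) is not None
```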
Inherited Members
- sqlglot.parser.Parser
- Parser
- NO_PAREN_FUNCTIONS
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- ID_VAR_TOKENS
- INTERVAL_VARS
- TABLE_ALIAS_TOKENS
- ALIAS_TOKENS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- EQUALITY
- COMPARISON
- TERM
- FACTOR
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- EXPRESSION_PARSERS
- UNARY_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- CONSTRAINT_PARSERS
- ALTER_PARSERS
- ALTER_ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- NULL_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- IDENTIFY_PIVOT_STRINGS
- LOG_DEFAULTS_TO_LN
- ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
- TABLESAMPLE_CSV
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_UNION
- UNION_MODIFIERS
- NO_PAREN_IF_COMMANDS
- VALUES_FOLLOWED_BY_PAREN
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- error_level
- error_message_context
- max_errors
- dialect
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- errors
- sql
class Redshift.Tokenizer(Postgres.Tokenizer):
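A sketch of the `#` handling for temp-table prefixes; the token texts shown are illustrative:

```python
from sqlglot.dialects.redshift import Redshift

# Because '#' is popped from SINGLE_TOKENS, it can prefix a temp-table
# identifier instead of tokenizing as a standalone symbol.
tokens = Redshift().tokenize("SELECT * FROM #tmp_sales")
print([t.text for t in tokens])  # '#tmp_sales' should survive as one token
```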
Inherited Members
class Redshift.Generator(Postgres.Generator):
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
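The options above flow through `sqlglot.transpile`; a sketch that also exercises the `exp.Select` preprocessing registered in `TRANSFORMS` (output shape is indicative):

```python
import sqlglot

sql = sqlglot.transpile(
    "SELECT DISTINCT ON (dept) dept, salary FROM emp ORDER BY dept, salary DESC",
    read="postgres",
    write="redshift",
    pretty=True,  # forwarded to the Generator
)[0]
# DISTINCT ON has no Redshift equivalent, so it is rewritten by
# transforms.eliminate_distinct_on (typically into a ROW_NUMBER() filter).
print(sql)
```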
def unnest_sql(self, expression: exp.Unnest) -> str:
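A hedged sketch of the rendering this method produces; the round-trip relies on `SUPPORTS_IMPLICIT_UNNEST` in the parser above, and the exact output may differ between sqlglot versions:

```python
import sqlglot

# A navigated SUPER array in FROM is parsed as an implicit unnest and
# re-rendered here as `<expr> AS <alias>`.
sql = "SELECT o FROM customers AS c, c.orders AS o"
print(sqlglot.transpile(sql, read="redshift", write="redshift")[0])
# should round-trip essentially unchanged
```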
def with_properties(self, properties: exp.Properties) -> str:
Redshift doesn't have `WITH` as part of its with_properties, so we remove it.
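A sketch with Redshift table properties, which render bare after the column list rather than inside a `WITH (...)` wrapper (the DDL is illustrative):

```python
import sqlglot

ddl = "CREATE TABLE t (a INTEGER) DISTSTYLE KEY DISTKEY(a) SORTKEY(a)"
print(sqlglot.transpile(ddl, read="redshift", write="redshift")[0])
# the DISTSTYLE/DISTKEY/SORTKEY properties appear with no WITH prefix
```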
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
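A sketch of the JSON-cast noop:

```python
import sqlglot

# The cast is dropped entirely, since Redshift has no JSON type.
print(sqlglot.transpile("SELECT CAST(payload AS JSON)", write="redshift")[0])
# expected along the lines of: SELECT payload
```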
def datatype_sql(self, expression: exp.DataType) -> str:
Redshift converts the `TEXT` data type to `VARCHAR(255)` by default, when what is usually meant is a VARCHAR of max length, which is `VARCHAR(MAX)` in Redshift. Therefore, if we get a `TEXT` data type without precision, we convert it to `VARCHAR(MAX)`; if it does have precision, we just convert `TEXT` to `VARCHAR`.
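A sketch of the `TEXT` handling described above (outputs are indicative):

```python
import sqlglot

# TEXT without precision becomes VARCHAR(MAX)...
print(sqlglot.transpile("CREATE TABLE t (a TEXT)", write="redshift")[0])
# expected along the lines of: CREATE TABLE t (a VARCHAR(MAX))

# ...while TEXT with an explicit precision becomes plain VARCHAR(n).
print(sqlglot.transpile("CREATE TABLE t (a TEXT(50))", write="redshift")[0])
# expected along the lines of: CREATE TABLE t (a VARCHAR(50))
```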
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- EXPLICIT_UNION
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_FETCH
- LIMIT_ONLY_LITERALS
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- COLUMN_JOIN_MARKS_SUPPORTED
- EXTRACT_ALLOWS_QUOTES
- ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
- UNNEST_WITH_ORDINALITY
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_KEYWORDS
- TABLESAMPLE_WITH_METHOD
- COLLATE_IS_FUNC
- DATA_TYPE_SPECIFIERS_ALLOWED
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- JSON_KEY_VALUE_PAIR_SEP
- INSERT_OVERWRITE
- SUPPORTS_CREATE_TABLE_LIKE
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- SUPPORTS_TO_NUMBER
- OUTER_UNION_MODIFIERS
- COPY_PARAMS_EQ_REQUIRED
- STAR_MAPPING
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- NAMED_PLACEHOLDER_TOKEN
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- computedcolumnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- transformcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- clone_sql
- describe_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- except_op
- fetch_sql
- filter_sql
- hint_sql
- indexparameters_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- intersect_sql
- intersect_op
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- withfill_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- queryoption_sql
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- set_operations
- union_sql
- union_op
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- currentdate_sql
- currenttimestamp_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- alterdiststyle_sql
- altersortkey_sql
- renametable_sql
- renamecolumn_sql
- altertable_sql
- add_column_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- slice_sql
- sub_sql
- trycast_sql
- try_sql
- log_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- tonumber_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- forin_sql
- refresh_sql
- operator_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- generateseries_sql
- struct_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- copyparameter_sql
- credentials_sql
- copy_sql
- semicolon_sql
- sqlglot.dialects.postgres.Postgres.Generator
- SINGLE_STRING_INTERVAL
- RENAME_TABLE_WITH_DB
- JOIN_HINTS
- TABLE_HINTS
- PARAMETER_TOKEN
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_SEED_KEYWORD
- SUPPORTS_SELECT_INTO
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- SUPPORTS_UNLOGGED_TABLES
- LIKE_PROPERTY_INSIDE_SCHEMA
- COPY_HAS_INTO_KEYWORD
- SUPPORTED_JSON_PATH_PARTS
- PROPERTIES_LOCATION
- schemacommentproperty_sql
- commentcolumnconstraint_sql
- bracket_sql
- matchagainst_sql