Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25)
  26from sqlglot.helper import flatten, is_float, is_int, seq_get
  27from sqlglot.tokens import TokenType
  28
  29if t.TYPE_CHECKING:
  30    from sqlglot._typing import E
  31
  32
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a parser builder for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP family.

    Args:
        name: original Snowflake function name, used for the Anonymous fallback.
        kind: target data type of the conversion.
        safe: True for the TRY_* variants, which must not raise on bad input.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # True when the first argument is an integer literal (e.g. an epoch value)
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value and not safe:
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Anything we can't normalize round-trips as an opaque function call
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  70
  71
  72def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  73    expression = parser.build_var_map(args)
  74
  75    if isinstance(expression, exp.StarMap):
  76        return expression
  77
  78    return exp.Struct(
  79        expressions=[
  80            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  81        ]
  82    )
  83
  84
  85def _build_datediff(args: t.List) -> exp.DateDiff:
  86    return exp.DateDiff(
  87        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  88    )
  89
  90
  91def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  92    def _builder(args: t.List) -> E:
  93        return expr_type(
  94            this=seq_get(args, 2),
  95            expression=seq_get(args, 1),
  96            unit=map_date_part(seq_get(args, 0)),
  97        )
  98
  99    return _builder
 100
 101
 102# https://docs.snowflake.com/en/sql-reference/functions/div0
 103def _build_if_from_div0(args: t.List) -> exp.If:
 104    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0)).and_(
 105        exp.Is(this=seq_get(args, 0), expression=exp.null()).not_()
 106    )
 107    true = exp.Literal.number(0)
 108    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
 109    return exp.If(this=cond, true=true, false=false)
 110
 111
 112# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 113def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 114    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 115    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 116
 117
 118# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 119def _build_if_from_nullifzero(args: t.List) -> exp.If:
 120    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
 121    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 122
 123
 124def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 125    flag = expression.text("flag")
 126
 127    if "i" not in flag:
 128        flag += "i"
 129
 130    return self.func(
 131        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 132    )
 133
 134
 135def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 136    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 137
 138    if not regexp_replace.args.get("replacement"):
 139        regexp_replace.set("replacement", exp.Literal.string(""))
 140
 141    return regexp_replace
 142
 143
 144def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 145    def _parse(self: Snowflake.Parser) -> exp.Show:
 146        return self._parse_show_snowflake(*args, **kwargs)
 147
 148    return _parse
 149
 150
 151def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 152    trunc = date_trunc_to_time(args)
 153    trunc.set("unit", map_date_part(trunc.args["unit"]))
 154    return trunc
 155
 156
 157def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 158    """
 159    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 160    so we need to unqualify them.
 161
 162    Example:
 163        >>> from sqlglot import parse_one
 164        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 165        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 166        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 167    """
 168    if isinstance(expression, exp.Pivot) and expression.unpivot:
 169        expression = transforms.unqualify_columns(expression)
 170
 171    return expression
 172
 173
 174def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 175    assert isinstance(expression, exp.Create)
 176
 177    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 178        if expression.this in exp.DataType.NESTED_TYPES:
 179            expression.set("expressions", None)
 180        return expression
 181
 182    props = expression.args.get("properties")
 183    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 184        for schema_expression in expression.this.expressions:
 185            if isinstance(schema_expression, exp.ColumnDef):
 186                column_type = schema_expression.kind
 187                if isinstance(column_type, exp.DataType):
 188                    column_type.transform(_flatten_structured_type, copy=False)
 189
 190    return expression
 191
 192
def _unnest_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite UNNEST(GENERATE_DATE_ARRAY(start, end, step)) in FROM/JOIN into a
    subquery over ARRAY_GENERATE_RANGE, since Snowflake has no date-array generator.

    Only the common `INTERVAL '1' <unit>` step is handled; other steps are left as-is.
    """
    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if (
                isinstance(unnest.parent, (exp.From, exp.Join))
                and len(unnest.expressions) == 1
                and isinstance(unnest.expressions[0], exp.GenerateDateArray)
            ):
                generate_date_array = unnest.expressions[0]
                start = generate_date_array.args.get("start")
                end = generate_date_array.args.get("end")
                step = generate_date_array.args.get("step")

                # Bail out unless the step is exactly an INTERVAL with value '1'
                if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
                    continue

                unit = step.args.get("unit")

                unnest_alias = unnest.args.get("alias")
                if unnest_alias:
                    # Copy so the same alias node isn't attached both to the inner
                    # UNNEST (via unnest.copy() below) and to the outer subquery
                    unnest_alias = unnest_alias.copy()
                    sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
                else:
                    sequence_value_name = "value"

                # We'll add the next sequence value to the starting date and project the result
                date_add = _build_date_time_add(exp.DateAdd)(
                    [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
                ).as_(sequence_value_name)

                # We use DATEDIFF to compute the number of sequence values needed
                number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
                    [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
                )

                unnest.set("expressions", [number_sequence])
                unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))

    return expression
 232
 233
class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake time-format tokens -> strftime-style tokens
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        # NOTE(review): upper/lower variants map differently here ("%d" vs "%-d",
        # "%a" vs "%w") unlike the other pairs — confirm this asymmetry is intended
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 274
 275    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 276        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 277        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 278        if (
 279            isinstance(expression, exp.Identifier)
 280            and isinstance(expression.parent, exp.Table)
 281            and expression.name.lower() == "dual"
 282        ):
 283            return expression  # type: ignore
 284
 285        return super().quote_identifier(expression, identify=identify)
 286
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # `col:path` accesses semi-structured (VARIANT) fields
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # WINDOW can alias a table; MATCH_CONDITION cannot (it introduces a clause)
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 299
        # Snowflake function names -> sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Snowflake's argument order is (value, array); the arguments are
            # swapped so the array ends up in `this`
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN is the 0.5 continuous percentile
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 374
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Drop the base class's special-cased TRIM parser; Snowflake's TRIM takes
        # plain function-style arguments
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <name>, ...
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        # SHOW is a statement (the tokenizer removes it from COMMANDS)
        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }
 414
        # SHOW <kind> variants; TERSE prefixes reuse the same parser since TERSE
        # only affects the output columns, not the syntax
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-char tokens that may appear unquoted inside a staged file path
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Implicit output columns of LATERAL FLATTEN(...)
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose bare IN <name> scope refers to a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Typed lambda params arrive as Casts (see _parse_lambda_arg);
                # only the bare identifiers belong in the signature
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 468
 469        def _negate_range(
 470            self, this: t.Optional[exp.Expression] = None
 471        ) -> t.Optional[exp.Expression]:
 472            if not this:
 473                return this
 474
 475            query = this.args.get("query")
 476            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 477                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 478                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 479                # which can produce different results (most likely a SnowFlake bug).
 480                #
 481                # https://docs.snowflake.com/en/sql-reference/functions/in
 482                # Context: https://github.com/tobymao/sqlglot/issues/3890
 483                return self.expression(
 484                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 485                )
 486
 487            return self.expression(exp.Not, this=this)
 488
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse Snowflake column constraints: [WITH] MASKING POLICY ...,
            [WITH] PROJECTION POLICY ... and [WITH] TAG (...).

            Registered under WITH, MASKING, PROJECTION and TAG in CONSTRAINT_PARSERS;
            when not entered via the optional WITH, the keyword that dispatched here
            must be re-consumed below, hence the retreat.
            """
            if self._prev.token_type != TokenType.WITH:
                # Step back so the dispatching keyword itself is matched below
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # A qualified policy name parses as a Column; store it as a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 514
 515        def _parse_create(self) -> exp.Create | exp.Command:
 516            expression = super()._parse_create()
 517            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 518                # Replace the Table node with the enclosed Identifier
 519                expression.this.replace(expression.this.this)
 520
 521            return expression
 522
 523        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 524        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 525        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 526            this = self._parse_var() or self._parse_type()
 527
 528            if not this:
 529                return None
 530
 531            self._match(TokenType.COMMA)
 532            expression = self._parse_bitwise()
 533            this = map_date_part(this)
 534            name = this.name.upper()
 535
 536            if name.startswith("EPOCH"):
 537                if name == "EPOCH_MILLISECOND":
 538                    scale = 10**3
 539                elif name == "EPOCH_MICROSECOND":
 540                    scale = 10**6
 541                elif name == "EPOCH_NANOSECOND":
 542                    scale = 10**9
 543                else:
 544                    scale = None
 545
 546                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 547                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 548
 549                if scale:
 550                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 551
 552                return to_unix
 553
 554            return self.expression(exp.Extract, this=this, expression=expression)
 555
 556        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 557            if is_map:
 558                # Keys are strings in Snowflake's objects, see also:
 559                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 560                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 561                return self._parse_slice(self._parse_string())
 562
 563            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 564
 565        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 566            lateral = super()._parse_lateral()
 567            if not lateral:
 568                return lateral
 569
 570            if isinstance(lateral.this, exp.Explode):
 571                table_alias = lateral.args.get("alias")
 572                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 573                if table_alias and not table_alias.args.get("columns"):
 574                    table_alias.set("columns", columns)
 575                elif not table_alias:
 576                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 577
 578            return lateral
 579
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, additionally supporting staged file locations.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @stage path means we're querying a staged file
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options: (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # Either a string literal or a named file-format object
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 613
 614        def _parse_id_var(
 615            self,
 616            any_token: bool = True,
 617            tokens: t.Optional[t.Collection[TokenType]] = None,
 618        ) -> t.Optional[exp.Expression]:
 619            if self._match_text_seq("IDENTIFIER", "("):
 620                identifier = (
 621                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 622                    or self._parse_string()
 623                )
 624                self._match_r_paren()
 625                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 626
 627            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 628
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW command into an exp.Show.

            Args:
                this: the object kind being shown, e.g. "TABLES" or "PRIMARY KEYS".
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit kind keyword: infer SCHEMA vs TABLE from the command
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 666
 667        def _parse_location_property(self) -> exp.LocationProperty:
 668            self._match(TokenType.EQ)
 669            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 670
 671        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 672            # Parse either a subquery or a staged file
 673            return (
 674                self._parse_select(table=True, parse_subquery_alias=False)
 675                if self._match(TokenType.L_PAREN, advance=False)
 676                else self._parse_table_parts()
 677            )
 678
 679        def _parse_location_path(self) -> exp.Var:
 680            parts = [self._advance_any(ignore_reserved=True)]
 681
 682            # We avoid consuming a comma token because external tables like @foo and @bar
 683            # can be joined in a query with a comma separator, as well as closing paren
 684            # in case of subqueries
 685            while self._is_connected() and not self._match_set(
 686                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 687            ):
 688                parts.append(self._advance_any(ignore_reserved=True))
 689
 690            return exp.var("".join(part.text for part in parts if part))
 691
 692        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 693            this = super()._parse_lambda_arg()
 694
 695            if not this:
 696                return this
 697
 698            typ = self._parse_types()
 699
 700            if typ:
 701                return self.expression(exp.Cast, this=this, to=typ)
 702
 703            return this
 704
    class Tokenizer(tokens.Tokenizer):
        # Both backslash and a doubled quote escape inside string literals
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage file commands are passed through as opaque commands
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no /*+ ... */ optimizer-hint comments
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        # $ may also start an identifier/variable name
        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is fully parsed (see Parser.STATEMENT_PARSERS), not treated as a command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 747
    class Generator(generator.Generator):
        """SQL generator for the Snowflake dialect.

        Overrides the base generator's feature flags, the expression->SQL
        TRANSFORMS table and a number of per-expression ``*_sql`` methods so
        that parsed ASTs render as valid Snowflake SQL.
        """

        # $-prefixed parameters; no join/table/query hints; aggregates have no
        # FILTER (WHERE ...) clause; LIMIT must be a literal; star-except uses
        # the EXCLUDE keyword.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False

        # Maps sqlglot expression nodes to the Snowflake SQL they render as.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array) — reversed vs. the AST's (array, value).
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive, ARRAY_GENERATE_RANGE is exclusive: add 1 to end.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        # JSON path components Snowflake's GET_PATH syntax can express.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that cannot appear inside a VALUES (...) table clause.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties unwrapped and space-separated, as Snowflake expects."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, falling back to the non-table form when it contains
            map/struct expressions Snowflake cannot put in a VALUES clause."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render data types; typed struct fields are dropped to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER with its optional format, precision and scale arguments."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS; milliseconds are converted to the
            nanoseconds argument Snowflake accepts."""
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render casts; geospatial casts become TO_GEOGRAPHY/TO_GEOMETRY calls."""
            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
                return self.func("TO_GEOGRAPHY", expression.this)
            if expression.is_type(exp.DataType.Type.GEOMETRY):
                return self.func("TO_GEOMETRY", expression.this)

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, demoting it to a plain cast for non-string operands."""
            value = expression.this

            # Annotate on demand so we can inspect the operand's type.
            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """A single-argument LOG is a natural log in the AST — render as LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)), aliasing FLATTEN's
            fixed output columns (seq, key, path, index, value, this)."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # A requested offset column takes the place of FLATTEN's "index".
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render Snowflake's SHOW command with its optional modifiers."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, filling in positional defaults as needed."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # Each later positional argument forces defaults for the earlier ones.
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the object kind to TABLE."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render identity columns with Snowflake's AUTOINCREMENT syntax."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Snowflake clustering keys are always parenthesized."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render struct literals as OBJECT_CONSTRUCT(key1, value1, ...)."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    # Positional (unnamed) fields get synthetic "_<index>" keys.
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        @generator.unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render approximate quantiles via APPROX_PERCENTILE."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with stage file-format, copy options and tags."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"

        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render STR_TO_TIME as (TRY_)TO_TIMESTAMP with the dialect's format."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )
class Snowflake(sqlglot.dialects.dialect.Dialect):
 235class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    # SAMPLE (n) is a percentage in Snowflake, not a row count.
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake date/time format tokens mapped to strftime-style directives,
    # in both upper- and lower-case spellings.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 275
 276    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 277        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 278        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 279        if (
 280            isinstance(expression, exp.Identifier)
 281            and isinstance(expression.parent, exp.Table)
 282            and expression.name.lower() == "dual"
 283        ):
 284            return expression  # type: ignore
 285
 286        return super().quote_identifier(expression, identify=identify)
 287
 288    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # In Snowflake, `col:path` extracts from VARIANT values.
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function names mapped to builders for their AST nodes.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # ARRAY_CONTAINS takes (value, array); the AST stores (array, value).
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # LEN/LENGTH count bytes for binary input in Snowflake.
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <ids> is modeled as a Set with unset=True.
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW subcommands this dialect parses structurally (TERSE variants included).
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Column-constraint keywords that funnel into _parse_with_constraint.
        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens that may appear inside a staged-file path (@stage/dir/file%.ext).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Output columns produced by Snowflake's FLATTEN table function.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            # Strip the Cast wrapper from typed lambda args when listing parameters.
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 469
 470        def _negate_range(
 471            self, this: t.Optional[exp.Expression] = None
 472        ) -> t.Optional[exp.Expression]:
 473            if not this:
 474                return this
 475
 476            query = this.args.get("query")
 477            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 478                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 479                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 480                # which can produce different results (most likely a SnowFlake bug).
 481                #
 482                # https://docs.snowflake.com/en/sql-reference/functions/in
 483                # Context: https://github.com/tobymao/sqlglot/issues/3890
 484                return self.expression(
 485                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 486                )
 487
 488            return self.expression(exp.Not, this=this)
 489
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH MASKING POLICY / PROJECTION POLICY / TAG column constraints.

            Reached via the WITH, MASKING, PROJECTION or TAG constraint keywords;
            the WITH prefix is optional in Snowflake.
            Returns None if none of the recognized constraints follow.
            """
            # If we got here on MASKING/PROJECTION/TAG directly (no WITH), step the
            # cursor back so the keyword itself can be matched below.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Qualified policy names are normalized to dotted form.
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 515
 516        def _parse_create(self) -> exp.Create | exp.Command:
 517            expression = super()._parse_create()
 518            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 519                # Replace the Table node with the enclosed Identifier
 520                expression.this.replace(expression.this.this)
 521
 522            return expression
 523
 524        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 525        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 526        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 527            this = self._parse_var() or self._parse_type()
 528
 529            if not this:
 530                return None
 531
 532            self._match(TokenType.COMMA)
 533            expression = self._parse_bitwise()
 534            this = map_date_part(this)
 535            name = this.name.upper()
 536
 537            if name.startswith("EPOCH"):
 538                if name == "EPOCH_MILLISECOND":
 539                    scale = 10**3
 540                elif name == "EPOCH_MICROSECOND":
 541                    scale = 10**6
 542                elif name == "EPOCH_NANOSECOND":
 543                    scale = 10**9
 544                else:
 545                    scale = None
 546
 547                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 548                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 549
 550                if scale:
 551                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 552
 553                return to_unix
 554
 555            return self.expression(exp.Extract, this=this, expression=expression)
 556
 557        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 558            if is_map:
 559                # Keys are strings in Snowflake's objects, see also:
 560                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 561                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 562                return self._parse_slice(self._parse_string())
 563
 564            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 565
 566        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 567            lateral = super()._parse_lateral()
 568            if not lateral:
 569                return lateral
 570
 571            if isinstance(lateral.this, exp.Explode):
 572                table_alias = lateral.args.get("alias")
 573                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 574                if table_alias and not table_alias.args.get("columns"):
 575                    table_alias.set("columns", columns)
 576                elif not table_alias:
 577                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 578
 579            return lateral
 580
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            A staged file (a string literal or an @stage path) can be queried like a
            table and may carry parenthesized FILE_FORMAT / PATTERN options.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional option list, e.g. (FILE_FORMAT => 'fmt', PATTERN => '.*csv')
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string literal or a named file format,
                        # which parses like a (possibly qualified) table name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                # Not a staged file: defer to the generic table-name parser
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 614
 615        def _parse_id_var(
 616            self,
 617            any_token: bool = True,
 618            tokens: t.Optional[t.Collection[TokenType]] = None,
 619        ) -> t.Optional[exp.Expression]:
 620            if self._match_text_seq("IDENTIFIER", "("):
 621                identifier = (
 622                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 623                    or self._parse_string()
 624                )
 625                self._match_r_paren()
 626                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 627
 628            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 629
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW statement (after SHOW [TERSE] <this>)."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # e.g. IN DATABASE <name> / IN SCHEMA <name>
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare IN <name>: infer whether the scope is a schema or a table
                    # from the kind of object being shown
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    # NOTE: these arguments consume tokens, so their order here
                    # mirrors the clause order in the SHOW grammar
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 667
 668        def _parse_location_property(self) -> exp.LocationProperty:
 669            self._match(TokenType.EQ)
 670            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 671
 672        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 673            # Parse either a subquery or a staged file
 674            return (
 675                self._parse_select(table=True, parse_subquery_alias=False)
 676                if self._match(TokenType.L_PAREN, advance=False)
 677                else self._parse_table_parts()
 678            )
 679
 680        def _parse_location_path(self) -> exp.Var:
 681            parts = [self._advance_any(ignore_reserved=True)]
 682
 683            # We avoid consuming a comma token because external tables like @foo and @bar
 684            # can be joined in a query with a comma separator, as well as closing paren
 685            # in case of subqueries
 686            while self._is_connected() and not self._match_set(
 687                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 688            ):
 689                parts.append(self._advance_any(ignore_reserved=True))
 690
 691            return exp.var("".join(part.text for part in parts if part))
 692
 693        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 694            this = super()._parse_lambda_arg()
 695
 696            if not this:
 697                return this
 698
 699            typ = self._parse_types()
 700
 701            if typ:
 702                return self.expression(exp.Cast, this=this, to=typ)
 703
 704            return this
 705
    class Tokenizer(tokens.Tokenizer):
        # Both a backslash and a doubled single quote escape inside string literals
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        # Snowflake does not support nested block comments
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no "/*+ ... */" hint comments, so don't treat "/*+" specially
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        # "$" may also appear inside identifiers/variables (e.g. session variables)
        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed into a proper expression (see Parser._parse_show_snowflake)
        # rather than being swallowed as an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 748
    class Generator(generator.Generator):
        """Generates Snowflake SQL from sqlglot expression trees."""

        # Placeholder parameters render as $1, $2, ...
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        # COLLATE is invoked as a function, e.g. COLLATE(col, 'spec')
        COLLATE_IS_FUNC = True
        # LIMIT only accepts literal values
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT (...) as EXCLUDE
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False

        # Per-expression SQL rendering overrides
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array), i.e. reversed argument order
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE has an exclusive end, so add 1 to re-include it
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that can't appear inside a VALUES table (see values_sql)
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties unwrapped and space-separated after the object name."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, disabling the table form when any row contains an
            expression Snowflake can't place inside a VALUES clause."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, collapsing typed struct fields to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER(<expr> [, <format>] [, <precision>] [, <scale>])."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS; Snowflake has no milliseconds argument,
            so milliseconds are folded into the nanoseconds argument."""
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render a cast, using TO_GEOGRAPHY / TO_GEOMETRY for spatial targets
            since CAST(... AS GEOGRAPHY/GEOMETRY) is not valid syntax."""
            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
                return self.func("TO_GEOGRAPHY", expression.this)
            if expression.is_type(exp.DataType.Type.GEOMETRY):
                return self.func("TO_GEOMETRY", expression.this)

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to a plain CAST for non-string inputs."""
            value = expression.this

            if value.type is None:
                # Annotate lazily so we only pay for type inference when needed
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render LOG; the single-argument form is the natural log, i.e. LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Transpile UNNEST into TABLE(FLATTEN(INPUT => ...)), aliasing all six
            FLATTEN output columns so positional column aliases line up."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN outputs: SEQ, KEY, PATH, INDEX, VALUE, THIS (in this order)
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # Reuse an explicit UNNEST ... WITH OFFSET alias for the INDEX column
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW statement from the args collected by the parser."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, filling in positional defaults as needed."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # Each earlier positional argument must be present if a later one is
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE <kind> <name> [<expressions>]."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            # NOTE(review): kind_value is always truthy here due to the "TABLE"
            # fallback, so this conditional never produces the empty string
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render CLUSTER BY with its keys wrapped in parentheses."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, ...)."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    # Unnamed fields get positional keys "_0", "_1", ...
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        @generator.unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE; weight/accuracy args are unsupported."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with stage file-format/copy options and tags."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"

        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render TO_TIMESTAMP / TRY_TO_TIMESTAMP depending on the safe flag."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
276    def quote_identifier(self, expression: E, identify: bool = True) -> E:
277        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
278        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
279        if (
280            isinstance(expression, exp.Identifier)
281            and isinstance(expression.parent, exp.Table)
282            and expression.name.lower() == "dual"
283        ):
284            return expression  # type: ignore
285
286        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\\n`) to the character it represents (e.g. a literal newline).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
288    class Parser(parser.Parser):
           """Snowflake-specific parser overrides: function-name mappings, SHOW
           commands, staged-file table references, column-level policy/tag
           constraints, and typed lambda arguments."""
289        IDENTIFY_PIVOT_STRINGS = True
290        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
291        COLON_IS_VARIANT_EXTRACT = True
292
293        ID_VAR_TOKENS = {
294            *parser.Parser.ID_VAR_TOKENS,
295            TokenType.MATCH_CONDITION,
296        }
297
298        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
299        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
300
301        FUNCTIONS = {
302            **parser.Parser.FUNCTIONS,
303            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
304            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
               # Snowflake's ARRAY_CONTAINS(value, array) has the argument order reversed
               # relative to exp.ArrayContains(this=array, expression=value), hence the swap.
305            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
306                this=seq_get(args, 1), expression=seq_get(args, 0)
307            ),
308            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
309                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
310                start=seq_get(args, 0),
311                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
312                step=seq_get(args, 2),
313            ),
314            "BITXOR": binary_from_function(exp.BitwiseXor),
315            "BIT_XOR": binary_from_function(exp.BitwiseXor),
316            "BOOLXOR": binary_from_function(exp.Xor),
317            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
318            "DATE_TRUNC": _date_trunc_to_time,
319            "DATEADD": _build_date_time_add(exp.DateAdd),
320            "DATEDIFF": _build_datediff,
321            "DIV0": _build_if_from_div0,
322            "FLATTEN": exp.Explode.from_arg_list,
323            "GET_PATH": lambda args, dialect: exp.JSONExtract(
324                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
325            ),
326            "IFF": exp.If.from_arg_list,
327            "LAST_DAY": lambda args: exp.LastDay(
328                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
329            ),
               # binary=True flags Snowflake's LEN/LENGTH behavior for binary input
               # (presumably a byte length — confirm against Snowflake docs).
330            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
331            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
332            "LISTAGG": exp.GroupConcat.from_arg_list,
               # MEDIAN(x) is normalized to the equivalent PERCENTILE_CONT(0.5) form
333            "MEDIAN": lambda args: exp.PercentileCont(
334                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
335            ),
336            "NULLIFZERO": _build_if_from_nullifzero,
337            "OBJECT_CONSTRUCT": _build_object_construct,
338            "REGEXP_REPLACE": _build_regexp_replace,
339            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
340                this=seq_get(args, 0),
341                expression=seq_get(args, 1),
342                position=seq_get(args, 2),
343                occurrence=seq_get(args, 3),
344                parameters=seq_get(args, 4),
                   # group 0 (the whole match) when no explicit group is given
345                group=seq_get(args, 5) or exp.Literal.number(0),
346            ),
347            "RLIKE": exp.RegexpLike.from_arg_list,
348            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
349            "TIMEADD": _build_date_time_add(exp.TimeAdd),
350            "TIMEDIFF": _build_datediff,
351            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
352            "TIMESTAMPDIFF": _build_datediff,
353            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
354            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
355            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
356            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
357            "TRY_TO_TIMESTAMP": _build_datetime(
358                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
359            ),
360            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
361            "TO_NUMBER": lambda args: exp.ToNumber(
362                this=seq_get(args, 0),
363                format=seq_get(args, 1),
364                precision=seq_get(args, 2),
365                scale=seq_get(args, 3),
366            ),
367            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
368            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
369            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
370            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
371            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
372            "TO_VARCHAR": exp.ToChar.from_arg_list,
373            "ZEROIFNULL": _build_if_from_zeroifnull,
374        }
375
376        FUNCTION_PARSERS = {
377            **parser.Parser.FUNCTION_PARSERS,
378            "DATE_PART": lambda self: self._parse_date_part(),
379            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
380        }
           # Let TRIM be parsed as a regular function instead of via the base
           # parser's dedicated TRIM handler.
381        FUNCTION_PARSERS.pop("TRIM")
382
383        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
384
385        RANGE_PARSERS = {
386            **parser.Parser.RANGE_PARSERS,
387            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
388            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
389        }
390
391        ALTER_PARSERS = {
392            **parser.Parser.ALTER_PARSERS,
393            "UNSET": lambda self: self.expression(
394                exp.Set,
395                tag=self._match_text_seq("TAG"),
396                expressions=self._parse_csv(self._parse_id_var),
397                unset=True,
398            ),
399        }
400
401        STATEMENT_PARSERS = {
402            **parser.Parser.STATEMENT_PARSERS,
403            TokenType.SHOW: lambda self: self._parse_show(),
404        }
405
406        PROPERTY_PARSERS = {
407            **parser.Parser.PROPERTY_PARSERS,
408            "LOCATION": lambda self: self._parse_location_property(),
409        }
410
411        TYPE_CONVERTERS = {
412            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
413            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
414        }
415
416        SHOW_PARSERS = {
417            "SCHEMAS": _show_parser("SCHEMAS"),
418            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
419            "OBJECTS": _show_parser("OBJECTS"),
420            "TERSE OBJECTS": _show_parser("OBJECTS"),
421            "TABLES": _show_parser("TABLES"),
422            "TERSE TABLES": _show_parser("TABLES"),
423            "VIEWS": _show_parser("VIEWS"),
424            "TERSE VIEWS": _show_parser("VIEWS"),
425            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
426            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
427            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
428            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
429            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
430            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
431            "SEQUENCES": _show_parser("SEQUENCES"),
432            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
433            "COLUMNS": _show_parser("COLUMNS"),
434            "USERS": _show_parser("USERS"),
435            "TERSE USERS": _show_parser("USERS"),
436        }
437
438        CONSTRAINT_PARSERS = {
439            **parser.Parser.CONSTRAINT_PARSERS,
440            "WITH": lambda self: self._parse_with_constraint(),
441            "MASKING": lambda self: self._parse_with_constraint(),
442            "PROJECTION": lambda self: self._parse_with_constraint(),
443            "TAG": lambda self: self._parse_with_constraint(),
444        }
445
446        STAGED_FILE_SINGLE_TOKENS = {
447            TokenType.DOT,
448            TokenType.MOD,
449            TokenType.SLASH,
450        }
451
452        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
453
454        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
455
456        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
457
458        LAMBDAS = {
459            **parser.Parser.LAMBDAS,
460            TokenType.ARROW: lambda self, expressions: self.expression(
461                exp.Lambda,
462                this=self._replace_lambda(
463                    self._parse_assignment(),
464                    expressions,
465                ),
                   # Typed lambda args are parsed as Casts (see _parse_lambda_arg);
                   # strip the Cast so the Lambda's parameter list holds bare names.
466                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
467            ),
468        }
469
470        def _negate_range(
471            self, this: t.Optional[exp.Expression] = None
472        ) -> t.Optional[exp.Expression]:
473            if not this:
474                return this
475
476            query = this.args.get("query")
477            if isinstance(this, exp.In) and isinstance(query, exp.Query):
478                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
479                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
480                # which can produce different results (most likely a Snowflake bug).
481                #
482                # https://docs.snowflake.com/en/sql-reference/functions/in
483                # Context: https://github.com/tobymao/sqlglot/issues/3890
484                return self.expression(
485                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
486                )
487
488            return self.expression(exp.Not, this=this)
489
490        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
               # CONSTRAINT_PARSERS routes WITH, MASKING, PROJECTION and TAG here.
               # When the consumed keyword wasn't WITH, step back one token so the
               # keyword itself can be (re-)matched by the checks below.
491            if self._prev.token_type != TokenType.WITH:
492                self._retreat(self._index - 1)
493
494            if self._match_text_seq("MASKING", "POLICY"):
495                policy = self._parse_column()
496                return self.expression(
497                    exp.MaskingPolicyColumnConstraint,
498                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
499                    expressions=self._match(TokenType.USING)
500                    and self._parse_wrapped_csv(self._parse_id_var),
501                )
502            if self._match_text_seq("PROJECTION", "POLICY"):
503                policy = self._parse_column()
504                return self.expression(
505                    exp.ProjectionPolicyColumnConstraint,
506                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
507                )
508            if self._match(TokenType.TAG):
509                return self.expression(
510                    exp.TagColumnConstraint,
511                    expressions=self._parse_wrapped_csv(self._parse_property),
512                )
513
514            return None
515
516        def _parse_create(self) -> exp.Create | exp.Command:
517            expression = super()._parse_create()
518            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
519                # Replace the Table node with the enclosed Identifier
520                expression.this.replace(expression.this.this)
521
522            return expression
523
524        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
525        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
526        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
527            this = self._parse_var() or self._parse_type()
528
529            if not this:
530                return None
531
532            self._match(TokenType.COMMA)
533            expression = self._parse_bitwise()
534            this = map_date_part(this)
535            name = this.name.upper()
536
               # DATE_PART(EPOCH[_MILLISECOND|_MICROSECOND|_NANOSECOND], x) becomes
               # TIME_TO_UNIX(CAST(x AS TIMESTAMP)), scaled up for the sub-second units.
537            if name.startswith("EPOCH"):
538                if name == "EPOCH_MILLISECOND":
539                    scale = 10**3
540                elif name == "EPOCH_MICROSECOND":
541                    scale = 10**6
542                elif name == "EPOCH_NANOSECOND":
543                    scale = 10**9
544                else:
545                    scale = None
546
547                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
548                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
549
550                if scale:
551                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
552
553                return to_unix
554
555            return self.expression(exp.Extract, this=this, expression=expression)
556
557        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
558            if is_map:
559                # Keys are strings in Snowflake's objects, see also:
560                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
561                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
562                return self._parse_slice(self._parse_string())
563
564            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
565
566        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
567            lateral = super()._parse_lateral()
568            if not lateral:
569                return lateral
570
               # FLATTEN is parsed into exp.Explode; attach its canonical output
               # column names (FLATTEN_COLUMNS) when no explicit aliases were given.
571            if isinstance(lateral.this, exp.Explode):
572                table_alias = lateral.args.get("alias")
573                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
574                if table_alias and not table_alias.args.get("columns"):
575                    table_alias.set("columns", columns)
576                elif not table_alias:
577                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
578
579            return lateral
580
581        def _parse_table_parts(
582            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
583        ) -> exp.Table:
584            # https://docs.snowflake.com/en/user-guide/querying-stage
585            if self._match(TokenType.STRING, advance=False):
586                table = self._parse_string()
587            elif self._match_text_seq("@", advance=False):
588                table = self._parse_location_path()
589            else:
590                table = None
591
592            if table:
593                file_format = None
594                pattern = None
595
                   # Optional staged-file options: @stage (FILE_FORMAT => ..., PATTERN => ...)
596                wrapped = self._match(TokenType.L_PAREN)
597                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
598                    if self._match_text_seq("FILE_FORMAT", "=>"):
599                        file_format = self._parse_string() or super()._parse_table_parts(
600                            is_db_reference=is_db_reference
601                        )
602                    elif self._match_text_seq("PATTERN", "=>"):
603                        pattern = self._parse_string()
604                    else:
605                        break
606
607                    self._match(TokenType.COMMA)
608
609                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
610            else:
611                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
612
613            return table
614
615        def _parse_id_var(
616            self,
617            any_token: bool = True,
618            tokens: t.Optional[t.Collection[TokenType]] = None,
619        ) -> t.Optional[exp.Expression]:
               # IDENTIFIER(<name or string>) is preserved as an anonymous
               # IDENTIFIER(...) call wrapping the inner identifier/string.
620            if self._match_text_seq("IDENTIFIER", "("):
621                identifier = (
622                    super()._parse_id_var(any_token=any_token, tokens=tokens)
623                    or self._parse_string()
624                )
625                self._match_r_paren()
626                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
627
628            return super()._parse_id_var(any_token=any_token, tokens=tokens)
629
630        def _parse_show_snowflake(self, this: str) -> exp.Show:
631            scope = None
632            scope_kind = None
633
634            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
635            # which is syntactically valid but has no effect on the output
636            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
637
638            history = self._match_text_seq("HISTORY")
639
640            like = self._parse_string() if self._match(TokenType.LIKE) else None
641
642            if self._match(TokenType.IN):
643                if self._match_text_seq("ACCOUNT"):
644                    scope_kind = "ACCOUNT"
645                elif self._match_set(self.DB_CREATABLES):
646                    scope_kind = self._prev.text.upper()
647                    if self._curr:
648                        scope = self._parse_table_parts()
649                elif self._curr:
                       # No explicit kind after IN: infer it from the SHOW target
650                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
651                    scope = self._parse_table_parts()
652
653            return self.expression(
654                exp.Show,
655                **{
656                    "terse": terse,
657                    "this": this,
658                    "history": history,
659                    "like": like,
660                    "scope": scope,
661                    "scope_kind": scope_kind,
662                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
663                    "limit": self._parse_limit(),
664                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
665                },
666            )
667
668        def _parse_location_property(self) -> exp.LocationProperty:
669            self._match(TokenType.EQ)
670            return self.expression(exp.LocationProperty, this=self._parse_location_path())
671
672        def _parse_file_location(self) -> t.Optional[exp.Expression]:
673            # Parse either a subquery or a staged file
674            return (
675                self._parse_select(table=True, parse_subquery_alias=False)
676                if self._match(TokenType.L_PAREN, advance=False)
677                else self._parse_table_parts()
678            )
679
680        def _parse_location_path(self) -> exp.Var:
               # Greedily consume connected tokens into a single Var (a stage path
               # like @db.schema.stage/path is not a normal identifier sequence).
681            parts = [self._advance_any(ignore_reserved=True)]
682
683            # We avoid consuming a comma token because external tables like @foo and @bar
684            # can be joined in a query with a comma separator, as well as closing paren
685            # in case of subqueries
686            while self._is_connected() and not self._match_set(
687                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
688            ):
689                parts.append(self._advance_any(ignore_reserved=True))
690
691            return exp.var("".join(part.text for part in parts if part))
692
693        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
               # A lambda parameter may carry a type (e.g. `x INT -> ...`); a typed
               # parameter is represented as a Cast over the bare argument, which the
               # ARROW handler in LAMBDAS later unwraps for the parameter list.
694            this = super()._parse_lambda_arg()
695
696            if not this:
697                return this
698
699            typ = self._parse_types()
700
701            if typ:
702                return self.expression(exp.Cast, this=this, to=typ)
703
704            return this

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.DATE32: 'DATE32'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.ALL: 'ALL'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TAG: 'TAG'>, <TokenType.CASE: 'CASE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.IS: 'IS'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.NESTED: 'NESTED'>, <TokenType.LEFT: 'LEFT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.JSON: 'JSON'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.SUPER: 'SUPER'>, <TokenType.INT256: 'INT256'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.COPY: 'COPY'>, <TokenType.RANGE: 'RANGE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.INET: 'INET'>, <TokenType.KILL: 'KILL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.SET: 'SET'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MONEY: 'MONEY'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.FIRST: 'FIRST'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.INT: 'INT'>, <TokenType.CHAR: 'CHAR'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.CURRENT_DATE: 
'CURRENT_DATE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.ENUM: 'ENUM'>, <TokenType.BIT: 'BIT'>, <TokenType.RENAME: 'RENAME'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.CACHE: 'CACHE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UINT256: 'UINT256'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.ANTI: 'ANTI'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TOP: 'TOP'>, <TokenType.SOME: 'SOME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.SEMI: 'SEMI'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.MAP: 'MAP'>, <TokenType.APPLY: 'APPLY'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.SHOW: 'SHOW'>, <TokenType.DIV: 'DIV'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.END: 'END'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.ESCAPE: 'ESCAPE'>, 
<TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.IPV4: 'IPV4'>, <TokenType.ROW: 'ROW'>, <TokenType.INT128: 'INT128'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.KEEP: 'KEEP'>, <TokenType.NEXT: 'NEXT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.UINT: 'UINT'>, <TokenType.ASOF: 'ASOF'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.ANY: 'ANY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.NAME: 'NAME'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.ASC: 'ASC'>, <TokenType.FILTER: 'FILTER'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.CUBE: 'CUBE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.DATE: 'DATE'>, <TokenType.PARTITION: 
'PARTITION'>, <TokenType.FULL: 'FULL'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.USE: 'USE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.LOAD: 'LOAD'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.VAR: 'VAR'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.XML: 'XML'>, <TokenType.DESC: 'DESC'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.LIST: 'LIST'>, <TokenType.MERGE: 'MERGE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.NULL: 'NULL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.UBIGINT: 'UBIGINT'>}
TABLE_ALIAS_TOKENS = {<TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.DATE32: 'DATE32'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.ALL: 'ALL'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TAG: 'TAG'>, <TokenType.CASE: 'CASE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.IS: 'IS'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.NESTED: 'NESTED'>, <TokenType.JSON: 'JSON'>, <TokenType.DELETE: 'DELETE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.SUPER: 'SUPER'>, <TokenType.INT256: 'INT256'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.COPY: 'COPY'>, <TokenType.RANGE: 'RANGE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.INET: 'INET'>, <TokenType.KILL: 'KILL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.SET: 'SET'>, <TokenType.ROWS: 'ROWS'>, <TokenType.MONEY: 'MONEY'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.INDEX: 'INDEX'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.FIRST: 'FIRST'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.INT: 'INT'>, <TokenType.CHAR: 'CHAR'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.DECIMAL: 'DECIMAL'>, 
<TokenType.PIVOT: 'PIVOT'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.ENUM: 'ENUM'>, <TokenType.BIT: 'BIT'>, <TokenType.RENAME: 'RENAME'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.CACHE: 'CACHE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.UINT256: 'UINT256'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TOP: 'TOP'>, <TokenType.SOME: 'SOME'>, <TokenType.ANTI: 'ANTI'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.TIME: 'TIME'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.MAP: 'MAP'>, <TokenType.SEMI: 'SEMI'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.SHOW: 'SHOW'>, <TokenType.DIV: 'DIV'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.END: 'END'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.FALSE: 
'FALSE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.IPV4: 'IPV4'>, <TokenType.ROW: 'ROW'>, <TokenType.INT128: 'INT128'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.KEEP: 'KEEP'>, <TokenType.NEXT: 'NEXT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.UINT: 'UINT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.ANY: 'ANY'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.NAME: 'NAME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.ASC: 'ASC'>, <TokenType.FILTER: 'FILTER'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.CUBE: 'CUBE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.DATE: 'DATE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.USE: 'USE'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.SCHEMA: 
'SCHEMA'>, <TokenType.LOAD: 'LOAD'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.VAR: 'VAR'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.XML: 'XML'>, <TokenType.DESC: 'DESC'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.LIST: 'LIST'>, <TokenType.MERGE: 'MERGE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.NULL: 'NULL'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.UBIGINT: 'UBIGINT'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function 
Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 
'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 
'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 
'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'INSTR': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'MEDIAN': <function Snowflake.Parser.<lambda>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function Snowflake.Parser.<lambda>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function 
_build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMETZ: 'TIMETZ'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>, <TokenType.DOT: 'DOT'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'VIEWS', 'UNIQUE KEYS', 'TABLES', 'IMPORTED KEYS', 'SEQUENCES', 'OBJECTS'}
NON_TABLE_CREATABLES = {'STREAMLIT', 'STORAGE INTEGRATION', 'WAREHOUSE', 'TAG'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
OPERATION_MODIFIERS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
706    class Tokenizer(tokens.Tokenizer):
707        STRING_ESCAPES = ["\\", "'"]
708        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
709        RAW_STRINGS = ["$$"]
710        COMMENTS = ["--", "//", ("/*", "*/")]
711        NESTED_COMMENTS = False
712
713        KEYWORDS = {
714            **tokens.Tokenizer.KEYWORDS,
715            "BYTEINT": TokenType.INT,
716            "CHAR VARYING": TokenType.VARCHAR,
717            "CHARACTER VARYING": TokenType.VARCHAR,
718            "EXCLUDE": TokenType.EXCEPT,
719            "ILIKE ANY": TokenType.ILIKE_ANY,
720            "LIKE ANY": TokenType.LIKE_ANY,
721            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
722            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
723            "MINUS": TokenType.EXCEPT,
724            "NCHAR VARYING": TokenType.VARCHAR,
725            "PUT": TokenType.COMMAND,
726            "REMOVE": TokenType.COMMAND,
727            "RM": TokenType.COMMAND,
728            "SAMPLE": TokenType.TABLE_SAMPLE,
729            "SQL_DOUBLE": TokenType.DOUBLE,
730            "SQL_VARCHAR": TokenType.VARCHAR,
731            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
732            "TAG": TokenType.TAG,
733            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
734            "TOP": TokenType.TOP,
735            "WAREHOUSE": TokenType.WAREHOUSE,
736            "STREAMLIT": TokenType.STREAMLIT,
737        }
738        KEYWORDS.pop("/*+")
739
740        SINGLE_TOKENS = {
741            **tokens.Tokenizer.SINGLE_TOKENS,
742            "$": TokenType.PARAMETER,
743        }
744
745        VAR_SINGLE_TOKENS = {"$"}
746
747        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': 
<TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': 
<TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 
'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 
'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': 
<TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>, <TokenType.RENAME: 'RENAME'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 749    class Generator(generator.Generator):
            # Snowflake-specific SQL generator.  The flags below toggle base-class
            # behavior; the mappings override per-expression rendering.
 750        PARAMETER_TOKEN = "$"
 751        MATCHED_BY_SOURCE = False
 752        SINGLE_STRING_INTERVAL = True
 753        JOIN_HINTS = False
 754        TABLE_HINTS = False
 755        QUERY_HINTS = False
 756        AGGREGATE_FILTER_SUPPORTED = False
 757        SUPPORTS_TABLE_COPY = False
 758        COLLATE_IS_FUNC = True
 759        LIMIT_ONLY_LITERALS = True
 760        JSON_KEY_VALUE_PAIR_SEP = ","
 761        INSERT_OVERWRITE = " OVERWRITE INTO"
 762        STRUCT_DELIMITER = ("(", ")")
 763        COPY_PARAMS_ARE_WRAPPED = False
 764        COPY_PARAMS_EQ_REQUIRED = True
 765        STAR_EXCEPT = "EXCLUDE"
 766        SUPPORTS_EXPLODING_PROJECTIONS = False
 767        ARRAY_CONCAT_IS_VAR_LEN = False
 768        SUPPORTS_CONVERT_TIMEZONE = True
 769        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
 770
            # Per-expression SQL overrides, merged on top of the base generator's.
 771        TRANSFORMS = {
 772            **generator.Generator.TRANSFORMS,
 773            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
 774            exp.ArgMax: rename_func("MAX_BY"),
 775            exp.ArgMin: rename_func("MIN_BY"),
 776            exp.Array: inline_array_sql,
 777            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
 778            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
 779            exp.AtTimeZone: lambda self, e: self.func(
 780                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
 781            ),
 782            exp.BitwiseXor: rename_func("BITXOR"),
 783            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
 784            exp.DateAdd: date_delta_sql("DATEADD"),
 785            exp.DateDiff: date_delta_sql("DATEDIFF"),
 786            exp.DateStrToDate: datestrtodate_sql,
 787            exp.DayOfMonth: rename_func("DAYOFMONTH"),
 788            exp.DayOfWeek: rename_func("DAYOFWEEK"),
 789            exp.DayOfYear: rename_func("DAYOFYEAR"),
 790            exp.Explode: rename_func("FLATTEN"),
 791            exp.Extract: rename_func("DATE_PART"),
 792            exp.FromTimeZone: lambda self, e: self.func(
 793                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
 794            ),
            # NOTE(review): the +1 suggests GenerateSeries' end is inclusive while
            # ARRAY_GENERATE_RANGE's upper bound is exclusive — confirm upstream.
 795            exp.GenerateSeries: lambda self, e: self.func(
 796                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
 797            ),
 798            exp.GroupConcat: rename_func("LISTAGG"),
 799            exp.If: if_sql(name="IFF", false_value="NULL"),
 800            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
 801            exp.JSONExtractScalar: lambda self, e: self.func(
 802                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
 803            ),
 804            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
 805            exp.JSONPathRoot: lambda *_: "",
 806            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
 807            exp.LogicalOr: rename_func("BOOLOR_AGG"),
 808            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 809            exp.Max: max_or_greatest,
 810            exp.Min: min_or_least,
 811            exp.ParseJSON: lambda self, e: self.func(
 812                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
 813            ),
 814            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
 815            exp.PercentileCont: transforms.preprocess(
 816                [transforms.add_within_group_for_percentiles]
 817            ),
 818            exp.PercentileDisc: transforms.preprocess(
 819                [transforms.add_within_group_for_percentiles]
 820            ),
 821            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
 822            exp.RegexpILike: _regexpilike_sql,
 823            exp.Rand: rename_func("RANDOM"),
 824            exp.Select: transforms.preprocess(
 825                [
 826                    transforms.eliminate_distinct_on,
 827                    transforms.explode_to_unnest(),
 828                    transforms.eliminate_semi_and_anti_joins,
 829                    _unnest_generate_date_array,
 830                ]
 831            ),
 832            exp.SHA: rename_func("SHA1"),
 833            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
 834            exp.StartsWith: rename_func("STARTSWITH"),
 835            exp.StrPosition: lambda self, e: self.func(
 836                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
 837            ),
 838            exp.Stuff: rename_func("INSERT"),
 839            exp.TimeAdd: date_delta_sql("TIMEADD"),
 840            exp.TimestampDiff: lambda self, e: self.func(
 841                "TIMESTAMPDIFF", e.unit, e.expression, e.this
 842            ),
 843            exp.TimestampTrunc: timestamptrunc_sql(),
 844            exp.TimeStrToTime: timestrtotime_sql,
 845            exp.TimeToStr: lambda self, e: self.func(
 846                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
 847            ),
 848            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
 849            exp.ToArray: rename_func("TO_ARRAY"),
 850            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
 851            exp.ToDouble: rename_func("TO_DOUBLE"),
 852            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
 853            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
 854            exp.TsOrDsToDate: lambda self, e: self.func(
 855                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
 856            ),
 857            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
 858            exp.Uuid: rename_func("UUID_STRING"),
 859            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 860            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
 861            exp.Xor: rename_func("BOOLXOR"),
 862        }
 863
            # JSON path parts that can be rendered natively; others are unsupported.
 864        SUPPORTED_JSON_PATH_PARTS = {
 865            exp.JSONPathKey,
 866            exp.JSONPathRoot,
 867            exp.JSONPathSubscript,
 868        }
 869
            # NESTED and STRUCT both render as Snowflake's OBJECT type.
 870        TYPE_MAPPING = {
 871            **generator.Generator.TYPE_MAPPING,
 872            exp.DataType.Type.NESTED: "OBJECT",
 873            exp.DataType.Type.STRUCT: "OBJECT",
 874        }
 875
 876        PROPERTIES_LOCATION = {
 877            **generator.Generator.PROPERTIES_LOCATION,
 878            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
 879            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
 880        }
 881
            # Expressions that force values_sql to fall back from the
            # VALUES-as-table rendering (see values_sql).
 882        UNSUPPORTED_VALUES_EXPRESSIONS = {
 883            exp.Map,
 884            exp.StarMap,
 885            exp.Struct,
 886            exp.VarMap,
 887        }
 888
 889        def with_properties(self, properties: exp.Properties) -> str:
                # Properties render unwrapped (no parentheses) and space-separated,
                # preceded by the generator's line separator.
 890            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 891
 892        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
                # Disable the VALUES-as-table form when the tree contains any
                # expression in UNSUPPORTED_VALUES_EXPRESSIONS (maps/structs);
                # the base generator then uses its fallback rendering.
 893            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 894                values_as_table = False
 895
 896            return super().values_sql(expression, values_as_table=values_as_table)
 897
 898        def datatype_sql(self, expression: exp.DataType) -> str:
 899            expressions = expression.expressions
 900            if (
 901                expressions
 902                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 903                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 904            ):
 905                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                    # A bare DataType field carries no key, so the typed
                    # OBJECT(<key> <type>, ...) form can't be produced;
                    # emit the untyped OBJECT instead.
 906                return "OBJECT"
 907
 908            return super().datatype_sql(expression)
 909
 910        def tonumber_sql(self, expression: exp.ToNumber) -> str:
                # TO_NUMBER(<expr> [, <format>] [, <precision> [, <scale>]]);
                # absent optional args are None and dropped by self.func.
 911            return self.func(
 912                "TO_NUMBER",
 913                expression.this,
 914                expression.args.get("format"),
 915                expression.args.get("precision"),
 916                expression.args.get("scale"),
 917            )
 918
 919        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 920            milli = expression.args.get("milli")
 921            if milli is not None:
                    # TIMESTAMP_FROM_PARTS takes nanoseconds, not milliseconds:
                    # fold the milli arg into nano (1 ms = 1,000,000 ns).
 922                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 923                expression.set("nano", milli_to_nano)
 924
 925            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 926
 927        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
                # Spatial types are converted with TO_GEOGRAPHY / TO_GEOMETRY
                # rather than CAST syntax.
 928            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 929                return self.func("TO_GEOGRAPHY", expression.this)
 930            if expression.is_type(exp.DataType.Type.GEOMETRY):
 931                return self.func("TO_GEOMETRY", expression.this)
 932
 933            return super().cast_sql(expression, safe_prefix=safe_prefix)
 934
 935        def trycast_sql(self, expression: exp.TryCast) -> str:
 936            value = expression.this
 937
 938            if value.type is None:
                    # No type annotated yet — infer it so we can tell whether
                    # the operand is textual.
 939                from sqlglot.optimizer.annotate_types import annotate_types
 940
 941                value = annotate_types(value)
 942
 943            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 944                return super().trycast_sql(expression)
 945
 946            # TRY_CAST only works for string values in Snowflake
 947            return self.cast_sql(expression)
 948
 949        def log_sql(self, expression: exp.Log) -> str:
                # Single-argument LOG renders as natural log LN; the
                # two-argument form falls through to the base generator.
 950            if not expression.expression:
 951                return self.func("LN", expression.this)
 952
 953            return super().log_sql(expression)
 954
 955        def unnest_sql(self, expression: exp.Unnest) -> str:
 956            unnest_alias = expression.args.get("alias")
 957            offset = expression.args.get("offset")
 958
                # Alias all six FLATTEN output columns so positional aliases
                # line up: a parsed OFFSET expression claims the 4th (index)
                # slot, and the first user-supplied alias column (if any)
                # claims the 5th (value) slot.
 959            columns = [
 960                exp.to_identifier("seq"),
 961                exp.to_identifier("key"),
 962                exp.to_identifier("path"),
 963                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 964                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 965                or exp.to_identifier("value"),
 966                exp.to_identifier("this"),
 967            ]
 968
 969            if unnest_alias:
 970                unnest_alias.set("columns", columns)
 971            else:
 972                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 973
 974            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 975            alias = self.sql(unnest_alias)
 976            alias = f" AS {alias}" if alias else ""
 977            return f"{explode}{alias}"
 978
 979        def show_sql(self, expression: exp.Show) -> str:
                # SHOW [TERSE] <name> [HISTORY] [LIKE ...] [IN <scope_kind>]
                # [<scope>] [STARTS WITH ...] [<limit>] [FROM ...]
 980            terse = "TERSE " if expression.args.get("terse") else ""
 981            history = " HISTORY" if expression.args.get("history") else ""
 982            like = self.sql(expression, "like")
 983            like = f" LIKE {like}" if like else ""
 984
 985            scope = self.sql(expression, "scope")
 986            scope = f" {scope}" if scope else ""
 987
 988            scope_kind = self.sql(expression, "scope_kind")
 989            if scope_kind:
 990                scope_kind = f" IN {scope_kind}"
 991
 992            starts_with = self.sql(expression, "starts_with")
 993            if starts_with:
 994                starts_with = f" STARTS WITH {starts_with}"
 995
 996            limit = self.sql(expression, "limit")
 997
 998            from_ = self.sql(expression, "from")
 999            if from_:
1000                from_ = f" FROM {from_}"
1001
1002            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1003
1004        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
1005            # Other dialects don't support all of the following parameters, so we need to
1006            # generate default values as necessary to ensure the transpilation is correct
1007            group = expression.args.get("group")
1008
1009            # To avoid generating all these default values, we set group to None if
1010            # it's 0 (also default value) which doesn't trigger the following chain
1011            if group and group.name == "0":
1012                group = None
1013
                # Defaults cascade right-to-left: a present `group` forces
                # `parameters`, which forces `occurrence`, which forces
                # `position`, keeping REGEXP_SUBSTR's positional args aligned.
1014            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
1015            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
1016            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
1017
1018            return self.func(
1019                "REGEXP_SUBSTR",
1020                expression.this,
1021                expression.expression,
1022                position,
1023                occurrence,
1024                parameters,
1025                group,
1026            )
1027
1028        def describe_sql(self, expression: exp.Describe) -> str:
1029            # Default to table if kind is unknown
1030            kind_value = expression.args.get("kind") or "TABLE"
                # NOTE(review): kind_value is always truthy thanks to the
                # "TABLE" fallback, so the else branch below is unreachable.
1031            kind = f" {kind_value}" if kind_value else ""
1032            this = f" {self.sql(expression, 'this')}"
1033            expressions = self.expressions(expression, flat=True)
1034            expressions = f" {expressions}" if expressions else ""
1035            return f"DESCRIBE{kind}{this}{expressions}"
1036
1037        def generatedasidentitycolumnconstraint_sql(
1038            self, expression: exp.GeneratedAsIdentityColumnConstraint
1039        ) -> str:
                # Identity columns render as AUTOINCREMENT [START n] [INCREMENT n].
1040            start = expression.args.get("start")
1041            start = f" START {start}" if start else ""
1042            increment = expression.args.get("increment")
1043            increment = f" INCREMENT {increment}" if increment else ""
1044            return f"AUTOINCREMENT{start}{increment}"
1045
1046        def cluster_sql(self, expression: exp.Cluster) -> str:
                # CLUSTER BY always parenthesizes its expression list.
1047            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1048
1049        def struct_sql(self, expression: exp.Struct) -> str:
                # Build OBJECT_CONSTRUCT('k1', v1, 'k2', v2, ...).  Keyed
                # entries (PropertyEQ) supply their own keys — identifier keys
                # become string literals; positional entries get synthetic
                # keys "_0", "_1", ...
1050            keys = []
1051            values = []
1052
1053            for i, e in enumerate(expression.expressions):
1054                if isinstance(e, exp.PropertyEQ):
1055                    keys.append(
1056                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1057                    )
1058                    values.append(e.expression)
1059                else:
1060                    keys.append(exp.Literal.string(f"_{i}"))
1061                    values.append(e)
1062
1063            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1064
1065        @generator.unsupported_args("weight", "accuracy")
1066        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
                # Only (expr, quantile) are rendered; weight/accuracy are
                # flagged as unsupported by the decorator above.
1067            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1068
1069        def alterset_sql(self, expression: exp.AlterSet) -> str:
                # ALTER ... SET <exprs> [STAGE_FILE_FORMAT = (...)]
                # [STAGE_COPY_OPTIONS = (...)] [TAG ...]
1070            exprs = self.expressions(expression, flat=True)
1071            exprs = f" {exprs}" if exprs else ""
1072            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1073            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1074            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1075            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1076            tag = self.expressions(expression, key="tag", flat=True)
1077            tag = f" TAG {tag}" if tag else ""
1078
1079            return f"SET{exprs}{file_format}{copy_options}{tag}"
1080
1081        def strtotime_sql(self, expression: exp.StrToTime) -> str:
                # The `safe` flag selects TRY_TO_TIMESTAMP over TO_TIMESTAMP.
1082            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1083            return self.func(
1084                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1085            )

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, 
<class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: 
<function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 
'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: 
<Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.Map'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.StarMap'>}
def with_properties(self, properties: exp.Properties) -> str:
    """Render CREATE-level properties unwrapped and space-separated."""
    prefix = self.sep("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Generate a VALUES clause, falling back to the non-table form when the
    VALUES contain expressions Snowflake cannot place in a VALUES table
    (maps, structs, etc. -- see UNSUPPORTED_VALUES_EXPRESSIONS)."""
    has_unsupported = expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS) is not None
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Collapse typed struct definitions to a bare OBJECT.

    The correct Snowflake syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ],
    so a struct whose fields carry standalone DataType nodes cannot be rendered
    field-by-field.
    """
    fields = expression.expressions
    if fields and expression.is_type(*exp.DataType.STRUCT_TYPES):
        if any(isinstance(field, exp.DataType) for field in fields):
            return "OBJECT"
    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(value[, format[, precision[, scale]]])."""
    args = expression.args
    return self.func(
        "TO_NUMBER",
        expression.this,
        args.get("format"),
        args.get("precision"),
        args.get("scale"),
    )
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS, converting a millisecond argument into
    the nanosecond argument Snowflake expects."""
    milli = expression.args.get("milli")
    if milli is not None:
        # 1 millisecond == 1_000_000 nanoseconds
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))
    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Snowflake spells geospatial casts as TO_GEOGRAPHY / TO_GEOMETRY."""
    geo_funcs = {
        exp.DataType.Type.GEOGRAPHY: "TO_GEOGRAPHY",
        exp.DataType.Type.GEOMETRY: "TO_GEOMETRY",
    }
    for dtype, func_name in geo_funcs.items():
        if expression.is_type(dtype):
            return self.func(func_name, expression.this)
    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """TRY_CAST only works for string values in Snowflake; for non-text
    operands, fall back to a plain CAST."""
    operand = expression.this

    # Annotate lazily so we only pay for type inference when needed
    if operand.type is None:
        from sqlglot.optimizer.annotate_types import annotate_types

        operand = annotate_types(operand)

    if operand.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        return super().trycast_sql(expression)

    return self.cast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Single-argument LOG is the natural logarithm here, rendered as LN."""
    if expression.expression:
        return super().log_sql(expression)
    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)).

    FLATTEN exposes six columns (seq, key, path, index, value, this); the
    caller's alias column (if any) maps onto `value`, and an explicit offset
    expression replaces `index`.
    """
    table_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    if isinstance(offset, exp.Expression):
        index_column = offset.pop()
    else:
        index_column = exp.to_identifier("index")

    value_column = (
        seq_get(table_alias.columns if table_alias else [], 0)
        or exp.to_identifier("value")
    )

    flatten_columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_column,
        value_column,
        exp.to_identifier("this"),
    ]

    if table_alias:
        table_alias.set("columns", flatten_columns)
    else:
        table_alias = exp.TableAlias(this="_u", columns=flatten_columns)

    flatten_sql = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias_sql = self.sql(table_alias)
    return f"{flatten_sql} AS {alias_sql}" if alias_sql else flatten_sql
def show_sql(self, expression: exp.Show) -> str:
    """Render Snowflake's SHOW command with all of its optional modifiers.

    Output shape:
    SHOW [TERSE] <name> [HISTORY] [LIKE ...] [IN <scope_kind>] [<scope>]
         [STARTS WITH ...] [<limit>] [FROM ...]
    """
    parts = ["SHOW "]

    if expression.args.get("terse"):
        parts.append("TERSE ")
    parts.append(expression.name)
    if expression.args.get("history"):
        parts.append(" HISTORY")

    like = self.sql(expression, "like")
    if like:
        parts.append(f" LIKE {like}")

    # NOTE: scope_kind (IN ...) precedes the raw scope in the output
    scope_kind = self.sql(expression, "scope_kind")
    if scope_kind:
        parts.append(f" IN {scope_kind}")

    scope = self.sql(expression, "scope")
    if scope:
        parts.append(f" {scope}")

    starts_with = self.sql(expression, "starts_with")
    if starts_with:
        parts.append(f" STARTS WITH {starts_with}")

    parts.append(self.sql(expression, "limit"))

    from_ = self.sql(expression, "from")
    if from_:
        parts.append(f" FROM {from_}")

    return "".join(parts)
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Render REGEXP_SUBSTR for a RegexpExtract node.

    Other dialects don't support all of REGEXP_SUBSTR's parameters, so default
    values are generated only when a later argument is present: each default is
    emitted iff something to its right in the argument list is set.
    """
    args = expression.args

    group = args.get("group")
    # group 0 is REGEXP_SUBSTR's default, so dropping it avoids triggering
    # the default-filling chain below
    if group and group.name == "0":
        group = None

    parameters = args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when unknown."""
    kind = f" {expression.args.get('kind') or 'TABLE'}"
    target = f" {self.sql(expression, 'this')}"
    extra = self.expressions(expression, flat=True)
    extra = f" {extra}" if extra else ""
    return f"DESCRIBE{kind}{target}{extra}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Snowflake identity columns: AUTOINCREMENT [START n] [INCREMENT n]."""
    pieces = ["AUTOINCREMENT"]
    start = expression.args.get("start")
    if start:
        pieces.append(f"START {start}")
    increment = expression.args.get("increment")
    if increment:
        pieces.append(f"INCREMENT {increment}")
    return " ".join(pieces)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Snowflake clustering keys: CLUSTER BY (<expr, ...>)."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Transpile STRUCT(...) into OBJECT_CONSTRUCT(k1, v1, k2, v2, ...).

    Named entries (PropertyEQ) use their name as the key; positional entries
    get synthetic keys '_0', '_1', ...
    """
    interleaved: t.List[exp.Expression] = []

    for i, entry in enumerate(expression.expressions):
        if isinstance(entry, exp.PropertyEQ):
            if isinstance(entry.this, exp.Identifier):
                key = exp.Literal.string(entry.name)
            else:
                key = entry.this
            interleaved.extend((key, entry.expression))
        else:
            interleaved.extend((exp.Literal.string(f"_{i}"), entry))

    return self.func("OBJECT_CONSTRUCT", *interleaved)
@generator.unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Snowflake's APPROX_PERCENTILE only takes (expr, quantile)."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render the SET clause of ALTER, including Snowflake's stage
    file-format / copy options and tags."""
    segments = ["SET"]

    exprs = self.expressions(expression, flat=True)
    if exprs:
        segments.append(f" {exprs}")

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        segments.append(f" STAGE_FILE_FORMAT = ({file_format})")

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    if copy_options:
        segments.append(f" STAGE_COPY_OPTIONS = ({copy_options})")

    tag = self.expressions(expression, key="tag", flat=True)
    if tag:
        segments.append(f" TAG {tag}")

    return "".join(segments)
def strtotime_sql(self, expression: exp.StrToTime) -> str:
    """StrToTime -> [TRY_]TO_TIMESTAMP(value, format)."""
    name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(name, expression.this, self.format_time(expression))
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
PARSE_JSON_NAME
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql