Final clean-up #32

Merged
PeterDKay merged 1 commit into develop from task/add-liquid-clustering-logic on Aug 21, 2023
80 changes: 1 addition & 79 deletions in src/databricks/sqlalchemy/dialect/base.py
@@ -7,13 +7,6 @@
from sqlalchemy.sql.type_api import TypeEngine


# class DatabricksColumn(DefaultColumn):
#     """Represents a column in a databricks table."""
#     def __init__(self, *args, liquid_cluster=False, **kwargs):
#         self.liquid_cluster = liquid_cluster
#         super().__init__(*args, **kwargs)


class DatabricksIdentifierPreparer(compiler.IdentifierPreparer):
    # SparkSQL identifier specification:
    # ref: https://spark.apache.org/docs/latest/sql-ref-identifier.html
@@ -28,34 +21,6 @@ class DatabricksDDLCompiler(compiler.DDLCompiler):
    def post_create_table(self, table):
        return " USING DELTA"

    # def get_column_specification(self, column: DatabricksColumn, **kwargs):
    #     colspec = (
    #         self.preparer.format_column(column)
    #         + " "
    #         + self.dialect.type_compiler.process(
    #             column.type, type_expression=column
    #         )
    #     )
    #
    #     default = self.get_column_default_string(column)
    #     if default is not None:
    #         colspec += " DEFAULT " + default
    #
    #     if column.computed is not None:
    #         colspec += " " + self.process(column.computed)
    #
    #     if (
    #         column.identity is not None
    #         and self.dialect.supports_identity_columns
    #     ):
    #         colspec += " " + self.process(column.identity)
    #
    #     if not column.nullable and (
    #         not column.identity or not self.dialect.supports_identity_columns
    #     ):
    #         colspec += " NOT NULL"
    #     return colspec

    def visit_set_column_comment(self, create, **kw):
        """
        Example syntax for adding column comment:
@@ -150,7 +115,7 @@ def visit_create_table(self, create, **kw):
                    from_=ce,
                )

            # Check and apply liquid clustering
            # Check for and apply liquid clustering
            if 'databricks' in column.dialect_options:
                try:
                    cluster_on = column.dialect_options['databricks'].__getitem__('cluster_key')
@@ -184,34 +149,6 @@ def visit_drop_table(self, drop, **kw):

        return text + self.preparer.format_table(drop.element)

    # def visit_create_column(self, create, first_pk=False, **kw):
    #     column = create.element

    #     if column.system:
    #         return None

    #     text = self.get_column_specification(column, first_pk=first_pk)
    #     const = " ".join(
    #         self.process(constraint) for constraint in column.constraints
    #     )
    #     if const:
    #         text += " " + const

    #     # Code to deal with NOT NULL being unsupported in ADD COLUMNS clause
    #     if "NOT NULL" in text:
    #         text.replace("NOT NULL", "")
    #         text += """;
    #         ALTER TABLE {0} ALTER COLUMN {1} SET NOT NULL;
    #         """.format(
    #             self._format_table_from_column(
    #                 create, use_schema=True
    #             ),
    #             self.preparer.format_column(
    #                 create.element, use_table=False
    #             )
    #         )
    #     return text


@compiles(ColumnComment, "databricks")
def visit_column_comment(
@@ -231,18 +168,3 @@ def visit_column_comment(
        column_name=element.column_name,
        comment=comment,
    )


# @compiles(ColumnType, "databricks")
# def visit_column_type(element: ColumnType, compiler: DatabricksDDLCompiler, **kw) -> str:
#
#
#     return "%s %s %s" % (
#         alter_table(compiler, element.table_name, element.schema),
#         alter_column(compiler, element.column_name),
#         "TYPE %s" % format_type(compiler, element.type_),
#     )
#
#
# def format_type(compiler: DatabricksDDLCompiler, type_: TypeEngine) -> str:
#     return compiler.dialect.type_compiler.process(type_)
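
For readers skimming the diff: the surviving cluster_key lookup in visit_create_table reads a per-column option through SQLAlchemy's dialect_options mechanism, and post_create_table still appends " USING DELTA" to every CREATE TABLE. Below is a minimal sketch of how such an option might be supplied from table metadata; the databricks_cluster_key keyword, the example table, and the commented connection URL are illustrative assumptions, since the PR only shows the compiler-side lookup.

from sqlalchemy import Column, Integer, MetaData, String, Table

metadata = MetaData()

events = Table(
    "events",
    metadata,
    # Hypothetical flag that visit_create_table would read via
    # column.dialect_options["databricks"]["cluster_key"]; the keyword name
    # assumes the dialect registers a "cluster_key" column argument under
    # the "databricks" namespace.
    Column("event_id", Integer, databricks_cluster_key=True),
    Column("payload", String),
)

# With the Databricks dialect installed, metadata.create_all(engine) would
# emit CREATE TABLE DDL ending in "USING DELTA" (see post_create_table) plus
# whatever clustering clause visit_create_table builds for the flagged column.
# Illustrative connection URL (placeholders only):
#   engine = create_engine(
#       "databricks://token:<token>@<host>?http_path=<path>&catalog=<cat>&schema=<schema>"
#   )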
