Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit 302e066

Browse files
authored
Merge pull request #32 from sede-open/task/add-liquid-clustering-logic
Final clean-up
2 parents a62c855 + bd87865, commit 302e066

File tree

1 file changed

+1
-79
lines changed
  • src/databricks/sqlalchemy/dialect

1 file changed

+1
-79
lines changed

‎src/databricks/sqlalchemy/dialect/base.py‎

Lines changed: 1 addition & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -7,13 +7,6 @@
77
from sqlalchemy.sql.type_api import TypeEngine
88

99

10-
# class DatabricksColumn(DefaultColumn):
11-
# """Represents a column in a databricks table."""
12-
# def __init__(self, *args, liquid_cluster=False, **kwargs):
13-
# self.liquid_cluster = liquid_cluster
14-
# super().__init__(*args, **kwargs)
15-
16-
1710
class DatabricksIdentifierPreparer(compiler.IdentifierPreparer):
1811
# SparkSQL identifier specification:
1912
# ref: https://spark.apache.org/docs/latest/sql-ref-identifier.html
@@ -28,34 +21,6 @@ class DatabricksDDLCompiler(compiler.DDLCompiler):
2821
def post_create_table(self, table):
2922
return" USING DELTA"
3023

31-
# def get_column_specification(self, column: DatabricksColumn, **kwargs):
32-
# colspec = (
33-
# self.preparer.format_column(column)
34-
# + " "
35-
# + self.dialect.type_compiler.process(
36-
# column.type, type_expression=column
37-
# )
38-
# )
39-
#
40-
# default = self.get_column_default_string(column)
41-
# if default is not None:
42-
# colspec += " DEFAULT " + default
43-
#
44-
# if column.computed is not None:
45-
# colspec += " " + self.process(column.computed)
46-
#
47-
# if (
48-
# column.identity is not None
49-
# and self.dialect.supports_identity_columns
50-
# ):
51-
# colspec += " " + self.process(column.identity)
52-
#
53-
# if not column.nullable and (
54-
# not column.identity or not self.dialect.supports_identity_columns
55-
# ):
56-
# colspec += " NOT NULL"
57-
# return colspec
58-
5924
def visit_set_column_comment(self, create, **kw):
6025
"""
6126
Example syntax for adding column comment:
@@ -150,7 +115,7 @@ def visit_create_table(self, create, **kw):
150115
from_=ce,
151116
)
152117

153-
# Check and apply liquid clustering
118+
# Check for and apply liquid clustering
154119
if 'databricks' in column.dialect_options:
155120
try:
156121
cluster_on=column.dialect_options['databricks'].__getitem__('cluster_key')
@@ -184,34 +149,6 @@ def visit_drop_table(self, drop, **kw):
184149

185150
return text + self.preparer.format_table(drop.element)
186151

187-
# def visit_create_column(self, create, first_pk=False, **kw):
188-
# column = create.element
189-
190-
# if column.system:
191-
# return None
192-
193-
# text = self.get_column_specification(column, first_pk=first_pk)
194-
# const = " ".join(
195-
# self.process(constraint) for constraint in column.constraints
196-
# )
197-
# if const:
198-
# text += " " + const
199-
200-
# # Code to deal with NOT NULL being unsupported in ADD COLUMNS clause
201-
# if "NOT NULL" in text:
202-
# text.replace("NOT NULL", "")
203-
# text += """;
204-
# ALTER TABLE {0} ALTER COLUMN {1} SET NOT NULL;
205-
# """.format(
206-
# self._format_table_from_column(
207-
# create, use_schema=True
208-
# ),
209-
# self.preparer.format_column(
210-
# create.element, use_table=False
211-
# )
212-
# )
213-
# return text
214-
215152

216153
@compiles(ColumnComment,"databricks")
217154
def visit_column_comment(
@@ -231,18 +168,3 @@ def visit_column_comment(
231168
column_name=element.column_name,
232169
comment=comment,
233170
)
234-
235-
236-
# @compiles(ColumnType, "databricks")
237-
# def visit_column_type(element: ColumnType, compiler: DatabricksDDLCompiler, **kw) -> str:
238-
#
239-
#
240-
# return "%s %s %s" % (
241-
# alter_table(compiler, element.table_name, element.schema),
242-
# alter_column(compiler, element.column_name),
243-
# "TYPE %s" % format_type(compiler, element.type_),
244-
# )
245-
#
246-
#
247-
# def format_type(compiler: DatabricksDDLCompiler, type_: TypeEngine) -> str:
248-
# return compiler.dialect.type_compiler.process(type_)

0 commit comments

Comments
 (0)

[8]ページ先頭

©2009-2025 Movatter.jp