Skip to content

Commit e491e09

Browse files
committed
fix: override is_string/is_number/is_float/is_integer on DatabricksColumn
The base Column class type classification methods don't recognize Spark/Databricks type names like 'string', 'int', 'bigint', 'double'. This causes col.is_string() to return False for string columns and col.is_number() to return False for numeric columns. Override these methods on DatabricksColumn with the complete set of Databricks/Spark type names. Closes #1380
1 parent b1047b5 commit e491e09

1 file changed

Lines changed: 47 additions & 0 deletions

File tree

dbt/adapters/databricks/column.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,53 @@ def create(cls, name: str, label_or_dtype: str) -> "DatabricksColumn":
2828
column_type = cls.translate_type(label_or_dtype)
2929
return cls(name, column_type)
3030

31+
def is_string(self) -> bool:
    """Whether this column holds textual data.

    Recognizes the Databricks/Spark string type name plus the common
    SQL aliases that may appear in relation metadata.
    """
    string_type_names = (
        "string",
        "varchar",
        "char",
        "text",
        "character varying",
        "character",
        "nchar",
        "nvarchar",
    )
    return self.dtype.lower() in string_type_names
42+
43+
def is_number(self) -> bool:
    """Whether this column holds numeric data (integral or fractional).

    Covers the Databricks/Spark numeric type names as well as
    parameterized decimals such as ``decimal(10, 2)``.
    """
    # Hoist the lowercased dtype so it is computed only once per call.
    dtype = self.dtype.lower()
    return dtype in {
        "tinyint",
        "smallint",
        "int",
        "integer",
        "bigint",
        "long",
        "float",
        "double",
        "decimal",
        "numeric",
        "real",
    } or dtype.startswith("decimal(")
57+
58+
def is_float(self) -> bool:
    """Whether this column holds approximate (floating-point) numeric data."""
    # Exact decimals are deliberately excluded; only approximate types match.
    return self.dtype.lower() in ("float", "double", "real")
64+
65+
def is_integer(self) -> bool:
    """Whether this column holds whole-number (integral) data."""
    integral_type_names = frozenset(
        ["tinyint", "smallint", "int", "integer", "bigint", "long"]
    )
    return self.dtype.lower() in integral_type_names
74+
75+
def is_numeric(self) -> bool:
    """Alias for :meth:`is_number`.

    Delegates rather than duplicating the type list so the two
    predicates can never drift apart.
    """
    return self.is_number()
77+
3178
@classmethod
3279
def from_json_metadata(cls, json_metadata: str) -> list["DatabricksColumn"]:
3380
"""

0 commit comments

Comments
 (0)