Skip to content

Commit b0f84dd

Browse files
authored
Fix: remove accidental pyspark dependency added in ruff refactor (#2587)
1 parent effe44a commit b0f84dd

File tree

1 file changed

+5
-3
lines changed

sqlmesh/core/snapshot/evaluator.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,6 @@
3030
from functools import reduce
3131

3232
import pandas as pd
33-
from pyspark.sql.dataframe import DataFrame as PySparkDataFrame
3433
from sqlglot import exp, select
3534
from sqlglot.executor import execute
3635

@@ -544,10 +543,13 @@ def apply(query_or_df: QueryOrDF, index: int = 0) -> None:
544543

545544
if limit is not None:
546545
query_or_df = next(queries_or_dfs)
547-
if isinstance(query_or_df, PySparkDataFrame):
548-
return query_or_df.limit(limit)
549546
if isinstance(query_or_df, pd.DataFrame):
550547
return query_or_df.head(limit)
548+
if not isinstance(query_or_df, exp.Expression):
549+
# We assume that if this branch is reached, `query_or_df` is a pyspark dataframe,
550+
# so we use `limit` instead of `head` to get back a dataframe instead of List[Row]
551+
# https://spark.apache.org/docs/3.1.1/api/python/reference/api/pyspark.sql.DataFrame.head.html#pyspark.sql.DataFrame.head
552+
return query_or_df.limit(limit)
551553

552554
assert isinstance(query_or_df, exp.Query)
553555

0 commit comments

Comments (0)