Skip to content

Commit 2f8e7af

Browse files
authored
Merge branch 'main' into refactor--dataframe-join-params
2 parents bb83a74 + aedffe0 commit 2f8e7af

38 files changed

+588
-166
lines changed

Cargo.lock

Lines changed: 12 additions & 12 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ datafusion = { version = "42.0.0", features = ["pyarrow", "avro", "unicode_expre
4141
datafusion-substrait = { version = "42.0.0", optional = true }
4242
datafusion-proto = { version = "42.0.0" }
4343
prost = "0.13" # keep in line with `datafusion-substrait`
44-
uuid = { version = "1.9", features = ["v4"] }
44+
uuid = { version = "1.11", features = ["v4"] }
4545
mimalloc = { version = "0.1", optional = true, default-features = false, features = ["local_dynamic_tls"] }
4646
async-trait = "0.1"
4747
futures = "0.3"

docs/source/user-guide/common-operations/expressions.rst

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,12 +77,39 @@ approaches.
7777
df = ctx.from_pydict({"a": [[1, 2, 3], [4, 5, 6]]})
7878
df.select(col("a")[0].alias("a0"))
7979
80-
8180
.. warning::
8281

8382
Indexing an element of an array via ``[]`` starts at index 0 whereas
8483
:py:func:`~datafusion.functions.array_element` starts at index 1.
8584

85+
To check if an array is empty, you can use the function :py:func:`datafusion.functions.array_empty` or :py:func:`datafusion.functions.empty`.
86+
This function returns a boolean indicating whether the array is empty.
87+
88+
.. ipython:: python
89+
90+
from datafusion import SessionContext, col
91+
from datafusion.functions import array_empty
92+
93+
ctx = SessionContext()
94+
df = ctx.from_pydict({"a": [[], [1, 2, 3]]})
95+
df.select(array_empty(col("a")).alias("is_empty"))
96+
97+
In this example, the ``is_empty`` column will contain ``True`` for the first row and ``False`` for the second row.
98+
99+
To get the total number of elements in an array, you can use the function :py:func:`datafusion.functions.cardinality`.
100+
This function returns an integer indicating the total number of elements in the array.
101+
102+
.. ipython:: python
103+
104+
from datafusion import SessionContext, col
105+
from datafusion.functions import cardinality
106+
107+
ctx = SessionContext()
108+
df = ctx.from_pydict({"a": [[1, 2, 3], [4, 5, 6]]})
109+
df.select(cardinality(col("a")).alias("num_elements"))
110+
111+
In this example, the ``num_elements`` column will contain ``3`` for both rows.
112+
86113
Structs
87114
-------
88115

docs/source/user-guide/common-operations/select-and-filter.rst

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ DataFusion can work with several file types, to start simple we can use a subset
3333
3434
ctx = SessionContext()
3535
df = ctx.read_parquet("yellow_trip_data.parquet")
36-
df.select_columns("trip_distance", "passenger_count")
36+
df.select("trip_distance", "passenger_count")
3737
3838
For mathematical or logical operations use :py:func:`~datafusion.col` to select columns, and give meaningful names to the resulting
3939
operations using :py:func:`~datafusion.expr.Expr.alias`
@@ -48,7 +48,7 @@ operations using :py:func:`~datafusion.expr.Expr.alias`
4848

4949
Please be aware that all identifiers are effectively made lower-case in SQL, so if your file has capital letters
5050
(ex: Name) you must put your column name in double quotes or the selection won’t work. As an alternative for simple
51-
column selection use :py:func:`~datafusion.dataframe.DataFrame.select_columns` without double quotes
51+
column selection use :py:func:`~datafusion.dataframe.DataFrame.select` without double quotes
5252

5353
For selecting columns with capital letters use ``'"VendorID"'``
5454

examples/import.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828
# The dictionary keys represent column names and the dictionary values
2929
# represent column values
3030
df = ctx.from_pydict({"a": [1, 2, 3], "b": [4, 5, 6]})
31-
assert type(df) == datafusion.DataFrame
31+
assert type(df) is datafusion.DataFrame
3232
# Dataframe:
3333
# +---+---+
3434
# | a | b |
@@ -40,19 +40,19 @@
4040

4141
# Create a datafusion DataFrame from a Python list of rows
4242
df = ctx.from_pylist([{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}])
43-
assert type(df) == datafusion.DataFrame
43+
assert type(df) is datafusion.DataFrame
4444

4545
# Convert pandas DataFrame to datafusion DataFrame
4646
pandas_df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
4747
df = ctx.from_pandas(pandas_df)
48-
assert type(df) == datafusion.DataFrame
48+
assert type(df) is datafusion.DataFrame
4949

5050
# Convert polars DataFrame to datafusion DataFrame
5151
polars_df = pl.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
5252
df = ctx.from_polars(polars_df)
53-
assert type(df) == datafusion.DataFrame
53+
assert type(df) is datafusion.DataFrame
5454

5555
# Convert Arrow Table to datafusion DataFrame
5656
arrow_table = pa.Table.from_pydict({"a": [1, 2, 3], "b": [4, 5, 6]})
5757
df = ctx.from_arrow(arrow_table)
58-
assert type(df) == datafusion.DataFrame
58+
assert type(df) is datafusion.DataFrame

examples/tpch/convert_data_to_parquet.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,6 @@
138138

139139
df = ctx.read_csv(source_file, schema=schema, has_header=False, delimiter="|")
140140

141-
df = df.select_columns(*output_cols)
141+
df = df.select(*output_cols)
142142

143143
df.write_parquet(dest_file, compression="snappy")

examples/tpch/q02_minimum_cost_supplier.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -43,10 +43,10 @@
4343

4444
ctx = SessionContext()
4545

46-
df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns(
46+
df_part = ctx.read_parquet(get_data_path("part.parquet")).select(
4747
"p_partkey", "p_mfgr", "p_type", "p_size"
4848
)
49-
df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns(
49+
df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select(
5050
"s_acctbal",
5151
"s_name",
5252
"s_address",
@@ -55,13 +55,13 @@
5555
"s_nationkey",
5656
"s_suppkey",
5757
)
58-
df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select_columns(
58+
df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select(
5959
"ps_partkey", "ps_suppkey", "ps_supplycost"
6060
)
61-
df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns(
61+
df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select(
6262
"n_nationkey", "n_regionkey", "n_name"
6363
)
64-
df_region = ctx.read_parquet(get_data_path("region.parquet")).select_columns(
64+
df_region = ctx.read_parquet(get_data_path("region.parquet")).select(
6565
"r_regionkey", "r_name"
6666
)
6767

@@ -115,7 +115,7 @@
115115

116116
# From the problem statement, these are the values we wish to output
117117

118-
df = df.select_columns(
118+
df = df.select(
119119
"s_acctbal",
120120
"s_name",
121121
"n_name",

examples/tpch/q03_shipping_priority.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -37,13 +37,13 @@
3737

3838
ctx = SessionContext()
3939

40-
df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns(
40+
df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select(
4141
"c_mktsegment", "c_custkey"
4242
)
43-
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns(
43+
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select(
4444
"o_orderdate", "o_shippriority", "o_custkey", "o_orderkey"
4545
)
46-
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns(
46+
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select(
4747
"l_orderkey", "l_extendedprice", "l_discount", "l_shipdate"
4848
)
4949

@@ -80,7 +80,7 @@
8080

8181
# Change the order that the columns are reported in just to match the spec
8282

83-
df = df.select_columns("l_orderkey", "revenue", "o_orderdate", "o_shippriority")
83+
df = df.select("l_orderkey", "revenue", "o_orderdate", "o_shippriority")
8484

8585
# Show result
8686

examples/tpch/q04_order_priority_checking.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -39,10 +39,10 @@
3939

4040
ctx = SessionContext()
4141

42-
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns(
42+
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select(
4343
"o_orderdate", "o_orderpriority", "o_orderkey"
4444
)
45-
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns(
45+
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select(
4646
"l_orderkey", "l_commitdate", "l_receiptdate"
4747
)
4848

@@ -54,7 +54,7 @@
5454
# Limit results to cases where commitment date before receipt date
5555
# Aggregate the results so we only get one row to join with the order table.
5656
# Alternately, and likely more idiomatic is instead of `.aggregate` you could
57-
# do `.select_columns("l_orderkey").distinct()`. The goal here is to show
57+
# do `.select("l_orderkey").distinct()`. The goal here is to show
5858
# multiple examples of how to use Data Fusion.
5959
df_lineitem = df_lineitem.filter(col("l_commitdate") < col("l_receiptdate")).aggregate(
6060
[col("l_orderkey")], []

examples/tpch/q05_local_supplier_volume.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -47,22 +47,22 @@
4747

4848
ctx = SessionContext()
4949

50-
df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns(
50+
df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select(
5151
"c_custkey", "c_nationkey"
5252
)
53-
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns(
53+
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select(
5454
"o_custkey", "o_orderkey", "o_orderdate"
5555
)
56-
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns(
56+
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select(
5757
"l_orderkey", "l_suppkey", "l_extendedprice", "l_discount"
5858
)
59-
df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns(
59+
df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select(
6060
"s_suppkey", "s_nationkey"
6161
)
62-
df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns(
62+
df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select(
6363
"n_nationkey", "n_regionkey", "n_name"
6464
)
65-
df_region = ctx.read_parquet(get_data_path("region.parquet")).select_columns(
65+
df_region = ctx.read_parquet(get_data_path("region.parquet")).select(
6666
"r_regionkey", "r_name"
6767
)
6868

examples/tpch/q06_forecasting_revenue_change.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@
5151

5252
ctx = SessionContext()
5353

54-
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns(
54+
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select(
5555
"l_shipdate", "l_quantity", "l_extendedprice", "l_discount"
5656
)
5757

examples/tpch/q07_volume_shipping.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -49,19 +49,19 @@
4949

5050
ctx = SessionContext()
5151

52-
df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns(
52+
df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select(
5353
"s_suppkey", "s_nationkey"
5454
)
55-
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns(
55+
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select(
5656
"l_shipdate", "l_extendedprice", "l_discount", "l_suppkey", "l_orderkey"
5757
)
58-
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns(
58+
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select(
5959
"o_orderkey", "o_custkey"
6060
)
61-
df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns(
61+
df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select(
6262
"c_custkey", "c_nationkey"
6363
)
64-
df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns(
64+
df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select(
6565
"n_nationkey", "n_name"
6666
)
6767

examples/tpch/q08_market_share.py

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -47,25 +47,23 @@
4747

4848
ctx = SessionContext()
4949

50-
df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns(
51-
"p_partkey", "p_type"
52-
)
53-
df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns(
50+
df_part = ctx.read_parquet(get_data_path("part.parquet")).select("p_partkey", "p_type")
51+
df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select(
5452
"s_suppkey", "s_nationkey"
5553
)
56-
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns(
54+
df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select(
5755
"l_partkey", "l_extendedprice", "l_discount", "l_suppkey", "l_orderkey"
5856
)
59-
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns(
57+
df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select(
6058
"o_orderkey", "o_custkey", "o_orderdate"
6159
)
62-
df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns(
60+
df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select(
6361
"c_custkey", "c_nationkey"
6462
)
65-
df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns(
63+
df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select(
6664
"n_nationkey", "n_name", "n_regionkey"
6765
)
68-
df_region = ctx.read_parquet(get_data_path("region.parquet")).select_columns(
66+
df_region = ctx.read_parquet(get_data_path("region.parquet")).select(
6967
"r_regionkey", "r_name"
7068
)
7169

@@ -133,7 +131,7 @@
133131

134132
# When we join to the customer dataframe, we don't want to confuse other columns, so only
135133
# select the supplier key that we need
136-
df_national_suppliers = df_national_suppliers.select_columns("s_suppkey")
134+
df_national_suppliers = df_national_suppliers.select("s_suppkey")
137135

138136

139137
# Part 3: Combine suppliers and customers and compute the market share

0 commit comments

Comments
 (0)