 import datafusion as df
 import pyarrow as pa
-import pyarrow.dataset as ds
 import pyarrow_hotfix  # noqa: F401
 import sqlglot as sg
 import sqlglot.expressions as sge
@@ -28,7 +27,7 @@
 from ibis.common.dispatch import lazy_singledispatch
 from ibis.expr.operations.udf import InputType
 from ibis.formats.pyarrow import PyArrowSchema, PyArrowType
-from ibis.util import deprecated, gen_name, normalize_filename, normalize_filenames
+from ibis.util import gen_name, normalize_filename, normalize_filenames, warn_deprecated

 try:
     from datafusion import ExecutionContext as SessionContext
@@ -88,37 +87,30 @@ def do_connect(
         Parameters
         ----------
         config
-            Mapping of table names to files or a `SessionContext`
+            Mapping of table names to files (deprecated in 10.0) or a `SessionContext`
             instance.

         Examples
         --------
+        >>> from datafusion import SessionContext
+        >>> ctx = SessionContext()
+        >>> _ = ctx.from_pydict({"a": [1, 2, 3]}, "mytable")
         >>> import ibis
-        >>> config = {
-        ...     "astronauts": "ci/ibis-testing-data/parquet/astronauts.parquet",
-        ...     "diamonds": "ci/ibis-testing-data/csv/diamonds.csv",
-        ... }
-        >>> con = ibis.datafusion.connect(config)
+        >>> con = ibis.datafusion.connect(ctx)
         >>> con.list_tables()
-        ['astronauts', 'diamonds']
-        >>> con.table("diamonds")
-        DatabaseTable: diamonds
-          carat   float64
-          cut     string
-          color   string
-          clarity string
-          depth   float64
-          table   float64
-          price   int64
-          x       float64
-          y       float64
-          z       float64
+        ['mytable']
         """
         if isinstance(config, SessionContext):
             (self.con, config) = (config, None)
         else:
             if config is not None and not isinstance(config, Mapping):
                 raise TypeError("Input to ibis.datafusion.connect must be a mapping")
+            elif config is not None and config:  # warn if dict is not empty
+                warn_deprecated(
+                    "Passing a mapping of table names to files",
+                    as_of="10.0",
+                    instead="Please use the explicit `read_*` methods for the files you would like to load instead.",
+                )
             if SessionConfig is not None:
                 df_config = SessionConfig(
                     {"datafusion.sql_parser.dialect": "PostgreSQL"}
@@ -178,6 +170,57 @@ def _get_schema_using_query(self, query: str) -> sch.Schema:
 
         return PyArrowSchema.to_ibis(df.schema())
 
+    def _register(
+        self,
+        source: str | Path | pa.Table | pa.RecordBatch | pa.Dataset | pd.DataFrame,
+        table_name: str | None = None,
+        **kwargs: Any,
+    ) -> ir.Table:
+        import pandas as pd
+        import pyarrow.dataset as ds
+
+        if isinstance(source, (str, Path)):
+            first = str(source)
+        elif isinstance(source, pa.Table):
+            self.con.deregister_table(table_name)
+            self.con.register_record_batches(table_name, [source.to_batches()])
+            return self.table(table_name)
+        elif isinstance(source, pa.RecordBatch):
+            self.con.deregister_table(table_name)
+            self.con.register_record_batches(table_name, [[source]])
+            return self.table(table_name)
+        elif isinstance(source, ds.Dataset):
+            self.con.deregister_table(table_name)
+            self.con.register_dataset(table_name, source)
+            return self.table(table_name)
+        elif isinstance(source, pd.DataFrame):
+            return self._register(pa.Table.from_pandas(source), table_name, **kwargs)
+        else:
+            raise ValueError("`source` must be either a string or a pathlib.Path")
+
+        if first.startswith(("parquet://", "parq://")) or first.endswith(
+            ("parq", "parquet")
+        ):
+            return self.read_parquet(source, table_name=table_name, **kwargs)
+        elif first.startswith(("csv://", "txt://")) or first.endswith(
+            ("csv", "tsv", "txt")
+        ):
+            return self.read_csv(source, table_name=table_name, **kwargs)
+        else:
+            self._register_failure()
+            return None
+
+    def _register_failure(self):
+        import inspect
+
+        msg = ", ".join(
+            m[0] for m in inspect.getmembers(self) if m[0].startswith("read_")
+        )
+        raise ValueError(
+            f"Cannot infer appropriate read function for input, "
+            f"please call one of {msg} directly"
+        )
+
     def _register_builtin_udfs(self):
         from ibis.backends.datafusion import udfs
 
@@ -345,68 +388,6 @@ def get_schema(
         table = database.table(table_name)
         return sch.schema(table.schema)

-    @deprecated(
-        as_of="9.1",
-        instead="use the explicit `read_*` method for the filetype you are trying to read, e.g., read_parquet, read_csv, etc.",
-    )
-    def register(
-        self,
-        source: str | Path | pa.Table | pa.RecordBatch | pa.Dataset | pd.DataFrame,
-        table_name: str | None = None,
-        **kwargs: Any,
-    ) -> ir.Table:
-        return self._register(source, table_name, **kwargs)
-
-    def _register(
-        self,
-        source: str | Path | pa.Table | pa.RecordBatch | pa.Dataset | pd.DataFrame,
-        table_name: str | None = None,
-        **kwargs: Any,
-    ) -> ir.Table:
-        import pandas as pd
-
-        if isinstance(source, (str, Path)):
-            first = str(source)
-        elif isinstance(source, pa.Table):
-            self.con.deregister_table(table_name)
-            self.con.register_record_batches(table_name, [source.to_batches()])
-            return self.table(table_name)
-        elif isinstance(source, pa.RecordBatch):
-            self.con.deregister_table(table_name)
-            self.con.register_record_batches(table_name, [[source]])
-            return self.table(table_name)
-        elif isinstance(source, ds.Dataset):
-            self.con.deregister_table(table_name)
-            self.con.register_dataset(table_name, source)
-            return self.table(table_name)
-        elif isinstance(source, pd.DataFrame):
-            return self.register(pa.Table.from_pandas(source), table_name, **kwargs)
-        else:
-            raise ValueError("`source` must be either a string or a pathlib.Path")
-
-        if first.startswith(("parquet://", "parq://")) or first.endswith(
-            ("parq", "parquet")
-        ):
-            return self.read_parquet(source, table_name=table_name, **kwargs)
-        elif first.startswith(("csv://", "txt://")) or first.endswith(
-            ("csv", "tsv", "txt")
-        ):
-            return self.read_csv(source, table_name=table_name, **kwargs)
-        else:
-            self._register_failure()
-            return None
-
-    def _register_failure(self):
-        import inspect
-
-        msg = ", ".join(
-            m[0] for m in inspect.getmembers(self) if m[0].startswith("read_")
-        )
-        raise ValueError(
-            f"Cannot infer appropriate read function for input, "
-            f"please call one of {msg} directly"
-        )
-
     def _register_in_memory_table(self, op: ops.InMemoryTable) -> None:
         # self.con.register_table is broken, so we do this roundabout thing
         # of constructing a datafusion DataFrame, which has a side effect