Skip to content

Commit 42944f3

Browse files
cbornet and eyurtsev authored
core: Improve mypy config (#30737)
* Cleanup mypy config * Add mypy `strict` rules except `disallow_any_generics`, `warn_return_any` and `strict_equality` (TODO) * Add mypy `strict_byte` rule * Add mypy support for PEP702 `@deprecated` decorator * Bump mypy version to 1.15 --------- Co-authored-by: Eugene Yurtsev <[email protected]>
1 parent bb2c2fd commit 42944f3

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

60 files changed

+223
-238
lines changed

libs/core/langchain_core/_api/beta_decorator.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -225,10 +225,8 @@ def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
225225
new_doc = f".. beta::\n {details}\n\n{old_doc}\n"
226226

227227
if inspect.iscoroutinefunction(obj):
228-
finalized = finalize(awarning_emitting_wrapper, new_doc)
229-
else:
230-
finalized = finalize(warning_emitting_wrapper, new_doc)
231-
return cast("T", finalized)
228+
return finalize(awarning_emitting_wrapper, new_doc)
229+
return finalize(warning_emitting_wrapper, new_doc)
232230

233231
return beta
234232

libs/core/langchain_core/_api/deprecation.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -152,7 +152,10 @@ def deprecate(
152152
_package: str = package,
153153
) -> T:
154154
"""Implementation of the decorator returned by `deprecated`."""
155-
from langchain_core.utils.pydantic import FieldInfoV1, FieldInfoV2
155+
from langchain_core.utils.pydantic import ( # type: ignore[attr-defined]
156+
FieldInfoV1,
157+
FieldInfoV2,
158+
)
156159

157160
def emit_warning() -> None:
158161
"""Emit the warning."""
@@ -395,10 +398,8 @@ def finalize(wrapper: Callable[..., Any], new_doc: str) -> T:
395398
"""
396399

397400
if inspect.iscoroutinefunction(obj):
398-
finalized = finalize(awarning_emitting_wrapper, new_doc)
399-
else:
400-
finalized = finalize(warning_emitting_wrapper, new_doc)
401-
return cast("T", finalized)
401+
return finalize(awarning_emitting_wrapper, new_doc)
402+
return finalize(warning_emitting_wrapper, new_doc)
402403

403404
return deprecate
404405

libs/core/langchain_core/callbacks/manager.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2408,7 +2408,7 @@ def _configure(
24082408
run_tree.trace_id,
24092409
run_tree.dotted_order,
24102410
)
2411-
handler.run_map[str(run_tree.id)] = cast("Run", run_tree)
2411+
handler.run_map[str(run_tree.id)] = run_tree
24122412
for var, inheritable, handler_class, env_var in _configure_hooks:
24132413
create_one = (
24142414
env_var is not None

libs/core/langchain_core/document_loaders/base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ async def alazy_load(self) -> AsyncIterator[Document]:
8080
iterator = await run_in_executor(None, self.lazy_load)
8181
done = object()
8282
while True:
83-
doc = await run_in_executor(None, next, iterator, done) # type: ignore[call-arg, arg-type]
83+
doc = await run_in_executor(None, next, iterator, done)
8484
if doc is done:
8585
break
8686
yield doc # type: ignore[misc]

libs/core/langchain_core/embeddings/fake.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@ class FakeEmbeddings(Embeddings, BaseModel):
5252
"""The size of the embedding vector."""
5353

5454
def _get_embedding(self) -> list[float]:
55-
import numpy as np # type: ignore[import-not-found, import-untyped]
55+
import numpy as np
5656

5757
return list(np.random.default_rng().normal(size=self.size))
5858

@@ -109,7 +109,7 @@ class DeterministicFakeEmbedding(Embeddings, BaseModel):
109109
"""The size of the embedding vector."""
110110

111111
def _get_embedding(self, seed: int) -> list[float]:
112-
import numpy as np # type: ignore[import-not-found, import-untyped]
112+
import numpy as np
113113

114114
# set the seed for the random generator
115115
rng = np.random.default_rng(seed)

libs/core/langchain_core/globals.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ def set_verbose(value: bool) -> None: # noqa: FBT001
2323
value: The new value for the `verbose` global setting.
2424
"""
2525
try:
26-
import langchain # type: ignore[import]
26+
import langchain # type: ignore[import-not-found]
2727

2828
# We're about to run some deprecated code, don't report warnings from it.
2929
# The user called the correct (non-deprecated) code path and shouldn't get
@@ -57,7 +57,7 @@ def get_verbose() -> bool:
5757
The value of the `verbose` global setting.
5858
"""
5959
try:
60-
import langchain # type: ignore[import]
60+
import langchain
6161

6262
# We're about to run some deprecated code, don't report warnings from it.
6363
# The user called the correct (non-deprecated) code path and shouldn't get
@@ -96,7 +96,7 @@ def set_debug(value: bool) -> None: # noqa: FBT001
9696
value: The new value for the `debug` global setting.
9797
"""
9898
try:
99-
import langchain # type: ignore[import]
99+
import langchain
100100

101101
# We're about to run some deprecated code, don't report warnings from it.
102102
# The user called the correct (non-deprecated) code path and shouldn't get
@@ -128,7 +128,7 @@ def get_debug() -> bool:
128128
The value of the `debug` global setting.
129129
"""
130130
try:
131-
import langchain # type: ignore[import]
131+
import langchain
132132

133133
# We're about to run some deprecated code, don't report warnings from it.
134134
# The user called the correct (non-deprecated) code path and shouldn't get
@@ -164,7 +164,7 @@ def set_llm_cache(value: Optional["BaseCache"]) -> None:
164164
value: The new LLM cache to use. If `None`, the LLM cache is disabled.
165165
"""
166166
try:
167-
import langchain # type: ignore[import]
167+
import langchain
168168

169169
# We're about to run some deprecated code, don't report warnings from it.
170170
# The user called the correct (non-deprecated) code path and shouldn't get
@@ -198,7 +198,7 @@ def get_llm_cache() -> "BaseCache":
198198
The value of the `llm_cache` global setting.
199199
"""
200200
try:
201-
import langchain # type: ignore[import]
201+
import langchain
202202

203203
# We're about to run some deprecated code, don't report warnings from it.
204204
# The user called the correct (non-deprecated) code path and shouldn't get

libs/core/langchain_core/indexing/api.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -394,7 +394,7 @@ def index(
394394
if cleanup == "scoped_full":
395395
scoped_full_cleanup_source_ids.add(source_id)
396396
# source ids cannot be None after for loop above.
397-
source_ids = cast("Sequence[str]", source_ids) # type: ignore[assignment]
397+
source_ids = cast("Sequence[str]", source_ids)
398398

399399
exists_batch = record_manager.exists([doc.uid for doc in hashed_docs])
400400

libs/core/langchain_core/language_models/base.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ def get_tokenizer() -> Any:
6161
every time it is called.
6262
"""
6363
try:
64-
from transformers import GPT2TokenizerFast # type: ignore[import]
64+
from transformers import GPT2TokenizerFast # type: ignore[import-not-found]
6565
except ImportError as e:
6666
msg = (
6767
"Could not import transformers python package. "

libs/core/langchain_core/language_models/chat_models.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -853,7 +853,7 @@ async def agenerate(
853853
run_manager.on_llm_end(
854854
LLMResult(
855855
generations=[res.generations], # type: ignore[list-item, union-attr]
856-
llm_output=res.llm_output, # type: ignore[list-item, union-attr]
856+
llm_output=res.llm_output, # type: ignore[union-attr]
857857
)
858858
)
859859
for run_manager, res in zip(run_managers, results)
@@ -1107,7 +1107,7 @@ async def _astream(
11071107
None,
11081108
next,
11091109
iterator,
1110-
done, # type: ignore[call-arg, arg-type]
1110+
done,
11111111
)
11121112
if item is done:
11131113
break

libs/core/langchain_core/language_models/llms.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -455,7 +455,7 @@ def batch(
455455
inputs[i : i + max_concurrency]
456456
for i in range(0, len(inputs), max_concurrency)
457457
]
458-
config = [{**c, "max_concurrency": None} for c in config] # type: ignore[misc]
458+
config = [{**c, "max_concurrency": None} for c in config]
459459
return [
460460
output
461461
for i, batch in enumerate(batches)
@@ -501,7 +501,7 @@ async def abatch(
501501
inputs[i : i + max_concurrency]
502502
for i in range(0, len(inputs), max_concurrency)
503503
]
504-
config = [{**c, "max_concurrency": None} for c in config] # type: ignore[misc]
504+
config = [{**c, "max_concurrency": None} for c in config]
505505
return [
506506
output
507507
for i, batch in enumerate(batches)
@@ -746,7 +746,7 @@ async def _astream(
746746
None,
747747
next,
748748
iterator,
749-
done, # type: ignore[call-arg, arg-type]
749+
done,
750750
)
751751
if item is done:
752752
break
@@ -1231,7 +1231,7 @@ async def agenerate(
12311231
stop,
12321232
run_managers, # type: ignore[arg-type]
12331233
new_arg_supported=bool(new_arg_supported),
1234-
**kwargs, # type: ignore[arg-type]
1234+
**kwargs,
12351235
)
12361236
if len(missing_prompts) > 0:
12371237
run_managers = await asyncio.gather(
@@ -1253,7 +1253,7 @@ async def agenerate(
12531253
stop,
12541254
run_managers, # type: ignore[arg-type]
12551255
new_arg_supported=bool(new_arg_supported),
1256-
**kwargs, # type: ignore[arg-type]
1256+
**kwargs,
12571257
)
12581258
llm_output = await aupdate_cache(
12591259
self.cache,

0 commit comments

Comments (0)