TypeError: 'ellipsis' object is not iterable #899

Open
istvancsabakis opened this issue May 2, 2025 · 2 comments

istvancsabakis commented May 2, 2025

Steps to Reproduce:

from langchain.chat_models import init_chat_model

llm = init_chat_model(model="gemini-1.5-flash")
llm.invoke('...')  # the prompt is the literal string "..."

Expected Behavior:

Return an AIMessage

Actual Behavior (Error):

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[19], line 1
----> 1 llm.invoke('...')

File /venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:307, in BaseChatModel.invoke(self, input, config, stop, **kwargs)
    296 def invoke(
    297     self,
    298     input: LanguageModelInput,
   (...)    302     **kwargs: Any,
    303 ) -> BaseMessage:
    304     config = ensure_config(config)
    305     return cast(
    306         ChatGeneration,
--> 307         self.generate_prompt(
    308             [self._convert_input(input)],
    309             stop=stop,
    310             callbacks=config.get("callbacks"),
    311             tags=config.get("tags"),
    312             metadata=config.get("metadata"),
    313             run_name=config.get("run_name"),
    314             run_id=config.pop("run_id", None),
    315             **kwargs,
    316         ).generations[0][0],
    317     ).message

File /venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:843, in BaseChatModel.generate_prompt(self, prompts, stop, callbacks, **kwargs)
    835 def generate_prompt(
    836     self,
    837     prompts: list[PromptValue],
   (...)    840     **kwargs: Any,
    841 ) -> LLMResult:
    842     prompt_messages = [p.to_messages() for p in prompts]
--> 843     return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)

File /venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:683, in BaseChatModel.generate(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)
    680 for i, m in enumerate(messages):
    681     try:
    682         results.append(
--> 683             self._generate_with_cache(
    684                 m,
    685                 stop=stop,
    686                 run_manager=run_managers[i] if run_managers else None,
    687                 **kwargs,
    688             )
    689         )
    690     except BaseException as e:
    691         if run_managers:

File /venv/lib/python3.12/site-packages/langchain_core/language_models/chat_models.py:908, in BaseChatModel._generate_with_cache(self, messages, stop, run_manager, **kwargs)
    906 else:
    907     if inspect.signature(self._generate).parameters.get("run_manager"):
--> 908         result = self._generate(
    909             messages, stop=stop, run_manager=run_manager, **kwargs
    910         )
    911     else:
    912         result = self._generate(messages, stop=stop, **kwargs)

File /venv/lib/python3.12/site-packages/langchain_google_vertexai/chat_models.py:1286, in ChatVertexAI._generate(self, messages, stop, run_manager, stream, **kwargs)
   1284 if not self._is_gemini_model:
   1285     return self._generate_non_gemini(messages, stop=stop, **kwargs)
-> 1286 return self._generate_gemini(
   1287     messages=messages,
   1288     stop=stop,
   1289     run_manager=run_manager,
   1290     is_gemini=True,
   1291     **kwargs,
   1292 )

File /venv/lib/python3.12/site-packages/langchain_google_vertexai/chat_models.py:1521, in ChatVertexAI._generate_gemini(self, messages, stop, run_manager, **kwargs)
   1514 def _generate_gemini(
   1515     self,
   1516     messages: List[BaseMessage],
   (...)   1519     **kwargs: Any,
   1520 ) -> ChatResult:
-> 1521     request = self._prepare_request_gemini(messages=messages, stop=stop, **kwargs)
   1522     response = _completion_with_retry(
   1523         self.prediction_client.generate_content,
   1524         max_retries=self.max_retries,
   (...)   1527         **kwargs,
   1528     )
   1529     return self._gemini_response_to_chat_result(response)

File /venv/lib/python3.12/site-packages/langchain_google_vertexai/chat_models.py:1376, in ChatVertexAI._prepare_request_gemini(self, messages, stop, stream, tools, functions, tool_config, safety_settings, cached_content, tool_choice, logprobs, **kwargs)
   1361 def _prepare_request_gemini(
   1362     self,
   1363     messages: List[BaseMessage],
   (...)   1374     **kwargs,
   1375 ) -> Union[v1GenerateContentRequest, GenerateContentRequest]:
-> 1376     system_instruction, contents = _parse_chat_history_gemini(
   1377         messages,
   1378         self._image_bytes_loader_client,
   1379         perform_literal_eval_on_string_raw_content=self.perform_literal_eval_on_string_raw_content,
   1380     )
   1381     formatted_tools = self._tools_gemini(tools=tools, functions=functions)
   1382     if tool_config:

File /venv/lib/python3.12/site-packages/langchain_google_vertexai/chat_models.py:319, in _parse_chat_history_gemini(history, imageBytesLoader, convert_system_message_to_human, perform_literal_eval_on_string_raw_content)
    317 prev_ai_message = None
    318 role = "user"
--> 319 parts = _convert_to_parts(message)
    320 if system_parts is not None:
    321     parts = system_parts + parts

File /venv/lib/python3.12/site-packages/langchain_google_vertexai/chat_models.py:286, in _parse_chat_history_gemini.<locals>._convert_to_parts(message)
    284     raw_content = [raw_content]
    285 result = []
--> 286 for raw_part in raw_content:
    287     part = _convert_to_prompt(raw_part)
    288     if part:

TypeError: 'ellipsis' object is not iterable

Environment:

  • LangChain Version: 0.3.20
  • langchain-google-vertexai Version: 2.0.15
  • Python Version: 3.12
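
From the traceback, the failure appears to come from ChatVertexAI running literal evaluation on string message content (see the perform_literal_eval_on_string_raw_content flag passed in _prepare_request_gemini above): Python's ast.literal_eval turns the string "..." into the Ellipsis singleton, which _convert_to_parts then tries to iterate. A minimal sketch of that failure mode, independent of LangChain:

import ast

raw_content = "..."
evaluated = ast.literal_eval(raw_content)  # "..." is a valid Python literal: Ellipsis
print(type(evaluated))                     # <class 'ellipsis'>

try:
    # mirrors the `for raw_part in raw_content:` loop in _convert_to_parts
    for raw_part in evaluated:
        pass
except TypeError as e:
    print(e)  # 'ellipsis' object is not iterable
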
lkuligin (Collaborator) commented May 7, 2025

Could you provide the prompt that reproduces the error, please?

istvancsabakis (Author) commented

The prompt is the literal string "...", exactly as specified in the example above.
