Error shown in the UI:

Error creating workflow run from prompt
Request failed with status code 500

Error from the docker logs:
xxxx@xxxx:~/Desktop/skyvern$ sudo docker logs e665a018aba3
INFO [alembic.runtime.migration] Context impl PostgresqlImpl.
INFO [alembic.runtime.migration] Will assume transactional DDL.
Alembic mode: online
INFO [alembic.runtime.migration] Context impl PostgresqlImpl.
INFO [alembic.runtime.migration] Will assume transactional DDL.
Alembic mode: online
No new upgrade operations detected.
Creating organization and API token...
.streamlit/secrets.toml file updated with organization details.
Starting Xvfb...
2025-02-28T11:43:03.225782Z [info ] Agent server starting. host=0.0.0.0 port=8000
INFO: Will watch for changes in these directories: ['/app']
INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
INFO: Started reloader process [59] using WatchFiles
2025-02-28T11:43:13.747037Z [info ] Initializing ForgeAgent browser_action_timeout_ms=5000 browser_type=chromium-headful debug_mode=False env=local execute_all_steps=True long_running_task_warning_ratio=0.95 max_scraping_retries=0 max_steps_per_run=10 video_path=/data/videos
2025-02-28T11:45:53.019874Z [error ] LLM request failed unexpectedly llm_key=AZURE_OPENAI
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/litellm/main.py", line 466, in acompletion
response = await init_response
^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/llms/azure/azure.py", line 660, in acompletion
raise e
File "/usr/local/lib/python3.11/site-packages/litellm/llms/azure/azure.py", line 611, in acompletion
headers, response = await self.make_azure_openai_chat_completion_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/llms/azure/azure.py", line 324, in make_azure_openai_chat_completion_request
raise e
File "/usr/local/lib/python3.11/site-packages/litellm/llms/azure/azure.py", line 316, in make_azure_openai_chat_completion_request
raw_response = await azure_client.chat.completions.with_raw_response.create(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/_legacy_response.py", line 381, in wrapped
return cast(LegacyAPIResponse[R], await func(*args, **kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/resources/chat/completions/completions.py", line 1927, in create
return await self._post(
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/_base_client.py", line 1856, in post
return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/_base_client.py", line 1550, in request
return await self._request(
^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/_base_client.py", line 1651, in _request
raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': {'message': 'Unrecognized request argument supplied: max_completion_tokens', 'type': 'invalid_request_error', 'param': None, 'code': None}}
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/app/skyvern/forge/sdk/api/llm/api_handler_factory.py", line 297, in llm_api_handler
response = await litellm.acompletion(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/utils.py", line 1358, in wrapper_async
raise e
File "/usr/local/lib/python3.11/site-packages/litellm/utils.py", line 1217, in wrapper_async
result = await original_function(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/main.py", line 485, in acompletion
raise exception_type(
^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2201, in exception_type
raise e
File "/usr/local/lib/python3.11/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 1948, in exception_type
raise BadRequestError(
litellm.exceptions.BadRequestError: litellm.BadRequestError: AzureException BadRequestError - Unrecognized request argument supplied: max_completion_tokens
2025-02-28T11:45:53.031203Z [error ] LLM failure to initialize observer cruise
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/litellm/main.py", line 466, in acompletion
response = await init_response
^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/llms/azure/azure.py", line 660, in acompletion
raise e
File "/usr/local/lib/python3.11/site-packages/litellm/llms/azure/azure.py", line 611, in acompletion
headers, response = await self.make_azure_openai_chat_completion_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/llms/azure/azure.py", line 324, in make_azure_openai_chat_completion_request
raise e
File "/usr/local/lib/python3.11/site-packages/litellm/llms/azure/azure.py", line 316, in make_azure_openai_chat_completion_request
raw_response = await azure_client.chat.completions.with_raw_response.create(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/_legacy_response.py", line 381, in wrapped
return cast(LegacyAPIResponse[R], await func(*args, **kwargs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/resources/chat/completions/completions.py", line 1927, in create
return await self._post(
^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/_base_client.py", line 1856, in post
return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/_base_client.py", line 1550, in request
return await self._request(
^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/openai/_base_client.py", line 1651, in _request
raise self._make_status_error_from_response(err.response) from None
openai.BadRequestError: Error code: 400 - {'error': {'message': 'Unrecognized request argument supplied: max_completion_tokens', 'type': 'invalid_request_error', 'param': None, 'code': None}}
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/app/skyvern/forge/sdk/api/llm/api_handler_factory.py", line 297, in llm_api_handler
response = await litellm.acompletion(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/utils.py", line 1358, in wrapper_async
raise e
File "/usr/local/lib/python3.11/site-packages/litellm/utils.py", line 1217, in wrapper_async
result = await original_function(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/main.py", line 485, in acompletion
raise exception_type(
^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 2201, in exception_type
raise e
File "/usr/local/lib/python3.11/site-packages/litellm/litellm_core_utils/exception_mapping_utils.py", line 1948, in exception_type
raise BadRequestError(
litellm.exceptions.BadRequestError: litellm.BadRequestError: AzureException BadRequestError - Unrecognized request argument supplied: max_completion_tokens
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/app/skyvern/forge/sdk/routes/agent_protocol.py", line 1237, in create_task_v2
task_v2 = await task_v2_service.initialize_task_v2(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/skyvern/forge/sdk/services/task_v2_service.py", line 125, in initialize_task_v2
metadata_response = await app.LLM_API_HANDLER(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/skyvern/forge/sdk/api/llm/api_handler_factory.py", line 316, in llm_api_handler
raise LLMProviderError(llm_key) from e
skyvern.forge.sdk.api.llm.exceptions.LLMProviderError: Error while using LLMProvider AZURE_OPENAI
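Root cause, as far as the trace shows: LiteLLM forwards a `max_completion_tokens` argument to the Azure deployment, and the deployment rejects it as an unrecognized request argument, which Skyvern then surfaces as the 500 above. The following is a minimal sketch that should reproduce the 400 outside of Skyvern; the endpoint, key, and deployment name are placeholders, and whether the call actually fails depends on the model version behind the deployment:

```python
# Minimal repro sketch (placeholders throughout; assumes an Azure "gpt-4"
# deployment that predates support for max_completion_tokens).
from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint="https://xxxx.openai.azure.com",
    api_key="<AZURE_API_KEY>",
    api_version="2025-01-01-preview",
)

# Older chat-completion deployments accept max_tokens; sending
# max_completion_tokens instead triggers the same 400 seen in the logs.
client.chat.completions.create(
    model="gpt-4",  # Azure deployment name, not the OpenAI model id
    messages=[{"role": "user", "content": "ping"}],
    max_completion_tokens=100,  # -> 400: Unrecognized request argument supplied
)
```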
Target URI configured in docker-compose.yml:
https://xxxx.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2025-01-01-preview
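For context (an inference from the error, not something stated in this issue): `max_completion_tokens` is the newer replacement for `max_tokens`, and older `gpt-4` chat-completion deployments such as the one in the target URI above reject it with exactly this 400 regardless of the `api-version` in the URL. One hedged workaround at the LiteLLM layer, assuming you can set it where Skyvern builds its LLM handler, is to let LiteLLM drop parameters the target model is not known to support:

```python
# Workaround sketch, not a confirmed Skyvern fix: litellm.drop_params asks
# LiteLLM to strip OpenAI params the target model is not known to support,
# which may include max_completion_tokens for older gpt-4 deployments.
import litellm

litellm.drop_params = True

response = litellm.completion(
    model="azure/gpt-4",  # "azure/<deployment name>" from the target URI above
    api_base="https://xxxx.openai.azure.com",
    api_key="<AZURE_API_KEY>",
    api_version="2025-01-01-preview",
    messages=[{"role": "user", "content": "ping"}],
    max_completion_tokens=100,  # dropped instead of forwarded when unsupported
)
```

Alternatively, pointing Skyvern at a deployment whose model version accepts `max_completion_tokens` should avoid the error without any code changes.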