Bump openai and switch from dall-e-2 to dall-e-3 (#104998)

* Bump openai

* Fix tests

* Apply suggestions from code review

* Undo conftest changes

* Raise repair issue

* Explicitly use async mock for chat.completions.create

It is not always detected correctly as async because it uses a decorator
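
As a minimal sketch of the resulting pattern (the patch target is taken from the diff below), forcing the async mock explicitly:

    from unittest.mock import AsyncMock, patch

    # patch() normally swaps in an AsyncMock on its own when the target is
    # detected as a coroutine function, but the decorator on the openai
    # method hides that, so the tests request an AsyncMock explicitly.
    with patch(
        "openai.resources.chat.completions.AsyncCompletions.create",
        new_callable=AsyncMock,
    ) as mock_create:
        ...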

* Removed duplicated message

* ruff

* Compatibility with old pydantic versions

* Compatibility with old pydantic versions

* More tests

* Apply suggestions from code review

Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>

* Apply suggestions from code review

---------

Co-authored-by: Paulus Schoutsen <paulus@home-assistant.io>
Denis Shulyaka 2023-12-11 17:47:26 +03:00 committed by GitHub
parent c0314cd05c
commit 1242456ff1
10 changed files with 269 additions and 71 deletions
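
The diff below tracks the openai 0.x to 1.x migration: module-level helpers such as openai.ChatCompletion.acreate are replaced by methods on a client object, and responses are typed models instead of plain dicts. A minimal sketch of the new call surface, assuming an AsyncOpenAI client (the key and model name are placeholders):

    from openai import AsyncOpenAI

    client = AsyncOpenAI(api_key="sk-...")  # placeholder key

    async def ask(text: str) -> str:
        # Returns a typed ChatCompletion object rather than a dict, so
        # fields are attributes instead of keys.
        result = await client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": text}],
        )
        return result.choices[0].message.content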

@@ -1,7 +1,18 @@
 """Tests for the OpenAI integration."""
-from unittest.mock import patch
+from unittest.mock import AsyncMock, patch
 
-from openai import error
+from httpx import Response
+from openai import (
+    APIConnectionError,
+    AuthenticationError,
+    BadRequestError,
+    RateLimitError,
+)
+from openai.types.chat.chat_completion import ChatCompletion, Choice
+from openai.types.chat.chat_completion_message import ChatCompletionMessage
+from openai.types.completion_usage import CompletionUsage
+from openai.types.image import Image
+from openai.types.images_response import ImagesResponse
 import pytest
 from syrupy.assertion import SnapshotAssertion
 
@@ -9,6 +20,7 @@ from homeassistant.components import conversation
 from homeassistant.core import Context, HomeAssistant
 from homeassistant.exceptions import HomeAssistantError
 from homeassistant.helpers import area_registry as ar, device_registry as dr, intent
+from homeassistant.setup import async_setup_component
 
 from tests.common import MockConfigEntry
@@ -94,17 +106,30 @@ async def test_default_prompt(
         suggested_area="Test Area 2",
     )
     with patch(
-        "openai.ChatCompletion.acreate",
-        return_value={
-            "choices": [
-                {
-                    "message": {
-                        "role": "assistant",
-                        "content": "Hello, how can I help you?",
-                    }
-                }
-            ]
-        },
+        "openai.resources.chat.completions.AsyncCompletions.create",
+        new_callable=AsyncMock,
+        return_value=ChatCompletion(
+            id="chatcmpl-1234567890ABCDEFGHIJKLMNOPQRS",
+            choices=[
+                Choice(
+                    finish_reason="stop",
+                    index=0,
+                    message=ChatCompletionMessage(
+                        content="Hello, how can I help you?",
+                        role="assistant",
+                        function_call=None,
+                        tool_calls=None,
+                    ),
+                )
+            ],
+            created=1700000000,
+            model="gpt-3.5-turbo-0613",
+            object="chat.completion",
+            system_fingerprint=None,
+            usage=CompletionUsage(
+                completion_tokens=9, prompt_tokens=8, total_tokens=17
+            ),
+        ),
     ) as mock_create:
         result = await conversation.async_converse(
             hass, "hello", None, Context(), agent_id=mock_config_entry.entry_id
@@ -119,7 +144,11 @@ async def test_error_handling(
 ) -> None:
     """Test that the default prompt works."""
     with patch(
-        "openai.ChatCompletion.acreate", side_effect=error.ServiceUnavailableError
+        "openai.resources.chat.completions.AsyncCompletions.create",
+        new_callable=AsyncMock,
+        side_effect=RateLimitError(
+            response=Response(status_code=None, request=""), body=None, message=None
+        ),
     ):
         result = await conversation.async_converse(
             hass, "hello", None, Context(), agent_id=mock_config_entry.entry_id
@@ -140,8 +169,11 @@ async def test_template_error(
         },
     )
     with patch(
-        "openai.Model.list",
-    ), patch("openai.ChatCompletion.acreate"):
+        "openai.resources.models.AsyncModels.list",
+    ), patch(
+        "openai.resources.chat.completions.AsyncCompletions.create",
+        new_callable=AsyncMock,
+    ):
         await hass.config_entries.async_setup(mock_config_entry.entry_id)
         await hass.async_block_till_done()
         result = await conversation.async_converse(
@@ -169,15 +201,67 @@ async def test_conversation_agent(
     [
         (
             {"prompt": "Picture of a dog"},
-            {"prompt": "Picture of a dog", "size": "512x512"},
+            {
+                "prompt": "Picture of a dog",
+                "size": "1024x1024",
+                "quality": "standard",
+                "style": "vivid",
+            },
         ),
+        (
+            {
+                "prompt": "Picture of a dog",
+                "size": "1024x1792",
+                "quality": "hd",
+                "style": "vivid",
+            },
+            {
+                "prompt": "Picture of a dog",
+                "size": "1024x1792",
+                "quality": "hd",
+                "style": "vivid",
+            },
+        ),
+        (
+            {
+                "prompt": "Picture of a dog",
+                "size": "1792x1024",
+                "quality": "standard",
+                "style": "natural",
+            },
+            {
+                "prompt": "Picture of a dog",
+                "size": "1792x1024",
+                "quality": "standard",
+                "style": "natural",
+            },
+        ),
         (
             {"prompt": "Picture of a dog", "size": "256"},
-            {"prompt": "Picture of a dog", "size": "256x256"},
+            {
+                "prompt": "Picture of a dog",
+                "size": "1024x1024",
+                "quality": "standard",
+                "style": "vivid",
+            },
+        ),
+        (
+            {"prompt": "Picture of a dog", "size": "512"},
+            {
+                "prompt": "Picture of a dog",
+                "size": "1024x1024",
+                "quality": "standard",
+                "style": "vivid",
+            },
         ),
         (
             {"prompt": "Picture of a dog", "size": "1024"},
-            {"prompt": "Picture of a dog", "size": "1024x1024"},
+            {
+                "prompt": "Picture of a dog",
+                "size": "1024x1024",
+                "quality": "standard",
+                "style": "vivid",
+            },
         ),
     ],
 )
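
dall-e-3 drops the 256x256 and 512x512 sizes and adds quality ("standard" or "hd") and style ("vivid" or "natural"), which is why the legacy sizes in the cases above are normalized to 1024x1024 with the new defaults. A minimal sketch of the corresponding 1.x call (client as in the earlier sketch):

    # Valid dall-e-3 sizes: 1024x1024, 1024x1792, 1792x1024.
    image = await client.images.generate(
        model="dall-e-3",
        prompt="Picture of a dog",
        size="1024x1024",
        quality="standard",
        style="vivid",
        response_format="url",
        n=1,
    )
    url = image.data[0].url
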
@@ -190,11 +274,22 @@ async def test_generate_image_service(
 ) -> None:
     """Test generate image service."""
     service_data["config_entry"] = mock_config_entry.entry_id
-    expected_args["api_key"] = mock_config_entry.data["api_key"]
+    expected_args["model"] = "dall-e-3"
+    expected_args["response_format"] = "url"
+    expected_args["n"] = 1
     with patch(
-        "openai.Image.acreate", return_value={"data": [{"url": "A"}]}
+        "openai.resources.images.AsyncImages.generate",
+        return_value=ImagesResponse(
+            created=1700000000,
+            data=[
+                Image(
+                    b64_json=None,
+                    revised_prompt="A clear and detailed picture of an ordinary canine",
+                    url="A",
+                )
+            ],
+        ),
     ) as mock_create:
         response = await hass.services.async_call(
             "openai_conversation",
@@ -204,7 +299,10 @@ async def test_generate_image_service(
             return_response=True,
         )
-    assert response == {"url": "A"}
+    assert response == {
+        "url": "A",
+        "revised_prompt": "A clear and detailed picture of an ordinary canine",
+    }
     assert len(mock_create.mock_calls) == 1
     assert mock_create.mock_calls[0][2] == expected_args
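
A note on the final assertion: each entry of mock_calls is a (name, args, kwargs) triple, so mock_calls[0][2] is the keyword-argument dict of the first call; an equivalent, more readable spelling is:

    assert mock_create.call_args.kwargs == expected_args
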
@@ -216,7 +314,10 @@ async def test_generate_image_service_error(
 ) -> None:
     """Test generate image service handles errors."""
     with patch(
-        "openai.Image.acreate", side_effect=error.ServiceUnavailableError("Reason")
+        "openai.resources.images.AsyncImages.generate",
+        side_effect=RateLimitError(
+            response=Response(status_code=None, request=""), body=None, message="Reason"
+        ),
     ), pytest.raises(HomeAssistantError, match="Error generating image: Reason"):
         await hass.services.async_call(
             "openai_conversation",
@@ -228,3 +329,34 @@
         blocking=True,
         return_response=True,
     )
+
+
+@pytest.mark.parametrize(
+    ("side_effect", "error"),
+    [
+        (APIConnectionError(request=None), "Connection error"),
+        (
+            AuthenticationError(
+                response=Response(status_code=None, request=""), body=None, message=None
+            ),
+            "Invalid API key",
+        ),
+        (
+            BadRequestError(
+                response=Response(status_code=None, request=""), body=None, message=None
+            ),
+            "openai_conversation integration not ready yet: None",
+        ),
+    ],
+)
+async def test_init_error(
+    hass: HomeAssistant, mock_config_entry: MockConfigEntry, caplog, side_effect, error
+) -> None:
+    """Test initialization errors."""
+    with patch(
+        "openai.resources.models.AsyncModels.list",
+        side_effect=side_effect,
+    ):
+        assert await async_setup_component(hass, "openai_conversation", {})
+        await hass.async_block_till_done()
+        assert error in caplog.text
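
The log strings asserted here are produced by the integration's setup code, which is not part of this diff. Purely as an illustration of the mapping being exercised, a hypothetical sketch (not the actual component code):

    import logging

    import openai

    _LOGGER = logging.getLogger(__name__)

    async def check_client(client: openai.AsyncOpenAI) -> bool:
        # Hypothetical: translate openai 1.x exceptions into the messages
        # the test expects to find in caplog.
        try:
            await client.models.list()
        except openai.AuthenticationError:
            _LOGGER.error("Invalid API key")
            return False
        except openai.APIConnectionError:
            _LOGGER.error("Connection error")
            return False
        return True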