Testing AI Chat Model Integration in gpt-engineer
This test suite validates the core AI interaction functionality in gpt-engineer: starting a conversation (AI.start), continuing it with follow-up prompts (AI.next), and tracking cumulative token usage through the token usage log. A fake chat model is monkeypatched in, so every test runs deterministically without a live LLM.
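As background, the stubbing pattern the suite relies on works roughly like this (a minimal, illustrative sketch, not part of the test file): FakeListChatModel from langchain_community serves its configured responses in order, so tests can assert on exact strings without a real model.

from langchain_community.chat_models.fake import FakeListChatModel

fake = FakeListChatModel(responses=["response1", "response2"])
print(fake.invoke("first prompt").content)   # "response1"
print(fake.invoke("second prompt").content)  # "response2"

Monkeypatching AI._create_chat_model at the class level means every AI instance created inside a test builds this fake instead of a real backend, which keeps the tests deterministic and offline.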
The test file, tests/core/test_ai.py from the gpt-engineer-org/gpt-engineer repository:
from langchain.chat_models.base import BaseChatModel
from langchain_community.chat_models.fake import FakeListChatModel

from gpt_engineer.core.ai import AI


def mock_create_chat_model(self) -> BaseChatModel:
    # Stand-in for AI._create_chat_model: serves canned responses
    # instead of creating a real chat model backend.
    return FakeListChatModel(responses=["response1", "response2", "response3"])
def test_start(monkeypatch):
    # arrange: swap in the fake chat model so no real API call is made
    monkeypatch.setattr(AI, "_create_chat_model", mock_create_chat_model)
    ai = AI("gpt-4")

    # act
    response_messages = ai.start("system prompt", "user prompt", step_name="step name")

    # assert: the fake model's first canned response is the last message
    assert response_messages[-1].content == "response1"
def test_next(monkeypatch):
    # arrange
    monkeypatch.setattr(AI, "_create_chat_model", mock_create_chat_model)
    ai = AI("gpt-4")
    response_messages = ai.start("system prompt", "user prompt", step_name="step name")

    # act: continue the conversation with a follow-up prompt
    response_messages = ai.next(
        response_messages, "next user prompt", step_name="step name"
    )

    # assert: the fake model serves its second canned response
    assert response_messages[-1].content == "response2"
def test_token_logging(monkeypatch):
    # arrange
    monkeypatch.setattr(AI, "_create_chat_model", mock_create_chat_model)
    ai = AI("gpt-4")

    # act
    response_messages = ai.start("system prompt", "user prompt", step_name="step name")
    usage_cost_after_start = ai.token_usage_log.usage_cost()
    ai.next(response_messages, "next user prompt", step_name="step name")
    usage_cost_after_next = ai.token_usage_log.usage_cost()

    # assert: cost is logged after the first call and grows with each turn
    assert usage_cost_after_start > 0
    assert usage_cost_after_next > usage_cost_after_start
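To run just this suite with a standard pytest setup, something like the following should work from the repository root:

python -m pytest tests/core/test_ai.py -v

Because _create_chat_model is patched in every test, no API key or network access should be needed.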