Testing the CondenseQuestionChatEngine Implementation in llama_index
This test suite validates the CondenseQuestionChatEngine in llama_index, focusing on chat message handling and history management. It verifies that follow-up messages are condensed into standalone questions using the accumulated chat history, that a pre-seeded history is honored on the first call, and that reset() clears state between conversations.
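For context, here is a minimal usage sketch of the engine under test. The document and index are illustrative placeholders, and an LLM must be configured (for example via Settings.llm) for this to run; any BaseQueryEngine can stand in for the one built here.

from llama_index.core import Document, VectorStoreIndex
from llama_index.core.chat_engine.condense_question import (
    CondenseQuestionChatEngine,
)

# Illustrative query engine; the chat engine accepts any BaseQueryEngine.
index = VectorStoreIndex.from_documents(
    [Document(text="Paris is the capital of France.")]
)
chat_engine = CondenseQuestionChatEngine.from_defaults(
    query_engine=index.as_query_engine()
)

print(chat_engine.chat("What is the capital of France?"))
# The follow-up is condensed into a standalone question using the history.
print(chat_engine.chat("How many people live there?"))
chat_engine.reset()  # clear the accumulated history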
Source file: llama-index-core/tests/chat_engine/test_condense_question.py (run-llama/llama_index)
from unittest.mock import Mock

from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.base.response.schema import Response
from llama_index.core.chat_engine.condense_question import (
    CondenseQuestionChatEngine,
)
def test_condense_question_chat_engine(patch_llm_predictor) -> None:
    # The mocked query engine echoes whatever (condensed) question it receives,
    # so the response string exposes exactly what the engine forwarded to it.
    query_engine = Mock(spec=BaseQueryEngine)
    query_engine.query.side_effect = lambda x: Response(response=x)
    engine = CondenseQuestionChatEngine.from_defaults(query_engine=query_engine)

    engine.reset()
    response = engine.chat("Test message 1")
    # With no prior history, the first message passes through unchanged.
    assert str(response) == "Test message 1"

    response = engine.chat("Test message 2")
    # The patched predictor echoes the condense prompt's template variables,
    # so the "condensed" question reflects the new message plus prior history.
    assert str(response) == (
        "{"
        "'question': 'Test message 2', "
        "'chat_history': 'user: Test message 1\nassistant: Test message 1'"
        "}"
    )

    engine.reset()
    response = engine.chat("Test message 3")
    # After reset(), the history is cleared and the message passes through again.
    assert str(response) == "Test message 3"

def test_condense_question_chat_engine_with_init_history(patch_llm_predictor) -> None:
    query_engine = Mock(spec=BaseQueryEngine)
    query_engine.query.side_effect = lambda x: Response(response=x)
    # Seed the engine with a pre-existing conversation instead of starting empty.
    engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine,
        chat_history=[
            ChatMessage(role=MessageRole.USER, content="test human message"),
            ChatMessage(role=MessageRole.ASSISTANT, content="test ai message"),
        ],
    )

    response = engine.chat("new human message")
    # The injected history is included when condensing the very first chat call.
    assert str(response) == (
        "{'question': 'new human message', 'chat_history': 'user: test human "
        "message\nassistant: test ai message'}"
    )
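The dict-shaped assertion strings come from the patch_llm_predictor fixture, which is defined in the suite's conftest and not shown here. A hypothetical stand-in with the same observable behavior, patching LLM.predict to echo its template variables, could look like the sketch below; the real fixture and import paths may differ.

from typing import Any

import pytest

from llama_index.core.llms.llm import LLM
from llama_index.core.prompts import BasePromptTemplate


@pytest.fixture()
def patch_llm_predictor(monkeypatch: pytest.MonkeyPatch) -> None:
    def _echo_predict(
        self: LLM, prompt: BasePromptTemplate, **prompt_args: Any
    ) -> str:
        # Echo the prompt's variables verbatim; with the condense prompt this
        # yields strings like "{'question': ..., 'chat_history': ...}".
        return str(prompt_args)

    monkeypatch.setattr(LLM, "predict", _echo_predict)

Because the mocked query engine also echoes its input, each assertion reveals exactly which question the engine forwarded: the raw message when the history is empty, and the condensed form otherwise.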