## Added
- conftest.py: Shared fixtures and mocks
- test_core/test_config.py: 35 tests for Settings
- test_core/test_logging.py: 15 tests for logging
- test_api/test_chat.py: 27 tests for chat endpoints
- test_api/test_health.py: 27 tests for health endpoints
- test_services/test_document_service.py: 38 tests
- test_services/test_rag_service.py: 66 tests
- test_services/test_vector_store.py: 32 tests
## Coverage
- auth.py: 100%
- config.py: 100%
- logging.py: 100%
- chat.py: 100%
- health.py: 100%
- document_service.py: 96%
- rag_service.py: 100%
- vector_store.py: 100%
Total: 240 tests passing, 64% coverage
🧪 Core functionality fully tested
271 lines
7.4 KiB
Python
"""Shared fixtures for AgenticRAG unit tests."""
|
|
|
|
import pytest
|
|
from unittest.mock import Mock, AsyncMock, MagicMock, patch
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
# Add src to path
|
|
sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent.parent / "src"))
|
|
|
|
|
|
@pytest.fixture
def mock_settings():
    """Create a mock Settings object pre-populated with test configuration values."""
    # All settings attributes in one table; applied to the Mock below.
    values = {
        "app_name": "AgenticRAG",
        "app_version": "2.0.0",
        "debug": True,
        "cors_origins": ["http://localhost:3000"],
        "jwt_secret": "test-secret",
        "jwt_algorithm": "HS256",
        "access_token_expire_minutes": 30,
        "admin_api_key": "test-admin-key",
        "qdrant_host": "localhost",
        "qdrant_port": 6333,
        "max_file_size": 10 * 1024 * 1024,
        "upload_dir": "./uploads",
        "default_llm_provider": "openai",
        "default_llm_model": "gpt-4o-mini",
        "openai_api_key": "test-openai-key",
        "zai_api_key": "test-zai-key",
        "opencode_zen_api_key": "test-opencode-key",
        "openrouter_api_key": "test-openrouter-key",
        "anthropic_api_key": "test-anthropic-key",
        "google_api_key": "test-google-key",
        "mistral_api_key": "test-mistral-key",
        "azure_api_key": "test-azure-key",
        "azure_endpoint": "https://test.azure.com",
        "azure_api_version": "2024-02-01",
        "embedding_provider": "openai",
        "embedding_model": "text-embedding-3-small",
        "embedding_api_key": "test-embedding-key",
        "redis_url": "redis://localhost:6379/0",
    }
    settings = Mock()
    for attr, value in values.items():
        setattr(settings, attr, value)
    return settings
|
|
|
|
|
|
@pytest.fixture
def mock_qdrant_client():
    """Create a mock QdrantVectorstore client with canned search results."""
    # Two fake hits in descending score order, as a real search would return.
    search_hits = [
        {"id": "1", "text": "Test chunk 1", "score": 0.95},
        {"id": "2", "text": "Test chunk 2", "score": 0.85},
    ]
    collection_info = {"name": "documents", "vectors_count": 100}

    client = Mock()
    client.create_collection = Mock(return_value=None)
    client.search = Mock(return_value=search_hits)
    client.get_collection = Mock(return_value=collection_info)
    return client
|
|
|
|
|
|
@pytest.fixture
def mock_embedder():
    """Create a mock embedder exposing sync ``embed`` and async ``aembed``."""
    dims = 1536  # matches text-embedding-3-small output dimensionality
    embedder = Mock()
    embedder.embed = Mock(return_value=[0.1] * dims)
    embedder.aembed = AsyncMock(return_value=[0.1] * dims)
    return embedder
|
|
|
|
|
|
@pytest.fixture
def mock_llm_client():
    """Create a mock LLM client whose async ``invoke`` returns a canned response."""
    canned_response = Mock(
        text="Test response",
        model="gpt-4o-mini",
        usage={"prompt_tokens": 100, "completion_tokens": 50},
    )
    client = Mock()
    client.invoke = AsyncMock(return_value=canned_response)
    return client
|
|
|
|
|
|
@pytest.fixture
def mock_ingestion_pipeline():
    """Create a mock IngestionPipeline whose ``run`` yields two fake chunks."""
    chunks = [
        {"id": "1", "text": "Chunk 1"},
        {"id": "2", "text": "Chunk 2"},
    ]
    pipeline = Mock()
    pipeline.run = Mock(return_value=chunks)
    return pipeline
|
|
|
|
|
|
@pytest.fixture
def mock_openai_embedder():
    """Create a mock OpenAIEmbedder with sync and async embedding methods."""
    vector_size = 1536
    embedder = Mock()
    embedder.embed = Mock(return_value=[0.1] * vector_size)
    embedder.aembed = AsyncMock(return_value=[0.1] * vector_size)
    return embedder
|
|
|
|
|
|
@pytest.fixture
def mock_chunk_embedder():
    """Create a mock ChunkEmbedder.

    Plain ``Mock``: attribute access is auto-stubbed, nothing is configured.
    """
    return Mock()
|
|
|
|
|
|
@pytest.fixture
def mock_docling_parser():
    """Create a mock DoclingParser.

    Plain ``Mock``: attribute access is auto-stubbed, nothing is configured.
    """
    return Mock()
|
|
|
|
|
|
@pytest.fixture
def mock_node_splitter():
    """Create a mock NodeSplitter.

    Plain ``Mock``: attribute access is auto-stubbed, nothing is configured.
    """
    return Mock()
|
|
|
|
|
|
# (module path, singleton attribute) pairs whose cached instance is cleared
# between tests. Each module is imported lazily; missing modules are skipped.
_SINGLETON_ATTRS = (
    ("agentic_rag.core.config", "_settings"),
    ("agentic_rag.services.vector_store", "_vector_store"),
    ("agentic_rag.services.document_service", "_document_service"),
    ("agentic_rag.services.rag_service", "_rag_service"),
)


def _reset_singletons_once():
    """Best-effort reset of all module-level singletons and the LLM client cache.

    Modules that cannot be imported in the current environment are skipped
    silently, preserving the original try/except-ImportError behavior.
    """
    import importlib

    for module_path, attr in _SINGLETON_ATTRS:
        try:
            module = importlib.import_module(module_path)
        except ImportError:
            continue
        setattr(module, attr, None)

    # The LLM factory keeps a dict cache rather than a single instance,
    # so it is cleared instead of nulled.
    try:
        from agentic_rag.core import llm_factory

        llm_factory._client_cache.clear()
    except ImportError:
        pass


@pytest.fixture(autouse=True)
def reset_singletons():
    """Reset singleton instances before and after each test.

    Autouse: every test starts and finishes with fresh singleton state so
    no cached Settings/service/client leaks between tests.
    """
    _reset_singletons_once()
    yield
    # Reset after the test as well, so state created by the test is dropped.
    _reset_singletons_once()
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def mock_datapizza_modules():
    """Mock datapizza modules that may not be installed.

    Builds a fake ``datapizza`` package tree out of ``Mock`` objects and
    patches it into ``sys.modules`` for the duration of each test, so
    imports of datapizza submodules resolve without the real dependency.
    """
    # Leaf submodules, each exposing the class name the code under test imports.
    qdrant_mod = Mock()
    qdrant_mod.QdrantVectorstore = Mock()
    vectorstores_mod = Mock()
    vectorstores_mod.qdrant = qdrant_mod

    openai_embedder_mod = Mock()
    openai_embedder_mod.OpenAIEmbedder = Mock()
    embedders_mod = Mock()
    embedders_mod.ChunkEmbedder = Mock()
    embedders_mod.openai = openai_embedder_mod

    docling_mod = Mock()
    docling_mod.DoclingParser = Mock()
    parsers_mod = Mock()
    parsers_mod.docling = docling_mod
    splitters_mod = Mock()
    splitters_mod.NodeSplitter = Mock()
    modules_mod = Mock()
    modules_mod.parsers = parsers_mod
    modules_mod.splitters = splitters_mod

    pipeline_mod = Mock()
    pipeline_mod.IngestionPipeline = Mock()

    # One client submodule mock per LLM provider.
    clients_mod = Mock()
    for provider in ("openai", "anthropic", "google", "mistral", "azure"):
        setattr(clients_mod, provider, Mock())

    # Root package wired to every submodule above.
    datapizza_mod = Mock()
    datapizza_mod.vectorstores = vectorstores_mod
    datapizza_mod.embedders = embedders_mod
    datapizza_mod.modules = modules_mod
    datapizza_mod.pipeline = pipeline_mod
    datapizza_mod.clients = clients_mod

    fake_modules = {
        "datapizza": datapizza_mod,
        "datapizza.vectorstores": vectorstores_mod,
        "datapizza.vectorstores.qdrant": qdrant_mod,
        "datapizza.embedders": embedders_mod,
        "datapizza.embedders.openai": openai_embedder_mod,
        "datapizza.modules": modules_mod,
        "datapizza.modules.parsers": parsers_mod,
        "datapizza.modules.parsers.docling": docling_mod,
        "datapizza.modules.splitters": splitters_mod,
        "datapizza.pipeline": pipeline_mod,
        "datapizza.clients": clients_mod,
        "datapizza.clients.openai": clients_mod.openai,
        "datapizza.clients.anthropic": clients_mod.anthropic,
        "datapizza.clients.google": clients_mod.google,
        "datapizza.clients.mistral": clients_mod.mistral,
        "datapizza.clients.azure": clients_mod.azure,
    }

    with patch.dict("sys.modules", fake_modules):
        yield
|