anderson-ufrj commited on
Commit
d392f49
·
1 Parent(s): 6891803

test(integration): add comprehensive integration tests for main flows

Browse files

- Test complete investigation flow from creation to completion
- Add tests for multi-agent orchestration and coordination
- Test chat conversation flow with context switching
- Verify streaming functionality and error recovery
- Test comprehensive analysis with parallel processing
- Add caching integration tests
- Test end-to-end flow from user request to final report
- Verify concurrent user flow isolation
- Test circuit breaker integration in main flows
- Add fallback mechanism tests
- Cover error handling and timeout scenarios
- Test data flow between services and agents

Files changed (1) hide show
  1. tests/integration/test_main_flows.py +665 -0
tests/integration/test_main_flows.py ADDED
@@ -0,0 +1,665 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Integration tests for main application flows.
3
+ Tests end-to-end workflows including investigation, chat, and analysis.
4
+ """
5
+
6
import asyncio
from contextlib import asynccontextmanager
from datetime import datetime
from typing import Any, Dict, List
from unittest.mock import AsyncMock, MagicMock, Mock, patch

import pytest

from src.agents.anita import AnalystAgent
from src.agents.deodoro import AgentContext, AgentMessage, AgentResponse, AgentStatus
from src.agents.tiradentes import ReporterAgent
from src.agents.zumbi import InvestigatorAgent
from src.core.exceptions import (
    AgentExecutionError,
    InvalidRequestError,
    InvestigationNotFoundError,
)
from src.models.investigation import (
    Investigation,
    InvestigationRequest,
    InvestigationResponse,
    InvestigationStatus,
)
from src.services.analysis_service import AnalysisService
from src.services.chat_service import ChatMessage, ChatService
from src.services.investigation_service import InvestigationService
28
+
29
+
30
@pytest.fixture
async def investigation_service():
    """Provide a fresh InvestigationService instance for each test."""
    yield InvestigationService()
35
+
36
+
37
@pytest.fixture
async def chat_service():
    """Provide a fresh ChatService instance for each test."""
    yield ChatService()
42
+
43
+
44
@pytest.fixture
async def analysis_service():
    """Provide a fresh AnalysisService instance for each test."""
    yield AnalysisService()
49
+
50
+
51
@pytest.fixture
def mock_agent_pool():
    """Build an agent pool whose acquire() hands out canned agent mocks."""

    def _make_agent(spec_cls, agent_name, payload):
        # One completed-agent mock with a fixed result payload.
        agent = AsyncMock(spec=spec_cls)
        agent.name = agent_name
        agent.process.return_value = AgentResponse(
            agent_name=agent_name,
            status=AgentStatus.COMPLETED,
            result=payload,
        )
        return agent

    zumbi = _make_agent(
        InvestigatorAgent,
        "Zumbi",
        {
            "anomalies_detected": 3,
            "findings": [
                {"type": "price_anomaly", "severity": "high"},
                {"type": "temporal_pattern", "severity": "medium"},
                {"type": "vendor_concentration", "severity": "low"},
            ],
            "confidence": 0.85,
        },
    )
    anita = _make_agent(
        AnalystAgent,
        "Anita",
        {
            "patterns": ["seasonal_spike", "vendor_clustering"],
            "correlations": [{"field1": "price", "field2": "date", "strength": 0.72}],
            "risk_score": 0.68,
        },
    )
    tiradentes = _make_agent(
        ReporterAgent,
        "Tiradentes",
        {
            "report": "# Investigation Report\n\n## Summary\nMultiple anomalies detected...",
            "executive_summary": "3 anomalies found with high confidence",
            "recommendations": ["Review vendor contracts", "Audit pricing mechanism"],
        },
    )

    agents_by_type = {
        InvestigatorAgent: zumbi,
        AnalystAgent: anita,
        ReporterAgent: tiradentes,
    }

    pool = AsyncMock()

    @asynccontextmanager
    async def fake_acquire(agent_type, context):
        # Unknown agent types get a throwaway mock, mirroring the lenient default.
        yield agents_by_type.get(agent_type, AsyncMock())

    pool.acquire = fake_acquire
    return pool
111
+
112
+
113
@pytest.fixture
def mock_data_service():
    """Data service stub whose contract search returns two canned records."""
    canned_contracts = [
        {
            "id": "12345",
            "vendor": "Empresa XYZ",
            "value": 1000000.00,
            "date": "2024-01-15",
            "status": "active",
        },
        {
            "id": "67890",
            "vendor": "Empresa ABC",
            "value": 2500000.00,
            "date": "2024-02-01",
            "status": "active",
        },
    ]
    service = AsyncMock()
    service.search_contracts.return_value = {
        "contracts": canned_contracts,
        "total": 2,
        "metadata": {"source": "portal_transparencia"},
    }
    return service
138
+
139
+
140
class TestInvestigationFlow:
    """Test the complete investigation flow, from creation to completion."""

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_create_and_run_investigation(
        self,
        investigation_service,
        mock_agent_pool,
        mock_data_service,
    ):
        """Test creating and running a complete investigation."""
        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool):
            with patch('src.services.investigation_service.DataService', return_value=mock_data_service):
                # Create investigation
                investigation = await investigation_service.create(
                    user_id="test_user",
                    query="Analyze contracts from company XYZ",
                    data_sources=["portal_transparencia"],
                )

                assert investigation.id is not None
                assert investigation.status == "created"
                assert investigation.user_id == "test_user"

                # Run investigation (done by a background worker in production).
                result = await investigation_service.run_investigation(investigation.id)

                assert result.status == "completed"
                assert result.confidence_score > 0.7
                assert "anomalies_detected" in result.metadata
                assert result.metadata["anomalies_detected"] == 3

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_investigation_with_multiple_agents(
        self,
        investigation_service,
        mock_agent_pool,
        mock_data_service,
    ):
        """Test investigation using multiple agents in sequence."""
        # BUG FIX: pool.acquire is a plain function (not a Mock), so it has no
        # .call_count attribute. Wrap it and count acquisitions explicitly.
        acquired_types = []
        inner_acquire = mock_agent_pool.acquire

        @asynccontextmanager
        async def counting_acquire(agent_type, context):
            acquired_types.append(agent_type)
            async with inner_acquire(agent_type, context) as agent:
                yield agent

        mock_agent_pool.acquire = counting_acquire

        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool):
            with patch('src.services.investigation_service.DataService', return_value=mock_data_service):
                investigation = await investigation_service.create(
                    user_id="test_user",
                    query="Deep analysis of government contracts",
                    analysis_type="comprehensive",
                )

                result = await investigation_service.run_investigation(investigation.id)

                # At least Zumbi, Anita and Tiradentes must have been acquired.
                assert len(acquired_types) >= 3

                # The merged result carries data from every agent.
                assert "findings" in result.metadata
                assert "patterns" in result.metadata
                assert "report" in result.metadata

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_investigation_error_handling(
        self,
        investigation_service,
        mock_agent_pool,
    ):
        """Test error handling when an agent cannot be acquired."""
        # BUG FIX: the original set .side_effect on a plain function, which is
        # silently ignored (acquire never raised, so pytest.raises could not
        # pass). Install an acquire() replacement that actually raises.
        @asynccontextmanager
        async def failing_acquire(agent_type, context):
            raise AgentExecutionError("Agent failed")
            yield  # pragma: no cover - unreachable; keeps this an async generator

        mock_agent_pool.acquire = failing_acquire

        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool):
            investigation = await investigation_service.create(
                user_id="test_user",
                query="Test query",
            )

            with pytest.raises(AgentExecutionError):
                await investigation_service.run_investigation(investigation.id)

            # The failure must be reflected in the stored investigation status.
            updated = await investigation_service.get(investigation.id)
            assert updated.status == "failed"

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_investigation_timeout(self, investigation_service):
        """Test investigation timeout handling with a deliberately slow agent."""
        slow_agent = AsyncMock()

        async def slow_process(*args, **kwargs):
            await asyncio.sleep(10)  # far longer than the 1s timeout below

        slow_agent.process = slow_process

        mock_pool = AsyncMock()

        @asynccontextmanager
        async def mock_acquire(agent_type, context):
            yield slow_agent

        mock_pool.acquire = mock_acquire

        with patch('src.agents.get_agent_pool', return_value=mock_pool):
            investigation = await investigation_service.create(
                user_id="test_user",
                query="Test timeout",
                timeout=1,  # seconds
            )

            with pytest.raises(asyncio.TimeoutError):
                await investigation_service.run_investigation(investigation.id)
254
+
255
+
256
class TestChatFlow:
    """Exercise the chat service end to end: sessions, routing, streaming, retries."""

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_chat_conversation_flow(self, chat_service, mock_agent_pool):
        """A two-turn conversation keeps its session and records four messages."""
        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool):
            sid = await chat_service.create_session("test_user")

            first = await chat_service.send_message(
                session_id=sid,
                message="Analyze recent government contracts",
            )
            assert first.session_id == sid
            assert first.content is not None
            assert first.metadata.get("agent") == "Zumbi"

            second = await chat_service.send_message(
                session_id=sid,
                message="Can you provide more details?",
            )
            assert second.session_id == sid
            # Two user turns plus two assistant replies.
            assert len(await chat_service.get_history(sid)) == 4

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_chat_with_context_switching(self, chat_service, mock_agent_pool):
        """Requests of different kinds should be routed to different agents."""
        prompts = (
            "Investigate contract anomalies",
            "Analyze the patterns found",
            "Generate a report of findings",
        )
        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool):
            sid = await chat_service.create_session("test_user")
            for prompt in prompts:
                await chat_service.send_message(session_id=sid, message=prompt)

            history = await chat_service.get_history(sid)
            agents_used = {
                msg.metadata.get("agent")
                for msg in history
                if msg.role == "assistant"
            }
            assert len(agents_used) >= 2  # more than one agent served the session

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_chat_streaming(self, chat_service, mock_agent_pool):
        """Streaming yields at least one chunk and a non-empty joined response."""
        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool):
            sid = await chat_service.create_session("test_user")

            received = [
                chunk
                async for chunk in chat_service.stream_message(
                    session_id=sid,
                    message="Generate detailed analysis",
                )
            ]

            assert len(received) > 0
            joined = "".join(part.content for part in received)
            assert len(joined) > 0

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_chat_error_recovery(self, chat_service):
        """First agent call fails, the retry succeeds; the service must recover."""
        call_count = 0

        async def process_then_recover(*args, **kwargs):
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                raise Exception("Temporary failure")
            return AgentResponse(
                agent_name="TestAgent",
                status=AgentStatus.COMPLETED,
                result={"message": "Success after retry"},
            )

        flaky_agent = AsyncMock()
        flaky_agent.process = process_then_recover

        pool = AsyncMock()

        @asynccontextmanager
        async def acquire_flaky(agent_type, context):
            yield flaky_agent

        pool.acquire = acquire_flaky

        with patch('src.agents.get_agent_pool', return_value=pool):
            sid = await chat_service.create_session("test_user")

            # Should succeed after the internal retry.
            reply = await chat_service.send_message(
                session_id=sid,
                message="Test retry mechanism",
            )

            assert reply.content is not None
            assert call_count == 2  # one failure, one successful retry
375
+
376
+
377
class TestAnalysisFlow:
    """Exercise the analysis service: comprehensive, parallel, and cached runs."""

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_comprehensive_analysis(
        self,
        analysis_service,
        mock_agent_pool,
        mock_data_service,
    ):
        """A multi-type analysis returns anomaly, pattern, and risk sections."""
        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool), \
             patch('src.services.analysis_service.DataService', return_value=mock_data_service):
            outcome = await analysis_service.analyze_contracts(
                filters={"vendor": "Empresa XYZ"},
                analysis_types=["anomaly", "pattern", "risk"],
            )

            for section in ("anomalies", "patterns", "risk_assessment"):
                assert section in outcome
            assert outcome["anomalies"]["count"] == 3
            assert outcome["risk_assessment"]["score"] > 0.5

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_parallel_analysis(self, analysis_service, mock_agent_pool):
        """Parallel execution fans out to the processor and aggregates per-agent rows."""

        def _success(agent_name, payload):
            # One successful parallel-processor outcome for a single agent.
            return Mock(
                success=True,
                result=AgentResponse(
                    agent_name=agent_name,
                    status=AgentStatus.COMPLETED,
                    result=payload,
                ),
            )

        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool):
            with patch('src.agents.parallel_processor.parallel_processor') as fake_parallel:
                fake_parallel.execute_parallel.return_value = [
                    _success("Zumbi", {"anomalies": 5}),
                    _success("Anita", {"patterns": 3}),
                ]

                rows = await analysis_service.parallel_analysis(
                    data_sources=["contracts", "payments"],
                    agents=["zumbi", "anita"],
                )

                assert fake_parallel.execute_parallel.called
                assert len(rows) == 2
                assert rows[0]["agent"] == "Zumbi"
                assert rows[1]["agent"] == "Anita"

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_analysis_caching(self, analysis_service, mock_agent_pool):
        """A cache miss computes and stores; a later hit returns the cached payload."""
        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool):
            with patch('src.services.cache_service.CacheService') as cache_cls:
                cache = AsyncMock()
                cache_cls.return_value = cache

                # Miss: the analysis function runs and its result is stored.
                cache.get.return_value = None
                await analysis_service.analyze_with_cache(
                    query_hash="test_hash",
                    analysis_func=AsyncMock(return_value={"data": "results"}),
                )
                assert cache.set.called

                # Hit: the cached payload is returned without recomputation.
                cache.get.return_value = {"data": "cached_results"}
                cached = await analysis_service.analyze_with_cache(
                    query_hash="test_hash",
                    analysis_func=AsyncMock(),
                )
                assert cached["data"] == "cached_results"
466
+
467
+
468
class TestEndToEndFlow:
    """Drive the whole pipeline: chat request -> investigation -> analysis -> report."""

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_full_investigation_to_report_flow(
        self,
        investigation_service,
        chat_service,
        analysis_service,
        mock_agent_pool,
        mock_data_service,
    ):
        """Walk a single user request through every service to the final report."""
        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool), \
             patch('src.services.investigation_service.DataService', return_value=mock_data_service):
            # 1-2. User opens a chat session and asks for an investigation.
            sid = await chat_service.create_session("test_user")
            await chat_service.send_message(
                session_id=sid,
                message="I want to investigate suspicious contracts from Empresa XYZ",
            )

            # 3-4. The system materializes and runs the investigation.
            investigation = await investigation_service.create(
                user_id="test_user",
                query="Investigate Empresa XYZ contracts",
                session_id=sid,
            )
            outcome = await investigation_service.run_investigation(investigation.id)

            # 5-6. Analysis and the final report build on the investigation output.
            analysis = await analysis_service.analyze_investigation(
                investigation_id=investigation.id,
            )
            report = await chat_service.send_message(
                session_id=sid,
                message="Generate executive report of findings",
            )

            # Verify the complete flow.
            assert outcome.status == "completed"
            assert analysis["anomalies"]["count"] > 0
            assert "executive_summary" in report.metadata

            # The chat history doubles as an audit trail of the whole exchange.
            assert len(await chat_service.get_history(sid)) >= 4

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_concurrent_user_flows(
        self,
        investigation_service,
        chat_service,
        mock_agent_pool,
    ):
        """Three users run investigations concurrently without cross-talk."""
        users = ["user1", "user2", "user3"]

        with patch('src.agents.get_agent_pool', return_value=mock_agent_pool):
            # Each user gets an independent chat session.
            for user in users:
                await chat_service.create_session(user)

            # Create all investigations concurrently.
            investigations = await asyncio.gather(*(
                investigation_service.create(
                    user_id=user,
                    query=f"Investigation for {user}",
                )
                for user in users
            ))

            assert len(investigations) == 3
            assert all(inv.user_id in users for inv in investigations)

            # Process them concurrently as well.
            outcomes = await asyncio.gather(*(
                investigation_service.run_investigation(inv.id)
                for inv in investigations
            ))

            assert all(o.status == "completed" for o in outcomes)
            # Isolation: each result belongs to a distinct user.
            assert len({o.user_id for o in outcomes}) == 3
572
+
573
+
574
class TestErrorRecoveryFlow:
    """Resilience paths: circuit breakers and emergency fallbacks."""

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_circuit_breaker_integration(
        self,
        investigation_service,
        mock_data_service,
    ):
        """Repeated data-service failures trip the breaker; it recovers after the timeout."""
        from src.infrastructure.resilience.circuit_breaker import circuit_breaker_manager

        # Three failures in a row, then a successful (empty) result set.
        mock_data_service.search_contracts.side_effect = [
            Exception("Service unavailable"),
            Exception("Service unavailable"),
            Exception("Service unavailable"),
            {"contracts": [], "total": 0},
        ]

        with patch('src.services.investigation_service.DataService', return_value=mock_data_service):
            # Each failing run should count against the breaker and open the circuit.
            for attempt in range(3):
                failing = await investigation_service.create(
                    user_id="test_user",
                    query=f"Test {attempt}",
                )
                with pytest.raises(Exception):
                    await investigation_service.run_investigation(failing.id)

            # Breaker stats should now reflect the failing service.
            circuit_breaker_manager.get_all_stats()

            # Give the breaker time to half-open, then verify recovery.
            await asyncio.sleep(1.1)
            recovered = await investigation_service.create(
                user_id="test_user",
                query="Test recovery",
            )
            outcome = await investigation_service.run_investigation(recovered.id)
            assert outcome.status == "completed"

    @pytest.mark.integration
    @pytest.mark.asyncio
    async def test_fallback_mechanisms(self, chat_service):
        """When every agent is down, the emergency handler answers instead."""
        broken_agent = AsyncMock()
        broken_agent.process.side_effect = Exception("Agent unavailable")

        pool = AsyncMock()

        @asynccontextmanager
        async def acquire_broken(agent_type, context):
            yield broken_agent

        pool.acquire = acquire_broken

        with patch('src.agents.get_agent_pool', return_value=pool):
            with patch('src.services.chat_emergency.emergency_chat_handler') as emergency:
                emergency.handle_message.return_value = {
                    "response": "Emergency response",
                    "status": "fallback",
                }

                sid = await chat_service.create_session("test_user")

                # The send must fall back to the emergency handler's payload.
                reply = await chat_service.send_message(
                    session_id=sid,
                    message="Test message",
                    use_fallback=True,
                )

                assert reply.content == "Emergency response"
                assert reply.metadata["status"] == "fallback"
657
+
658
+
659
# NOTE: the late `from contextlib import asynccontextmanager` that used to live
# here was redundant once the import moved to the top of the module; keeping
# imports at the top avoids a fragile dependency on module-execution order.


@asynccontextmanager
async def mock_acquire(agent_type, context):
    """Default acquire() stand-in: yields a fresh AsyncMock for any agent type.

    Module-level fallback kept for ad-hoc use; the tests above install their
    own acquire() replacements and do not call this one.
    """
    yield AsyncMock()