// vex_llm/mock.rs

1//! Mock LLM provider for testing
2
3use async_trait::async_trait;
4use std::time::Instant;
5
6use crate::provider::{LlmError, LlmProvider, LlmRequest, LlmResponse};
7
8/// A mock LLM provider that returns predefined responses
9/// Perfect for testing without needing actual LLM access
#[derive(Debug)]
pub struct MockProvider {
    /// Name of this mock; also reported as the `model` in responses
    pub name: String,
    /// Canned responses, cycled through in order; when empty, replies are
    /// generated on the fly from the prompt's keywords (see `smart()`)
    responses: Vec<String>,
    /// Index of the next canned response; atomic so `complete(&self)` can
    /// advance it without requiring `&mut self`
    index: std::sync::atomic::AtomicUsize,
    /// Simulated latency in ms, slept before each completion returns
    latency_ms: u64,
}
21
22impl MockProvider {
23    /// Create a new mock provider with given responses
24    pub fn new(responses: Vec<String>) -> Self {
25        Self {
26            name: "mock".to_string(),
27            responses,
28            index: std::sync::atomic::AtomicUsize::new(0),
29            latency_ms: 50,
30        }
31    }
32
33    /// Create a mock that always returns the same response
34    pub fn constant(response: &str) -> Self {
35        Self::new(vec![response.to_string()])
36    }
37
38    /// Create a mock for adversarial testing (alternates agree/disagree)
39    pub fn adversarial() -> Self {
40        Self::new(vec![
41            "I agree with this assessment. The reasoning is sound.".to_string(),
42            "I disagree. There are several issues: 1) The logic is flawed, 2) Missing evidence."
43                .to_string(),
44        ])
45    }
46
47    /// Create a smart mock that responds based on prompt content
48    pub fn smart() -> Self {
49        Self {
50            name: "smart-mock".to_string(),
51            responses: vec![],
52            index: std::sync::atomic::AtomicUsize::new(0),
53            latency_ms: 50,
54        }
55    }
56
57    fn generate_smart_response(&self, request: &LlmRequest) -> String {
58        let prompt_lower = request.prompt.to_lowercase();
59
60        // Detect if this is a challenge/verification request
61        if prompt_lower.contains("challenge")
62            || prompt_lower.contains("verify")
63            || prompt_lower.contains("critique")
64        {
65            return "After careful analysis, I found the following concerns:\n\
66                 1. The claim requires additional evidence\n\
67                 2. There may be alternative interpretations\n\
68                 3. Confidence level: 70%\n\n\
69                 Recommendation: Proceed with caution."
70                .to_string();
71        }
72
73        // Detect if this is a research/exploration request
74        if prompt_lower.contains("research")
75            || prompt_lower.contains("explore")
76            || prompt_lower.contains("analyze")
77        {
78            return "Based on my analysis:\n\n\
79                 ## Key Findings\n\
80                 1. Primary insight discovered\n\
81                 2. Supporting evidence found\n\
82                 3. Potential implications identified\n\n\
83                 ## Confidence: 85%\n\n\
84                 This analysis is based on available information."
85                .to_string();
86        }
87
88        // Detect if this is a summary request
89        if prompt_lower.contains("summarize") || prompt_lower.contains("summary") {
90            return "Summary: The key points are consolidated into a concise overview.".to_string();
91        }
92
93        // Default intelligent response
94        format!(
95            "I understand you're asking about: \"{}\"\n\n\
96             Here's my response based on the context provided:\n\
97             - The request has been processed\n\
98             - Analysis complete\n\
99             - Ready for further instructions",
100            &request.prompt[..request.prompt.len().min(50)]
101        )
102    }
103}
104
105#[async_trait]
106impl LlmProvider for MockProvider {
107    fn name(&self) -> &str {
108        &self.name
109    }
110
111    async fn is_available(&self) -> bool {
112        true // Mock is always available
113    }
114
115    async fn complete(&self, request: LlmRequest) -> Result<LlmResponse, LlmError> {
116        let start = Instant::now();
117
118        // Simulate latency
119        tokio::time::sleep(std::time::Duration::from_millis(self.latency_ms)).await;
120
121        let content = if self.responses.is_empty() {
122            self.generate_smart_response(&request)
123        } else {
124            // Cycle through canned responses
125            let idx = self
126                .index
127                .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
128            self.responses[idx % self.responses.len()].clone()
129        };
130
131        Ok(LlmResponse {
132            content,
133            model: self.name.clone(),
134            tokens_used: Some((request.prompt.len() / 4) as u32 + 100),
135            latency_ms: start.elapsed().as_millis() as u64,
136            trace_root: None,
137        })
138    }
139}
140
141#[async_trait]
142impl crate::provider::EmbeddingProvider for MockProvider {
143    async fn embed(&self, _text: &str) -> Result<Vec<f32>, LlmError> {
144        // Return a zeroed vector of dimension 1536 (common for OpenAI)
145        Ok(vec![0.0; 1536])
146    }
147}
148
#[cfg(test)]
mod tests {
    use super::*;

    /// A `constant` mock should echo its single canned response verbatim.
    #[tokio::test]
    async fn test_mock_provider() {
        let provider = MockProvider::constant("Hello, world!");
        let answer = provider.ask("test").await.unwrap();
        assert_eq!(answer, "Hello, world!");
    }

    /// A `smart` mock should route an "analyze" prompt to the research reply.
    #[tokio::test]
    async fn test_smart_mock() {
        let provider = MockProvider::smart();
        let answer = provider.ask("Please analyze this data").await.unwrap();
        assert!(answer.contains("Key Findings"));
    }
}
166}