// vex_llm/lib.rs

1//! # VEX LLM
2//!
3//! LLM provider integrations for VEX agents.
4//!
5//! ## Supported Backends
6//!
7//! | Provider | Type | Key Required |
8//! |----------|------|--------------|
9//! | DeepSeek | API | `DEEPSEEK_API_KEY` |
10//! | Mistral | API | `MISTRAL_API_KEY` |
11//! | OpenAI | API | `OPENAI_API_KEY` |
12//! | Ollama | Local | None |
13//! | Mock | Testing | None |
14//!
15//! ## Quick Start
16//!
17//! ```rust
18//! use vex_llm::{MockProvider, LlmProvider};
19//!
20//! #[tokio::main]
21//! async fn main() {
22//!     // Use mock provider for testing
23//!     let llm = MockProvider::smart();
24//!     
25//!     // Ask a question
26//!     let response = llm.ask("What is quantum computing?").await.unwrap();
27//!     println!("{}", response);
28//! }
29//! ```
30//!
31//! ## With DeepSeek
32//!
33//! ```rust,ignore
34//! use vex_llm::DeepSeekProvider;
35//!
36//! let api_key = std::env::var("DEEPSEEK_API_KEY").unwrap();
37//! let llm = DeepSeekProvider::new(api_key);
38//!
39//! let response = llm.ask("Explain Merkle trees").await.unwrap();
40//! ```
41//!
42//! ## With Mistral
43//!
44//! ```rust,ignore
45//! use vex_llm::MistralProvider;
46//!
47//! let api_key = std::env::var("MISTRAL_API_KEY").unwrap();
48//! let llm = MistralProvider::small(&api_key); // or large(), medium(), codestral()
49//!
50//! let response = llm.ask("Explain Merkle trees").await.unwrap();
51//! ```
52//!
53//! ## Rate Limiting
54//!
55//! ```rust
56//! use vex_llm::{RateLimiter, RateLimitConfig};
57//!
58//! let limiter = RateLimiter::new(RateLimitConfig::default());
59//!
60//! // Check if request is allowed (in async context)
61//! // limiter.try_acquire("user123").await.unwrap();
62//! ```
63
// Module tree, kept in alphabetical order. The commonly used types from
// these modules are re-exported at the crate root further below, which
// is what lets the doc examples write `vex_llm::MockProvider` instead
// of `vex_llm::mock::MockProvider`.
pub mod cached_provider;
pub mod config;
pub mod deepseek;
// NOTE(review): `mcp` is the only module with no matching re-export in
// the `pub use` section below — confirm whether that is intentional.
pub mod mcp;
pub mod metrics;
pub mod mistral;
pub mod mock;
pub mod ollama;
pub mod openai;
pub mod provider;
pub mod rate_limit;
pub mod resilient_provider;
pub mod streaming_tool;
pub mod tool;
pub mod tool_error;
pub mod tool_executor;
pub mod tool_result;
pub mod tools;
82
// Public API surface: flatten the most-used items to the crate root so
// the examples in the module docs above compile as written
// (`use vex_llm::{MockProvider, LlmProvider};`).
pub use cached_provider::{CachedProvider, LlmCacheConfig};
pub use config::{ConfigError, LlmConfig, VexConfig};
pub use deepseek::DeepSeekProvider;
pub use metrics::{global_metrics, Metrics, MetricsSnapshot, Span, Timer};
pub use mistral::MistralProvider;
pub use mock::MockProvider;
pub use ollama::OllamaProvider;
pub use openai::OpenAIProvider;
// `LlmProvider` is the trait the doc examples call `ask` on —
// presumably the common interface every backend implements; the
// request/response/error types travel with it.
pub use provider::{EmbeddingProvider, LlmError, LlmProvider, LlmRequest, LlmResponse};
pub use rate_limit::{RateLimitConfig, RateLimitError, RateLimitedProvider, RateLimiter};
pub use resilient_provider::{CircuitState, LlmCircuitConfig, ResilientProvider};
// Tool-calling support: definitions/registry, execution, streaming,
// and the built-in tool implementations.
pub use streaming_tool::{StreamConfig, StreamingTool, ToolChunk, ToolStream};
pub use tool::{Capability, Tool, ToolDefinition, ToolRegistry};
pub use tool_error::ToolError;
pub use tool_executor::ToolExecutor;
pub use tool_result::ToolResult;
pub use tools::{CalculatorTool, DateTimeTool, HashTool, JsonPathTool, RegexTool, UuidTool};
// NOTE(review): nothing from the declared `mcp` module is re-exported
// here — confirm whether its items should also be exposed at the root.