Files
.llmx/config.toml
Sebastian Krüger 5eda3b5c6a refactor: clean up config, switch to claude-sonnet-4-5
- Switch model from Llama-3.1-8B to claude-sonnet-4-5
- Remove disabled MCP servers to simplify config
- Keep only active servers: git, brave_search, python_runner, crawl4ai_rag

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-01 06:27:10 +01:00

68 lines
1.8 KiB
TOML

# Model routing: all requests go through the LiteLLM proxy to Claude Sonnet 4.5.
model_provider = "litellm"
model = "anthropic/claude-sonnet-4-5-20250929"

# Previous local model, kept commented for easy rollback:
# model = "hosted_vllm/meta-llama/Llama-3.1-8B-Instruct"
# Context window settings for Llama-3.1-8B (24K context)
# model_context_window = 24576
# model_auto_compact_token_limit = 15000
# model_max_output_tokens = 1024

# Directories granted "trusted" status (quoted keys are required because the
# paths contain characters outside the bare-key set).
[projects."/home/valknar"]
trust_level = "trusted"

[projects."/home/valknar/Projects/llmx"]
trust_level = "trusted"

[projects."/home/valknar/Projects/docker-compose"]
trust_level = "trusted"

[projects."/home/valknar/Projects/kit-ui"]
trust_level = "trusted"

[projects."/home/valknar/Projects/image-ui"]
trust_level = "trusted"

[projects."/home/valknar/bin"]
trust_level = "trusted"

[projects."/home/valknar/Projects/pivoine.art"]
trust_level = "trusted"

# Git operations - Python uvx (no NPM package available)
[mcp_servers.git]
command = "uvx"
args = ["mcp-server-git"]
enabled = true
startup_timeout_sec = 10

# Browser automation - most popular MCP server (826k weekly downloads)
# NOTE(review): kept with enabled = false, but the cleanup commit says disabled
# servers were removed — confirm whether this entry should be deleted entirely.
[mcp_servers.playwright]
command = "npx"
args = ["-y", "@playwright/mcp"]
enabled = false # Disabled to reduce prompt size
startup_timeout_sec = 20

# Brave Search - requires API key (BRAVE_API_KEY in .env)
[mcp_servers.brave_search]
command = "/home/valknar/.llmx/run-brave-search-mcp.sh"
args = []
enabled = true
startup_timeout_sec = 15

# Secure Python sandbox execution (Python uvx, requires Deno)
[mcp_servers.python_runner]
command = "uvx"
args = ["mcp-run-python", "stdio"]
enabled = true
startup_timeout_sec = 15

# Web crawling + RAG with vector search (local Docker stack)
# Uses BGE embeddings via remote LiteLLM, local PostgreSQL + pgvector
# Start DB first: cd ~/.llmx && docker compose up -d crawl4ai-db crawl4ai-rest
[mcp_servers.crawl4ai_rag]
command = "/home/valknar/.llmx/run-crawl4ai-mcp.sh"
args = []
enabled = true
startup_timeout_sec = 60
# Crawls can be slow; allow individual tool calls up to two minutes.
tool_timeout_sec = 120