Add MCPO local test

nicolargo 2025-12-29 10:36:49 +01:00
parent c1bfe0b063
commit e9200db776
4 changed files with 106 additions and 26 deletions


@@ -14,7 +14,7 @@ sudo usermod -aG docker $USER

### 1. Start the Services

Warning: you should use docker compose v2. If you have v1 installed, please uninstall it first.

```bash
# Save the docker-compose.yml file and run:
@@ -65,25 +65,92 @@ curl http://localhost:61208/api/4/all

### 5. Configure MCP Server Connection

Install mcpo:

```bash
uv add mcpo
```

Create the MCPO configuration file:

```bash
cd /home/nicolargo/dev/glances/mcp
# Create configuration
cat > mcp_config.json << 'EOF'
{
  "mcpServers": {
    "glances": {
      "command": "python3",
      "args": ["glances_mcp.py"],
      "env": {
        "GLANCES_API_URL": "http://localhost:61208/api/4"
      }
    }
  }
}
EOF
```
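
The `glances_mcp.py` script itself is not shown in this commit; as a rough sketch (the tool name, signature, and endpoint choice below are assumptions, not the actual implementation), a FastMCP-based server matching this configuration could look like:

```python
# Hypothetical sketch of glances_mcp.py -- the real file may differ.
import os
import urllib.request

from fastmcp import FastMCP

# Same environment variable as declared in mcp_config.json above.
API_URL = os.environ.get("GLANCES_API_URL", "http://localhost:61208/api/4")

mcp = FastMCP("glances")


@mcp.tool
def get_cpu() -> str:
    """Return the current CPU stats from the Glances REST API as JSON text."""
    with urllib.request.urlopen(f"{API_URL}/cpu") as resp:
        return resp.read().decode()


if __name__ == "__main__":
    mcp.run()  # stdio transport by default, matching the stdio launch above
```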
Run MCPO proxy:
```bash
../.venv-uv/bin/uv run mcpo --port 8000 --config mcp_config.json
Starting MCP OpenAPI Proxy with config file: mcp_config.json
2025-12-29 09:38:29,310 - INFO - Starting MCPO Server...
2025-12-29 09:38:29,310 - INFO - Name: MCP OpenAPI Proxy
2025-12-29 09:38:29,310 - INFO - Version: 1.0
2025-12-29 09:38:29,310 - INFO - Description: Automatically generated API from MCP Tool Schemas
2025-12-29 09:38:29,310 - INFO - Hostname: nicolargo-xps15
2025-12-29 09:38:29,310 - INFO - Port: 8000
2025-12-29 09:38:29,310 - INFO - API Key: Not Provided
2025-12-29 09:38:29,310 - INFO - CORS Allowed Origins: ['*']
2025-12-29 09:38:29,310 - INFO - Path Prefix: /
2025-12-29 09:38:29,310 - INFO - Root Path:
2025-12-29 09:38:29,311 - INFO - Loading MCP server configurations from: mcp_config.json
2025-12-29 09:38:29,311 - INFO - Configuring MCP Servers:
2025-12-29 09:38:29,311 - INFO - Uvicorn server starting...
INFO: Started server process [167852]
INFO: Waiting for application startup.
2025-12-29 09:38:29,324 - INFO - Initiating connection for server: 'glances'...
2025-12-29 09:38:29,800 - INFO - Successfully connected to 'glances'.
2025-12-29 09:38:29,800 - INFO - --------------------------
INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
```
Check that the generated OpenAPI schema is served:
```bash
curl http://localhost:8000/glances/openapi.json
```
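
The same check scripted with the standard library only, listing the tool routes MCPO generated from that schema (the exact paths depend on what `glances_mcp.py` registers):

```python
# List the endpoints exposed by the MCPO proxy for the 'glances' server.
import json
import urllib.request

with urllib.request.urlopen("http://localhost:8000/glances/openapi.json") as resp:
    schema = json.load(resp)

for path, ops in schema.get("paths", {}).items():
    for method, op in ops.items():
        print(f"{method.upper()} {path} - {op.get('summary', '')}")
```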
Then configure Open WebUI to use the MCP server at `http://host.docker.internal:8000/glances`:

1. **Open Open WebUI**: go to `http://localhost:3000`
2. **Open Settings**: click your profile icon (top right) and select "Settings"
3. **Add Tool Server**: navigate to the "Tools" section in the left sidebar and click the "+" button to add a new tool server
4. **Configure the Connection**:
   - Server URL: `http://172.17.0.1:8000/glances`
   - API Key: leave blank (unless you configured one)
5. **Enable the Tool**: the Glances tools should now appear in your tools list; in the conversation interface, click the "Tools" button (usually a toolbox icon) and enable the Glances tool.

Test a prompt like: "What is the current CPU usage on my system?"
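
For debugging outside Open WebUI, you can also call a proxied tool directly: MCPO exposes each MCP tool as a POST route under the server prefix. A minimal sketch (the `get_cpu` tool name is an assumption; check the `openapi.json` output above for the real names):

```python
# Call one MCPO-proxied tool endpoint directly (tool name is hypothetical).
import json
import urllib.request

req = urllib.request.Request(
    "http://localhost:8000/glances/get_cpu",  # assumed tool route
    data=json.dumps({}).encode(),             # tool arguments (none here)
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode())
```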
## Intel GPU Acceleration (Optional)

@@ -114,8 +181,8 @@ environment:

Then restart:

```bash
docker compose down
docker compose up -d
```

## Testing Your MCP Server

@@ -143,18 +210,18 @@ curl http://localhost:61208/api/4/cpu

```bash
# View logs
docker compose logs -f ollama
docker compose logs -f open-webui
docker compose logs -f glances

# Restart services
docker compose restart

# Stop all services
docker compose down

# Stop and remove volumes (clean start)
docker compose down -v

# Check Ollama models
docker exec -it ollama ollama list

@@ -167,7 +234,7 @@ docker stats

### Ollama not responding

```bash
docker compose restart ollama
docker exec -it ollama ollama list
```

@@ -177,7 +244,7 @@ docker exec -it ollama ollama list

curl http://localhost:61208/api/4/status

# View Glances logs
docker compose logs glances
```
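
The Glances-side checks can also be scripted; this sketch probes only endpoints already used in this guide (`status`, `cpu`, `all`):

```python
# Probe a few Glances v4 REST endpoints and report the HTTP status.
import urllib.request

BASE = "http://localhost:61208/api/4"
for endpoint in ("status", "cpu", "all"):
    try:
        with urllib.request.urlopen(f"{BASE}/{endpoint}", timeout=5) as resp:
            print(f"/{endpoint}: HTTP {resp.status}")
    except OSError as exc:  # URLError/HTTPError both subclass OSError
        print(f"/{endpoint}: FAILED ({exc})")
```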

### Low memory issues

@@ -186,11 +253,11 @@ docker-compose logs glances

docker exec -it ollama ollama pull llama3.2:1b

# Or configure Ollama to use less memory
docker compose down
# Add to ollama service environment:
# - OLLAMA_MAX_LOADED_MODELS=1
# - OLLAMA_NUM_PARALLEL=1
docker compose up -d
```

## Recommended Models for Your Hardware

docker-compose.yml

@@ -11,10 +11,10 @@ services:
      - ollama_data:/root/.ollama
    restart: unless-stopped
    # Uncomment below for Intel GPU support (requires Intel GPU drivers on host)
    devices:
      - /dev/dri:/dev/dri
    environment:
      - OLLAMA_GPU_DRIVER=intel

  # Open WebUI - Web Interface for Ollama
  open-webui:

mcp/mcp_config.json (new file)

@@ -0,0 +1,11 @@
{
  "mcpServers": {
    "glances": {
      "command": "python3",
      "args": ["glances_mcp.py"],
      "env": {
        "GLANCES_API_URL": "http://localhost:61208/api/4"
      }
    }
  }
}

pyproject.toml

@@ -10,7 +10,6 @@ classifiers = [
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
-"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
@@ -24,6 +23,9 @@ dependencies = [
"windows-curses; platform_system == 'Windows'",
"shtab; platform_system != 'Windows'",
"jinja2",
"mcpo>=0.0.19",
"mcp>=1.25.0",
"fastmcp>=2.14.1",
]
description = "A cross-platform curses-based monitoring tool"
dynamic = ["version"]
@@ -31,7 +33,7 @@ keywords = ["cli", "curses", "monitoring", "system"]
license = "LGPL-3.0-only"
name = "Glances"
readme = "README-pypi.rst"
requires-python = ">=3.11"
urls.Homepage = "https://github.com/nicolargo/glances"

[dependency-groups]