diff --git a/README.md b/README.md
index d8007ed..37cec3b 100644
--- a/README.md
+++ b/README.md
@@ -148,6 +148,27 @@ Monitor your API usage through the PostgreSQL logs:
 - Identify usage patterns and optimize costs
 - Maintain compliance with audit requirements
 
+### Metrics Dashboard
+
+OpenProxy includes a lightweight Next.js dashboard for real-time metrics visualization:
+
+```bash
+cd dashboard
+npm install
+cp .env.example .env
+# Configure DATABASE_URL in .env
+npm run dev
+```
+
+The dashboard (available at `http://localhost:3008`) provides:
+- **Real-time Overview**: Total requests, tokens, costs, and response times
+- **Model Breakdown**: Usage statistics grouped by LLM model
+- **Hourly Trends**: Visual charts showing request patterns over time
+- **Recent Requests**: Detailed table of recent API calls
+- **Auto-refresh**: Automatic updates every 30 seconds
+
+See [dashboard/README.md](./dashboard/README.md) for detailed setup instructions.
+
 ## 🤝 Contributing
 
 Feel free to submit issues and enhancement requests!
diff --git a/dashboard/.env.example b/dashboard/.env.example
new file mode 100644
index 0000000..9c0bc76
--- /dev/null
+++ b/dashboard/.env.example
@@ -0,0 +1,5 @@
+# PostgreSQL connection string (same as proxy server)
+DATABASE_URL=postgresql://user:password@localhost:5432/database
+
+# Database table name (default: llm_proxy)
+DATABASE_TABLE=llm_proxy
diff --git a/dashboard/.gitignore b/dashboard/.gitignore
new file mode 100644
index 0000000..8ccc874
--- /dev/null
+++ b/dashboard/.gitignore
@@ -0,0 +1,34 @@
+# dependencies
+/node_modules
+/.pnp
+.pnp.js
+
+# testing
+/coverage
+
+# next.js
+/.next/
+/out/
+
+# production
+/build
+
+# misc
+.DS_Store
+*.pem
+
+# debug
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# local env files
+.env*.local
+.env
+
+# vercel
+.vercel
+
+# typescript
+*.tsbuildinfo
+next-env.d.ts
diff --git a/dashboard/README.md b/dashboard/README.md
new file mode 100644
index 0000000..49016ca
--- /dev/null
+++ b/dashboard/README.md
@@ -0,0 +1,195 @@
+# OpenProxy Metrics Dashboard
+
+A lightweight Next.js dashboard for visualizing OpenProxy LLM request metrics in real time.
+
+## Features
+
+- **Real-time Metrics Overview**: Total requests, tokens, costs, and response times
+- **Model Breakdown**: Usage statistics grouped by LLM model
+- **Hourly Trends**: Visual charts showing request patterns over time
+- **Recent Requests**: Detailed table of recent API calls
+- **Auto-refresh**: Automatic updates every 30 seconds
+- **Time Range Selection**: View metrics for the last hour, 6 hours, 24 hours, or 7 days
+
+## Prerequisites
+
+- Node.js 18 or higher
+- PostgreSQL database (same as the proxy server)
+- OpenProxy proxy server running
+
+## Installation
+
+1. Navigate to the dashboard directory:
+   ```bash
+   cd dashboard
+   ```
+
+2. Install dependencies:
+   ```bash
+   npm install
+   ```
+
+3. Create a `.env` file (copy from `.env.example`):
+   ```bash
+   cp .env.example .env
+   ```
+
+4. Configure your `.env` file:
+   ```env
+   DATABASE_URL=postgresql://user:password@localhost:5432/database
+   DATABASE_TABLE=llm_proxy
+   ```
+
+## Running the Dashboard
+
+### Development Mode
+
+```bash
+npm run dev
+```
+
+The dashboard will be available at `http://localhost:3008`.
+
+### Production Mode
+
+1. Build the application:
+   ```bash
+   npm run build
+   ```
+
+2. Start the production server:
+   ```bash
+   npm start
+   ```
+
+## Dashboard Sections
+
+### 1. Overview Cards
+Displays key metrics at a glance:
+- Total requests processed
+- Total tokens consumed
+- Total cost incurred
+- Average response time
+- Number of unique models used
+- Number of unique client IPs
+
+### 2. Hourly Trends
+Two charts showing:
+- Request counts and average response time over time
+- Token usage and costs over time
+
+### 3. Model Breakdown
+Table showing per-model statistics:
+- Request count
+- Total tokens used
+- Total cost
+- Average response time
+
+### 4. Recent Requests
+Detailed table of recent API calls showing:
+- Timestamp
+- Model used
+- Token breakdown (prompt + completion = total)
+- Cost
+- Response time
+- HTTP status code
+- Client IP address
+- Whether the request was streamed
+
+## Configuration
+
+### Port
+The dashboard runs on port 3008 by default. To change this, modify the `dev` and `start` scripts in `package.json`:
+
+```json
+"dev": "next dev -p YOUR_PORT",
+"start": "next start -p YOUR_PORT"
+```
+
+### Database Connection
+Ensure the `DATABASE_URL` in your `.env` file matches the PostgreSQL connection string used by the proxy server.
+
+### Time Ranges
+Available time ranges:
+- Last Hour (1 hour)
+- Last 6 Hours
+- Last 24 Hours (default)
+- Last 7 Days (168 hours)
+
+## Troubleshooting
+
+### "Failed to fetch metrics" Error
+- Verify that the `DATABASE_URL` in `.env` is correct
+- Ensure PostgreSQL is running and accessible
+- Check that the `llm_proxy` table exists in your database
+- Verify network connectivity to the database
+
+### Empty Dashboard
+- Ensure the proxy server is running and processing requests
+- Verify that requests are being logged to the database
+- Check that the `DATABASE_TABLE` name matches your configuration
+
+### Port Conflicts
+If port 3008 is already in use, change the port in the `package.json` scripts.
+
+## Technology Stack
+
+- **Framework**: Next.js 14 (React 18)
+- **Charts**: Recharts
+- **Database**: PostgreSQL (via the `pg` driver)
+- **Language**: TypeScript
+- **Styling**: Inline CSS (no external dependencies)
+
+## Architecture
+
+```
+dashboard/
+├── app/
+│   ├── api/
+│   │   └── metrics/
+│   │       └── route.ts      # API endpoint for fetching metrics
+│   ├── layout.tsx            # Root layout
+│   └── page.tsx              # Main dashboard page
+├── components/
+│   ├── MetricsOverview.tsx   # Overview cards component
+│   ├── ModelBreakdown.tsx    # Model statistics table
+│   ├── RecentRequests.tsx    # Recent requests table
+│   └── TrendsChart.tsx       # Hourly trends charts
+├── package.json
+├── tsconfig.json
+├── next.config.js
+└── README.md
+```
+
+## API Endpoints
+
+### GET `/api/metrics`
+
+Query parameters:
+- `hours` (optional): Number of hours to look back (default: 24)
+- `limit` (optional): Maximum number of recent requests to return (default: 100)
+
+Response:
+```json
+{
+  "success": true,
+  "data": {
+    "summary": {
+      "totalRequests": 1234,
+      "totalTokens": 567890,
+      "totalCost": 12.34,
+      "avgResponseTime": 450.5,
+      "uniqueModels": 3,
+      "uniqueClients": 15
+    },
+    "recentRequests": [...],
+    "modelBreakdown": [...],
+    "hourlyTrends": [...]
+  },
+  "timeRange": "24 hours"
+}
+```
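+
+Example request (illustrative values; assumes the dashboard is running on the default port 3008):
+
+```bash
+# Fetch metrics for the last 6 hours, returning at most 10 recent requests
+curl 'http://localhost:3008/api/metrics?hours=6&limit=10'
+```
+
+## License
+
+Same as OpenProxy parent project.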
diff --git a/dashboard/app/api/metrics/route.ts b/dashboard/app/api/metrics/route.ts
new file mode 100644
index 0000000..d431166
--- /dev/null
+++ b/dashboard/app/api/metrics/route.ts
@@ -0,0 +1,119 @@
+import { NextRequest, NextResponse } from 'next/server';
+import { Pool } from 'pg';
+
+const pool = new Pool({
+  connectionString: process.env.DATABASE_URL,
+});
+
+const TABLE_NAME = process.env.DATABASE_TABLE || 'llm_proxy';
+
+export async function GET(request: NextRequest) {
+  const { searchParams } = new URL(request.url);
+  const hours = parseInt(searchParams.get('hours') || '24', 10);
+  const limit = parseInt(searchParams.get('limit') || '100', 10);
+
+  try {
+    const client = await pool.connect();
+
+    try {
+      // Get summary statistics
+      const summaryQuery = `
+        SELECT
+          COUNT(*) as total_requests,
+          SUM(total_tokens) as total_tokens_used,
+          SUM(total_cost) as total_cost,
+          AVG(response_time) as avg_response_time,
+          COUNT(DISTINCT model) as unique_models,
+          COUNT(DISTINCT client_ip) as unique_clients
+        FROM ${TABLE_NAME}
+        WHERE timestamp >= NOW() - INTERVAL '${hours} hours'
+      `;
+      const summaryResult = await client.query(summaryQuery);
+      const summary = summaryResult.rows[0];
+
+      // Get recent requests
+      const recentQuery = `
+        SELECT
+          request_id,
+          timestamp,
+          model,
+          prompt_tokens,
+          completion_tokens,
+          total_tokens,
+          total_cost,
+          response_time,
+          response_status,
+          client_ip,
+          stream
+        FROM ${TABLE_NAME}
+        WHERE timestamp >= NOW() - INTERVAL '${hours} hours'
+        ORDER BY timestamp DESC
+        LIMIT ${limit}
+      `;
+      const recentResult = await client.query(recentQuery);
+      const recentRequests = recentResult.rows;
+
+      // Get model breakdown
+      const modelQuery = `
+        SELECT
+          model,
+          COUNT(*) as request_count,
+          SUM(total_tokens) as total_tokens,
+          SUM(total_cost) as total_cost,
+          AVG(response_time) as avg_response_time
+        FROM ${TABLE_NAME}
+        WHERE timestamp >= NOW() - INTERVAL '${hours} hours'
+        GROUP BY model
+        ORDER BY request_count DESC
+      `;
+      const modelResult = await client.query(modelQuery);
+      const modelBreakdown = modelResult.rows;
+
+      // Get hourly trends
+      const trendsQuery = `
+        SELECT
+          DATE_TRUNC('hour', timestamp) as hour,
+          COUNT(*) as requests,
+          SUM(total_tokens) as tokens,
+          SUM(total_cost) as cost,
+          AVG(response_time) as avg_response_time
+        FROM ${TABLE_NAME}
+        WHERE timestamp >= NOW() - INTERVAL '${hours} hours'
+        GROUP BY hour
+        ORDER BY hour ASC
+      `;
+      const trendsResult = await client.query(trendsQuery);
+      const hourlyTrends = trendsResult.rows;
+
+      return NextResponse.json({
+        success: true,
+        data: {
+          summary: {
+            totalRequests: parseInt(summary.total_requests || '0'),
+            totalTokens: parseInt(summary.total_tokens_used || '0'),
+            totalCost: parseFloat(summary.total_cost || '0'),
+            avgResponseTime: parseFloat(summary.avg_response_time || '0'),
+            uniqueModels: parseInt(summary.unique_models || '0'),
+            uniqueClients: parseInt(summary.unique_clients || '0'),
+          },
+          recentRequests,
+          modelBreakdown,
+          hourlyTrends,
+        },
+        timeRange: `${hours} hours`,
+      });
+    } finally {
+      client.release();
+    }
+  } catch (error) {
+    console.error('Database error:', error);
+    return NextResponse.json(
+      {
+        success: false,
+        error: 'Failed to fetch metrics',
+        details: error instanceof Error ? error.message : 'Unknown error',
+      },
+      { status: 500 }
+    );
+  }
+}
diff --git a/dashboard/app/layout.tsx b/dashboard/app/layout.tsx
new file mode 100644
index 0000000..5f9562a
--- /dev/null
+++ b/dashboard/app/layout.tsx
@@ -0,0 +1,33 @@
+import type { Metadata } from 'next'
+
+export const metadata: Metadata = {
+  title: 'OpenProxy Metrics Dashboard',
+  description: 'Real-time metrics and analytics for OpenProxy LLM requests',
+}
+
+export default function RootLayout({
+  children,
+}: {
+  children: React.ReactNode
+}) {
+  return (
+    <html lang="en">
+      <body>
+        {children}
+      </body>
+    </html>
+  )
+}
diff --git a/dashboard/app/page.tsx b/dashboard/app/page.tsx
new file mode 100644
index 0000000..331a627
--- /dev/null
+++ b/dashboard/app/page.tsx
@@ -0,0 +1,221 @@
+'use client';
+
+import { useEffect, useState } from 'react';
+import MetricsOverview from '@/components/MetricsOverview';
+import ModelBreakdown from '@/components/ModelBreakdown';
+import RecentRequests from '@/components/RecentRequests';
+import TrendsChart from '@/components/TrendsChart';
+
+interface MetricsData {
+  summary: {
+    totalRequests: number;
+    totalTokens: number;
+    totalCost: number;
+    avgResponseTime: number;
+    uniqueModels: number;
+    uniqueClients: number;
+  };
+  recentRequests: any[];
+  modelBreakdown: any[];
+  hourlyTrends: any[];
+}
+
+export default function Dashboard() {
+  const [data, setData] = useState<MetricsData | null>(null);
+  const [error, setError] = useState<string | null>(null);
+  const [hours, setHours] = useState(24);
+
+  // Fetch metrics from the API route; reused by the auto-refresh timer
+  const fetchMetrics = async () => {
+    try {
+      const res = await fetch(`/api/metrics?hours=${hours}`);
+      const json = await res.json();
+      if (!json.success) throw new Error(json.error || 'Failed to fetch metrics');
+      setData(json.data);
+      setError(null);
+    } catch (err) {
+      setError(err instanceof Error ? err.message : 'Failed to fetch metrics');
+    }
+  };
+
+  // Initial load, plus auto-refresh every 30 seconds
+  useEffect(() => {
+    fetchMetrics();
+    const interval = setInterval(fetchMetrics, 30000);
+    return () => clearInterval(interval);
+  }, [hours]);
+
+  return (
+    <main style={{ padding: 24, fontFamily: 'sans-serif' }}>
+      <h1>OpenProxy Metrics Dashboard</h1>
+
+      {/* Time range selector: 1h / 6h / 24h / 7d */}
+      <select value={hours} onChange={(e) => setHours(parseInt(e.target.value, 10))}>
+        <option value={1}>Last Hour</option>
+        <option value={6}>Last 6 Hours</option>
+        <option value={24}>Last 24 Hours</option>
+        <option value={168}>Last 7 Days</option>
+      </select>
+
+      {error && <div style={{ color: 'red' }}>{error}</div>}
+
+      {data && (
+        <>
+          <MetricsOverview summary={data.summary} />
+          <TrendsChart trends={data.hourlyTrends} />
+          <ModelBreakdown models={data.modelBreakdown} />
+          <RecentRequests requests={data.recentRequests} />
+        </>
+      )}
+    </main>
+  );
+}
+ +No model data available
+| Model | +Requests | +Total Tokens | +Total Cost | +Avg Response Time | +
|---|---|---|---|---|
| + {model.model} + | +{parseInt(model.request_count).toLocaleString()} | +{parseInt(model.total_tokens).toLocaleString()} | +${parseFloat(model.total_cost).toFixed(4)} | +{Math.round(parseFloat(model.avg_response_time))}ms | +
diff --git a/dashboard/components/RecentRequests.tsx b/dashboard/components/RecentRequests.tsx
new file mode 100644
--- /dev/null
+++ b/dashboard/components/RecentRequests.tsx
@@ -0,0 +1,49 @@
+'use client';
+
+// Table of the most recent API calls returned by /api/metrics
+export default function RecentRequests({ requests }: { requests: any[] }) {
+  if (!requests || requests.length === 0) {
+    return <div>No recent requests</div>;
+  }
+
+  return (
+    <section>
+      <h2>Recent Requests</h2>
+      <table style={{ width: '100%', borderCollapse: 'collapse' }}>
+        <thead>
+          <tr>
+            <th>Timestamp</th>
+            <th>Model</th>
+            <th>Tokens</th>
+            <th>Cost</th>
+            <th>Response Time</th>
+            <th>Status</th>
+            <th>Client IP</th>
+            <th>Stream</th>
+          </tr>
+        </thead>
+        <tbody>
+          {requests.map((req) => (
+            <tr key={req.request_id}>
+              <td>{new Date(req.timestamp).toLocaleString()}</td>
+              <td>{req.model}</td>
+              <td>
+                {/* prompt + completion = total */}
+                {req.prompt_tokens} + {req.completion_tokens} = {req.total_tokens}
+              </td>
+              <td>${parseFloat(req.total_cost).toFixed(4)}</td>
+              <td>{req.response_time}ms</td>
+              <td>
+                <span style={{ color: req.response_status < 400 ? 'green' : 'red' }}>
+                  {req.response_status}
+                </span>
+              </td>
+              <td>{req.client_ip}</td>
+              <td>{req.stream ? '✓' : '✗'}</td>
+            </tr>
+          ))}
+        </tbody>
+      </table>
+    </section>
+  );
+}
diff --git a/dashboard/components/TrendsChart.tsx b/dashboard/components/TrendsChart.tsx
new file mode 100644
--- /dev/null
+++ b/dashboard/components/TrendsChart.tsx
@@ -0,0 +1,56 @@
+'use client';
+
+// Hourly trend charts (Recharts): requests/response time, and tokens/cost
+import {
+  LineChart,
+  Line,
+  XAxis,
+  YAxis,
+  Tooltip,
+  Legend,
+  CartesianGrid,
+  ResponsiveContainer,
+} from 'recharts';
+
+export default function TrendsChart({ trends }: { trends: any[] }) {
+  if (!trends || trends.length === 0) {
+    return <div>No trend data available</div>;
+  }
+
+  // Normalize the raw SQL rows for charting
+  const data = trends.map((t) => ({
+    hour: new Date(t.hour).toLocaleTimeString([], { hour: '2-digit' }),
+    requests: parseInt(t.requests),
+    tokens: parseInt(t.tokens),
+    cost: parseFloat(t.cost),
+    avgResponseTime: Math.round(parseFloat(t.avg_response_time)),
+  }));
+
+  return (
+    <section>
+      <h2>Hourly Trends</h2>
+      <ResponsiveContainer width="100%" height={250}>
+        <LineChart data={data}>
+          <CartesianGrid strokeDasharray="3 3" />
+          <XAxis dataKey="hour" />
+          <YAxis />
+          <Tooltip />
+          <Legend />
+          <Line type="monotone" dataKey="requests" stroke="#8884d8" />
+          <Line type="monotone" dataKey="avgResponseTime" stroke="#82ca9d" />
+        </LineChart>
+      </ResponsiveContainer>
+      <ResponsiveContainer width="100%" height={250}>
+        <LineChart data={data}>
+          <CartesianGrid strokeDasharray="3 3" />
+          <XAxis dataKey="hour" />
+          <YAxis />
+          <Tooltip />
+          <Legend />
+          <Line type="monotone" dataKey="tokens" stroke="#8884d8" />
+          <Line type="monotone" dataKey="cost" stroke="#ff7300" />
+        </LineChart>
+      </ResponsiveContainer>
+    </section>
+  );
+}