diff --git a/README.md b/README.md
index c5ef8eb..65d4136 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ You can also replace `hub` with `diagram` in any Github URL to access its diagra
- 👀 **Instant Visualization**: Convert any GitHub repository structure into a system design / architecture diagram
- 🎨 **Interactivity**: Click on components to navigate directly to source files and relevant directories
-- ⚡ **Fast Generation**: Powered by Claude 3.5 Sonnet for quick and accurate diagrams
+- ⚡ **Fast Generation**: Powered by OpenAI o4-mini for quick and accurate diagrams
- 🔄 **Customization**: Modify and regenerate diagrams with custom instructions
- 🌐 **API Access**: Public API available for integration (WIP)
@@ -22,7 +22,7 @@ You can also replace `hub` with `diagram` in any Github URL to access its diagra
- **Frontend**: Next.js, TypeScript, Tailwind CSS, ShadCN
- **Backend**: FastAPI, Python, Server Actions
- **Database**: PostgreSQL (with Drizzle ORM)
-- **AI**: OpenAI o3-mini
+- **AI**: OpenAI o4-mini
- **Deployment**: Vercel (Frontend), EC2 (Backend)
- **CI/CD**: GitHub Actions
- **Analytics**: PostHog, Api-Analytics
@@ -31,7 +31,7 @@ You can also replace `hub` with `diagram` in any Github URL to access its diagra
I created this because I wanted to contribute to open-source projects but quickly realized their codebases are too massive for me to dig through manually, so this helps me get started - but it's definitely got many more use cases!
-Given any public (or private!) GitHub repository it generates diagrams in Mermaid.js with OpenAI's o3-mini! (Previously Claude 3.5 Sonnet)
+Given any public (or private!) GitHub repository, it generates diagrams in Mermaid.js with OpenAI's o4-mini! (Previously o3-mini, and Claude 3.5 Sonnet before that)
I extract information from the file tree and README for details and interactivity (you can click components to be taken to relevant files and directories)
diff --git a/backend/app/routers/generate.py b/backend/app/routers/generate.py
index 7ed649e..51befcc 100644
--- a/backend/app/routers/generate.py
+++ b/backend/app/routers/generate.py
@@ -2,7 +2,7 @@
from fastapi.responses import StreamingResponse
from dotenv import load_dotenv
from app.services.github_service import GitHubService
-from app.services.o3_mini_openai_service import OpenAIo3Service
+from app.services.o4_mini_openai_service import OpenAIo4Service
from app.prompts import (
SYSTEM_FIRST_PROMPT,
SYSTEM_SECOND_PROMPT,
@@ -21,11 +21,11 @@
load_dotenv()
-router = APIRouter(prefix="/generate", tags=["Claude"])
+router = APIRouter(prefix="/generate", tags=["OpenAI o4-mini"])
# Initialize services
# claude_service = ClaudeService()
-o3_service = OpenAIo3Service()
+o4_service = OpenAIo4Service()
# cache github data to avoid double API calls from cost and generate
@@ -65,8 +65,8 @@ async def get_generation_cost(request: Request, body: ApiRequest):
# file_tree_tokens = claude_service.count_tokens(file_tree)
# readme_tokens = claude_service.count_tokens(readme)
- file_tree_tokens = o3_service.count_tokens(file_tree)
- readme_tokens = o3_service.count_tokens(readme)
+ file_tree_tokens = o4_service.count_tokens(file_tree)
+ readme_tokens = o4_service.count_tokens(readme)
# CLAUDE: Calculate approximate cost
# Input cost: $3 per 1M tokens ($0.000003 per token)
@@ -148,13 +148,13 @@ async def event_generator():
# Token count check
combined_content = f"{file_tree}\n{readme}"
- token_count = o3_service.count_tokens(combined_content)
+ token_count = o4_service.count_tokens(combined_content)
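+ # Soft limit: repos over 50k tokens require a user-supplied OpenAI API key;
+ # the ~195k hard cap keeps requests inside o4-mini's 200k-token context window.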
if 50000 < token_count < 195000 and not body.api_key:
yield f"data: {json.dumps({'error': f'File tree and README combined exceeds token limit (50,000). Current size: {token_count} tokens. This GitHub repository is too large for my wallet, but you can continue by providing your own OpenAI API key.'})}\n\n"
return
elif token_count > 195000:
- yield f"data: {json.dumps({'error': f'Repository is too large (>195k tokens) for analysis. OpenAI o3-mini\'s max context length is 200k tokens. Current size: {token_count} tokens.'})}\n\n"
+ yield f"data: {json.dumps({'error': f'Repository is too large (>195k tokens) for analysis. OpenAI o4-mini\'s max context length is 200k tokens. Current size: {token_count} tokens.'})}\n\n"
return
# Prepare prompts
@@ -173,11 +173,11 @@ async def event_generator():
)
# Phase 1: Get explanation
- yield f"data: {json.dumps({'status': 'explanation_sent', 'message': 'Sending explanation request to o3-mini...'})}\n\n"
+ yield f"data: {json.dumps({'status': 'explanation_sent', 'message': 'Sending explanation request to o4-mini...'})}\n\n"
await asyncio.sleep(0.1)
yield f"data: {json.dumps({'status': 'explanation', 'message': 'Analyzing repository structure...'})}\n\n"
explanation = ""
- async for chunk in o3_service.call_o3_api_stream(
+ async for chunk in o4_service.call_o4_api_stream(
system_prompt=first_system_prompt,
data={
"file_tree": file_tree,
@@ -195,11 +195,11 @@ async def event_generator():
return
# Phase 2: Get component mapping
- yield f"data: {json.dumps({'status': 'mapping_sent', 'message': 'Sending component mapping request to o3-mini...'})}\n\n"
+ yield f"data: {json.dumps({'status': 'mapping_sent', 'message': 'Sending component mapping request to o4-mini...'})}\n\n"
await asyncio.sleep(0.1)
yield f"data: {json.dumps({'status': 'mapping', 'message': 'Creating component mapping...'})}\n\n"
full_second_response = ""
- async for chunk in o3_service.call_o3_api_stream(
+ async for chunk in o4_service.call_o4_api_stream(
system_prompt=SYSTEM_SECOND_PROMPT,
data={"explanation": explanation, "file_tree": file_tree},
api_key=body.api_key,
@@ -219,11 +219,11 @@ async def event_generator():
]
# Phase 3: Generate Mermaid diagram
- yield f"data: {json.dumps({'status': 'diagram_sent', 'message': 'Sending diagram generation request to o3-mini...'})}\n\n"
+ yield f"data: {json.dumps({'status': 'diagram_sent', 'message': 'Sending diagram generation request to o4-mini...'})}\n\n"
await asyncio.sleep(0.1)
yield f"data: {json.dumps({'status': 'diagram', 'message': 'Generating diagram...'})}\n\n"
mermaid_code = ""
- async for chunk in o3_service.call_o3_api_stream(
+ async for chunk in o4_service.call_o4_api_stream(
system_prompt=third_system_prompt,
data={
"explanation": explanation,
@@ -231,7 +231,7 @@ async def event_generator():
"instructions": body.instructions,
},
api_key=body.api_key,
- reasoning_effort="medium",
+ reasoning_effort="low",
):
mermaid_code += chunk
yield f"data: {json.dumps({'status': 'diagram_chunk', 'chunk': chunk})}\n\n"
diff --git a/backend/app/services/o4_mini_openai_service.py b/backend/app/services/o4_mini_openai_service.py
new file mode 100644
index 0000000..ba4b96e
--- /dev/null
+++ b/backend/app/services/o4_mini_openai_service.py
@@ -0,0 +1,186 @@
+from openai import OpenAI
+from dotenv import load_dotenv
+from app.utils.format_message import format_user_message
+import tiktoken
+import os
+import aiohttp
+import json
+from typing import AsyncGenerator, Literal
+
+load_dotenv()
+
+
+class OpenAIo4Service:
+ def __init__(self):
+ self.default_client = OpenAI(
+ api_key=os.getenv("OPENAI_API_KEY"),
+ )
+ self.encoding = tiktoken.get_encoding("o200k_base") # o200k_base is the tokenizer used by GPT-4o and the o-series reasoning models
+ self.base_url = "https://api.openai.com/v1/chat/completions"
+
+ def call_o4_api(
+ self,
+ system_prompt: str,
+ data: dict,
+ api_key: str | None = None,
+ reasoning_effort: Literal["low", "medium", "high"] = "low",
+ ) -> str:
+ """
+ Makes an API call to OpenAI o4-mini and returns the response.
+
+ Args:
+ system_prompt (str): The instruction/system prompt
+ data (dict): Dictionary of variables to format into the user message
+ api_key (str | None): Optional custom API key
+ reasoning_effort (str): Reasoning effort level; one of "low", "medium", or "high" (defaults to "low")
+
+ Returns:
+ str: o4-mini's response text
+ """
+ # Create the user message with the data
+ user_message = format_user_message(data)
+
+ # Use custom client if API key provided, otherwise use default
+ client = OpenAI(api_key=api_key) if api_key else self.default_client
+
+ try:
+ print(
+ f"Making non-streaming API call to o4-mini with API key: {'custom key' if api_key else 'default key'}"
+ )
+
+ completion = client.chat.completions.create(
+ model="o4-mini",
+ messages=[
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_message},
+ ],
+ max_completion_tokens=12000, # Adjust as needed
+ # o-series reasoning models only support the default temperature, so none is set here
+ reasoning_effort=reasoning_effort,
+ )
+
+ print("API call completed successfully")
+
+ if completion.choices[0].message.content is None:
+ raise ValueError("No content returned from OpenAI o4-mini")
+
+ return completion.choices[0].message.content
+
+ except Exception as e:
+ print(f"Error in OpenAI o4-mini API call: {str(e)}")
+ raise
+
+ async def call_o4_api_stream(
+ self,
+ system_prompt: str,
+ data: dict,
+ api_key: str | None = None,
+ reasoning_effort: Literal["low", "medium", "high"] = "low",
+ ) -> AsyncGenerator[str, None]:
+ """
+ Makes a streaming API call to OpenAI o4-mini and yields the responses.
+
+ Args:
+ system_prompt (str): The instruction/system prompt
+ data (dict): Dictionary of variables to format into the user message
+ api_key (str | None): Optional custom API key
+ reasoning_effort (str): Reasoning effort level; one of "low", "medium", or "high" (defaults to "low")
+
+ Yields:
+ str: Chunks of o4-mini's response text
+ """
+ # Create the user message with the data
+ user_message = format_user_message(data)
+
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {api_key or self.default_client.api_key}",
+ }
+
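+ # Streaming goes through aiohttp rather than the OpenAI SDK, presumably so
+ # chunks can be forwarded to the caller as soon as they arrive.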
+ payload = {
+ "model": "o4-mini",
+ "messages": [
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_message},
+ ],
+ "max_completion_tokens": 12000,
+ "stream": True,
+ "reasoning_effort": reasoning_effort,
+ }
+
+ try:
+ async with aiohttp.ClientSession() as session:
+ async with session.post(
+ self.base_url, headers=headers, json=payload
+ ) as response:
+
+ if response.status != 200:
+ error_text = await response.text()
+ print(f"Error response: {error_text}")
+ raise ValueError(
+ f"OpenAI API returned status code {response.status}: {error_text}"
+ )
+
+ line_count = 0
+ async for line in response.content:
+ line = line.decode("utf-8").strip()
+ if not line:
+ continue
+
+ line_count += 1
+
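+ # The response streams as server-sent events: each JSON chunk arrives on a
+ # line prefixed with "data: ", and the stream ends with a "data: [DONE]" sentinel.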
+ if line.startswith("data: "):
+ if line == "data: [DONE]":
+ break
+ try:
+ event = json.loads(line[6:]) # parsed SSE chunk (named to avoid shadowing the `data` argument)
+ content = (
+ event.get("choices", [{}])[0]
+ .get("delta", {})
+ .get("content")
+ )
+ if content:
+ yield content
+ except json.JSONDecodeError as e:
+ print(f"JSON decode error: {e} for line: {line}")
+ continue
+
+ if line_count == 0:
+ print("Warning: No lines received in stream response")
+
+ except aiohttp.ClientError as e:
+ print(f"Connection error: {str(e)}")
+ raise ValueError(f"Failed to connect to OpenAI API: {str(e)}")
+ except Exception as e:
+ print(f"Unexpected error in streaming API call: {str(e)}")
+ raise
+
+ def count_tokens(self, prompt: str) -> int:
+ """
+ Counts the number of tokens in a prompt.
+
+ Args:
+ prompt (str): The prompt to count tokens for
+
+ Returns:
+ int: Estimated number of input tokens
+ """
+ num_tokens = len(self.encoding.encode(prompt))
+ return num_tokens
diff --git a/package.json b/package.json
index 09d93ea..7a8e995 100644
--- a/package.json
+++ b/package.json
@@ -36,11 +36,13 @@
"lucide-react": "^0.468.0",
"mermaid": "^11.4.1",
"next": "^15.0.1",
+ "next-themes": "^0.4.6",
"postgres": "^3.4.4",
"posthog-js": "^1.203.1",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-icons": "^5.4.0",
+ "sonner": "^2.0.3",
"svg-pan-zoom": "^3.6.2",
"tailwind-merge": "^2.5.5",
"tailwindcss-animate": "^1.0.7",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 00f0caa..1422df2 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -56,6 +56,9 @@ importers:
next:
specifier: ^15.0.1
version: 15.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
+ next-themes:
+ specifier: ^0.4.6
+ version: 0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
postgres:
specifier: ^3.4.4
version: 3.4.5
@@ -71,6 +74,9 @@ importers:
react-icons:
specifier: ^5.4.0
version: 5.4.0(react@18.3.1)
+ sonner:
+ specifier: ^2.0.3
+ version: 2.0.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1)
svg-pan-zoom:
specifier: ^3.6.2
version: 3.6.2
@@ -2417,6 +2423,12 @@ packages:
natural-compare@1.4.0:
resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==}
+ next-themes@0.4.6:
+ resolution: {integrity: sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==}
+ peerDependencies:
+ react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
+ react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc
+
next@15.1.0:
resolution: {integrity: sha512-QKhzt6Y8rgLNlj30izdMbxAwjHMFANnLwDwZ+WQh5sMhyt4lEBqDK9QpvWHtIM4rINKPoJ8aiRZKg5ULSybVHw==}
engines: {node: ^18.18.0 || ^19.8.0 || >= 20.0.0}
@@ -2891,6 +2903,12 @@ packages:
simple-swizzle@0.2.2:
resolution: {integrity: sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==}
+ sonner@2.0.3:
+ resolution: {integrity: sha512-njQ4Hht92m0sMqqHVDL32V2Oun9W1+PHO9NDv9FHfJjT3JT22IG4Jpo3FPQy+mouRKCXFWO+r67v6MrHX2zeIA==}
+ peerDependencies:
+ react: ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+ react-dom: ^18.0.0 || ^19.0.0 || ^19.0.0-rc
+
source-map-js@1.2.1:
resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==}
engines: {node: '>=0.10.0'}
@@ -5487,6 +5505,11 @@ snapshots:
natural-compare@1.4.0: {}
+ next-themes@0.4.6(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+ dependencies:
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+
next@15.1.0(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
dependencies:
'@next/env': 15.1.0
@@ -5945,6 +5968,11 @@ snapshots:
is-arrayish: 0.3.2
optional: true
+ sonner@2.0.3(react-dom@18.3.1(react@18.3.1))(react@18.3.1):
+ dependencies:
+ react: 18.3.1
+ react-dom: 18.3.1(react@18.3.1)
+
source-map-js@1.2.1: {}
source-map-support@0.5.21:
diff --git a/src/app/[username]/[repo]/page.tsx b/src/app/[username]/[repo]/page.tsx
index 86c157a..064efa8 100644
--- a/src/app/[username]/[repo]/page.tsx
+++ b/src/app/[username]/[repo]/page.tsx
@@ -8,10 +8,15 @@ import { useDiagram } from "~/hooks/useDiagram";
import { ApiKeyDialog } from "~/components/api-key-dialog";
import { ApiKeyButton } from "~/components/api-key-button";
import { useState } from "react";
+import { useStarReminder } from "~/hooks/useStarReminder";
export default function Repo() {
const [zoomingEnabled, setZoomingEnabled] = useState(false);
const params = useParams<{ username: string; repo: string }>();
+
+ // Use the star reminder hook
+ useStarReminder();
+
const {
diagram,
error,
diff --git a/src/app/layout.tsx b/src/app/layout.tsx
index 33fee68..685e422 100644
--- a/src/app/layout.tsx
+++ b/src/app/layout.tsx
@@ -5,6 +5,7 @@ import { type Metadata } from "next";
import { Header } from "~/components/header";
import { Footer } from "~/components/footer";
import { CSPostHogProvider } from "./providers";
+import { Toaster } from "~/components/ui/sonner";
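+// The sonner <Toaster /> is mounted once in the root layout so any page can fire toasts (presumably including the new star-reminder prompt).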
export const metadata: Metadata = {
title: "GitDiagram",
@@ -81,6 +82,7 @@ export default function RootLayout({
{children}
+