The primitive layer for multi-modal AI
All capabilities. All providers. One interface.
Primitives, not frameworks.
Type-safe, capability- and provider-agnostic primitives.
- Unified Interface: One API for OpenAI, Anthropic, Gemini, Mistral, and 14+ others.
- True Multi-Modal: Text, Image, Audio, Video, Embeddings, Search — all first-class citizens.
- Type-Safe by Design: Full Pydantic validation and IDE autocomplete.
- Zero Lock-In: Switch providers instantly by changing a single config string.
- Primitives, Not Frameworks: No agents, no chains, no magic. Just clean I/O.
- Lightweight Architecture: No vendor SDKs. Pure, fast HTTP.
from celeste import create_client
# "We need a catchy slogan for our new eco-friendly sneaker."
# 1. Generate a slogan: create a text-generation client and prompt it.
# NOTE(review): top-level `await` assumes this snippet runs inside an async
# context (e.g. `asyncio.run`) — confirm how the docs frame these examples.
client = create_client(
capability="text-generation",
model="gpt-5"
)
# The result object exposes the generated text as `.content` (see the print below).
slogan = await client.generate("Write a slogan for an eco-friendly sneaker.")
print(slogan.content)

from pydantic import BaseModel, Field
class ProductCampaign(BaseModel):
    """Structured campaign assets extracted from a slogan.

    Passed as `output_schema` so the provider returns validated,
    already-parsed data (see step 2 below).
    """

    # Prompt handed to the image model for the ad visual (step 3).
    visual_prompt: str
    # Script handed to the speech model for the radio spot (step 4).
    audio_script: str
# 2. Extract Campaign Assets (Anthropic)
# -----------------------------------------------------
# Same factory, different model string — the call shape does not change.
extract_client = create_client(Capability.TEXT_GENERATION, model="claude-opus-4-1")
# `output_schema` requests output conforming to the ProductCampaign model.
campaign_output = await extract_client.generate(
f"Create campaign assets for slogan: {slogan.content}",
output_schema=ProductCampaign
)
# `.content` holds the parsed result; its fields are used in steps 3 and 4.
campaign = campaign_output.content
# 3. Generate Ad Visual (Flux)
# -----------------------------------------------------
image_client = create_client(Capability.IMAGE_GENERATION, model="flux-2-flex")
# The extracted `visual_prompt` feeds straight into image generation.
image_output = await image_client.generate(
campaign.visual_prompt,
aspect_ratio="1:1"
)
image = image_output.content
# 4. Generate Radio Spot (ElevenLabs)
# -----------------------------------------------------
speech_client = create_client(Capability.SPEECH_GENERATION, model="eleven_v3")
# Same `.generate` interface for audio; `voice` selects the ElevenLabs voice.
speech_output = await speech_client.generate(
campaign.audio_script,
voice="adam"
)
speech = speech_output.content

No special cases. No separate libraries. One consistent interface.
from pydantic import BaseModel
class User(BaseModel):
    """Target schema for the structured-extraction examples below."""

    # Person's name, e.g. "John".
    name: str
    # Age in years.
    age: int
# Model IDs
anthropic_model_id = "claude-4-5-sonnet"
google_model_id = "gemini-2.5-flash"

# ❌ Anthropic Way
# Provider-specific path: hand-built message list plus a JSON-schema output format.
from anthropic import Anthropic
import json
client = Anthropic()
response = client.messages.create(
model=anthropic_model_id,
messages=[
{"role": "user",
"content": "Extract user info: John is 30"}
],
# NOTE(review): confirm `output_format` matches the current Anthropic SDK;
# the schema here is derived from the Pydantic `User` model defined above.
output_format={
"type": "json_schema",
"schema": User.model_json_schema()
}
)
user_data = json.loads(response.content[0].text)

# ❌ Google Gemini Way
from google import genai
from google.genai import types
client = genai.Client()
response = await client.aio.models.generate_content(
model=gemini_model_id,
contents="Extract user info: John is 30",
config=types.GenerateContentConfig(
response_mime_type="application/json",
response_schema=User
)
)
user = response.parsed

# ✅ Celeste Way
from celeste import create_client, Capability
# Unified path: the same two calls regardless of provider.
client = create_client(
Capability.TEXT_GENERATION,
model=google_model_id # <--- Choose any model from any provider
)
response = await client.generate(
prompt="Extract user info: John is 30",
output_schema=User # <--- Unified parameter working across all providers
)
user = response.content  # Already parsed as User instance

uv add "celeste-ai[text-generation]"  # Text only
uv add "celeste-ai[image-generation]" # Image generation
uv add "celeste-ai[all]"              # Everything

# Full IDE autocomplete
response = await client.generate(
prompt="Explain AI",
temperature=0.7,  # ✅ Validated (0.0-2.0)
max_tokens=100,   # ✅ Validated (int)
)
# Typed response
print(response.content) # str (IDE knows the type)
print(response.usage.input_tokens) # int
print(response.metadata["model"])  # str

Catch errors before production.
We welcome contributions! See CONTRIBUTING.md.
Request a provider: GitHub Issues Report bugs: GitHub Issues
MIT license β see LICENSE for details.
Get Started • Documentation • GitHub
Made with ❤️ by developers tired of framework lock-in