cURL:
curl https://api.orq.ai/v2/proxy/chat/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $ORQ_API_KEY" \
-d '{
"model": "openai/gpt-4o",
"messages": [
{
"role": "system",
"content": "You are a helpful assistant."
},
{
"role": "user",
"content": "Hello!"
}
]
}'
OpenAI (Python):
from openai import OpenAI
import os
client = OpenAI(
base_url="https://api.orq.ai/v2/proxy",
api_key=os.getenv("ORQ_API_KEY"),
)
completion = client.chat.completions.create(
model="openai/gpt-4o",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Hello!"}
]
)
print(completion.choices[0].message)
OpenAI (Node):
import OpenAI from "openai";
const openai = new OpenAI({
baseURL: 'https://api.orq.ai/v2/proxy',
apiKey: process.env.ORQ_API_KEY,
});
async function main() {
const completion = await openai.chat.completions.create({
messages: [{ role: "system", content: "You are a helpful assistant." }],
model: "openai/gpt-4o"
});
console.log(completion.choices[0]);
}
main();
Provider Model
Anthropic anthropic/claude-2.0
Anthropic anthropic/claude-2.1
Anthropic anthropic/claude-3-5-haiku-20241022
Azure azure/gpt-35-turbo
Azure azure/gpt-35-turbo-16k
Azure azure/gpt-4
Azure azure/gpt-4-32k
Azure azure/llama-2-13b-chat
Azure azure/llama-2-70b-chat
Azure azure/llama-2-7b-chat
Azure azure/llama-3-70B-instruct
Azure azure/llama-3-8B-instruct
Azure azure/llama-3.1-405B-instruct
Azure azure/llama-3.1-70B-instruct
Azure azure/llama-3.1-8B
Azure azure/mistral-large
Azure azure/o1
Azure azure/o1-mini
Azure azure/o3-mini
Cohere cohere/command-r
Cohere cohere/command-r-plus
Vertex AI google/chat-bison-32k@002
Vertex AI google/chat-bison@001
Vertex AI google/chat-bison@002
Vertex AI google/claude-3-5-haiku@20241022
Vertex AI google/gemini-1.0-pro-001
Google AI google-ai/gemini-1.0-pro
Groq groq/deepseek-r1-distill-llama-70b
Groq groq/deepseek-r1-distill-qwen-32b
Groq groq/gemma2-9b-it
Groq groq/llama-3.1-8b-instant
Groq groq/llama-3.2-11b-vision-preview
Groq groq/llama-3.2-1b-preview
Groq groq/llama-3.2-3b-preview
Groq groq/llama-3.2-90b-vision-preview
Groq groq/llama-3.3-70b-specdec
Groq groq/llama-3.3-70b-versatile
Groq groq/llama-guard-3-8b
Groq groq/llama3-70b-8192
Groq groq/llama3-8b-8192
Groq groq/mistral-saba-24b
Groq groq/mixtral-8x7b-32768
Groq groq/qwen-2.5-32b
Groq groq/qwen-2.5-coder-32b
Groq groq/qwen-qwq-32b
OpenAI openai/gpt-3.5-turbo
OpenAI openai/gpt-3.5-turbo-0125
OpenAI openai/gpt-3.5-turbo-1106
OpenAI openai/gpt-3.5-turbo-16k
OpenAI openai/gpt-4
OpenAI openai/gpt-4-0125-preview
OpenAI openai/gpt-4-0314
OpenAI openai/gpt-4-0613
OpenAI openai/gpt-4-1106-preview
OpenAI openai/gpt-4-32k
OpenAI openai/gpt-4-32k-0613
OpenAI openai/gpt-4-turbo-preview
OpenAI openai/o1-mini
OpenAI openai/o1-mini-2024-09-12
OpenAI openai/o1-preview
OpenAI openai/o1-preview-2024-09-12
Perplexity perplexity/r1-1776
Perplexity perplexity/sonar
Perplexity perplexity/sonar-deep-research
Perplexity perplexity/sonar-pro
Perplexity perplexity/sonar-reasoning
Perplexity perplexity/sonar-reasoning-pro
Together AI togetherai/deepseek-ai/deepseek-llm-67b-chat
Together AI togetherai/deepseek-ai/DeepSeek-R1
Together AI togetherai/deepseek-ai/DeepSeek-V3
Together AI togetherai/meta-llama/Llama-3.3-70B-Instruct-Turbo
Together AI togetherai/meta-llama/Meta-Llama-Guard-3-8B
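Every model in the table above is reachable through the same route; only the model string changes. A minimal sketch of swapping providers, assuming the target provider's credentials are already set up on the orq.ai side:
from openai import OpenAI
import os

client = OpenAI(
    base_url="https://api.orq.ai/v2/proxy",
    api_key=os.getenv("ORQ_API_KEY"),
)
# Only the model string changes; the request shape stays the same.
completion = client.chat.completions.create(
    model="anthropic/claude-3-5-haiku-20241022",  # any model from the table above
    messages=[{"role": "user", "content": "Hello!"}],
)
print(completion.choices[0].message)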
cURL:
curl https://api.orq.ai/v2/proxy/completions \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $ORQ_API_KEY" \
-d '{
"model": "openai/gpt-3.5-turbo-instruct",
"prompt": "Once upon a time",
"max_tokens": 100
}'
OpenAI (Python):
from openai import OpenAI
import os
client = OpenAI(
base_url="https://api.orq.ai/v2/proxy",
api_key=os.getenv("ORQ_API_KEY"),
)
completion = client.completions.create(
model="openai/gpt-3.5-turbo-instruct",
prompt="Once upon a time",
max_tokens=100
)
print(completion.choices[0].text)
OpenAI (Node):
import OpenAI from "openai";
const openai = new OpenAI({
baseURL: 'https://api.orq.ai/v2/proxy',
apiKey: process.env.ORQ_API_KEY,
});
async function main() {
const completion = await openai.completions.create({
model: "openai/gpt-3.5-turbo-instruct",
prompt: "Once upon a time",
max_tokens: 100
});
console.log(completion.choices[0].text);
}
main();
Provider Model
Anthropic anthropic/claude-instant-1.2
AWS Bedrock aws/ai21.j2-mid-v1
AWS Bedrock aws/ai21.j2-ultra-v1
AWS Bedrock aws/amazon.titan-text-express-v1
AWS Bedrock aws/anthropic.claude-instant-v1
AWS Bedrock aws/anthropic.claude-v2
AWS Bedrock aws/anthropic.claude-v2:1
AWS Bedrock aws/cohere.command-light-text-v14
AWS Bedrock aws/cohere.command-text-v14
Azure azure/gpt-35-turbo-instruct
Azure azure/llama-2-13b
Azure azure/llama-2-70b
Azure azure/llama-2-7b
Cohere cohere/command
Cohere cohere/command-light
Vertex AI google/text-bison-32k@002
Vertex AI google/text-bison@001
Vertex AI google/text-bison@002
Vertex AI google/text-unicorn@001
OpenAI openai/gpt-3.5-turbo-instruct
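Embeddings go through the same base URL. A minimal sketch with the OpenAI Python client, assuming the proxy exposes the standard /embeddings route the way it does for chat and completions (the model and input below are illustrative):
from openai import OpenAI
import os

client = OpenAI(
    base_url="https://api.orq.ai/v2/proxy",
    api_key=os.getenv("ORQ_API_KEY"),
)
# Assumption: the proxy forwards the standard OpenAI embeddings request.
response = client.embeddings.create(
    model="openai/text-embedding-3-small",
    input="The quick brown fox jumps over the lazy dog",
)
print(len(response.data[0].embedding))  # vector dimensionality
The models below can be passed as the model value.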
Provider Model
Azure azure/text-embedding-ada-002
Cohere cohere/embed-english-light-v3.0
Cohere cohere/embed-english-v3.0
Cohere cohere/embed-multilingual-light-v3.0
Cohere cohere/embed-multilingual-v3.0
Jina AI jina/jina-clip-v1
Jina AI jina/jina-clip-v2
Jina AI jina/jina-embeddings-v2-base-code
Jina AI jina/jina-embeddings-v2-base-de
Jina AI jina/jina-embeddings-v2-base-en
Jina AI jina/jina-embeddings-v2-base-es
Jina AI jina/jina-embeddings-v2-base-zh
Jina AI jina/jina-embeddings-v3
OpenAI openai/text-embedding-3-large
OpenAI openai/text-embedding-3-small
OpenAI openai/text-embedding-ada-002
cURL:
curl https://api.orq.ai/v2/proxy/images/generations \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $ORQ_API_KEY" \
-d '{
"model": "openai/dall-e-3",
"prompt": "A beautiful sunset over mountains",
"n": 1,
"size": "1024x1024"
}'
OpenAI (Python):
from openai import OpenAI
import os
client = OpenAI(
base_url="https://api.orq.ai/v2/proxy",
api_key=os.getenv("ORQ_API_KEY"),
)
response = client.images.generate(
model="openai/dall-e-3",
prompt="A beautiful sunset over mountains",
n=1,
size="1024x1024"
)
print(response.data[0].url)
OpenAI (Node):
import OpenAI from "openai";
const openai = new OpenAI({
baseURL: 'https://api.orq.ai/v2/proxy',
apiKey: process.env.ORQ_API_KEY,
});
async function main() {
const response = await openai.images.generate({
model: "openai/dall-e-3",
prompt: "A beautiful sunset over mountains",
n: 1,
size: "1024x1024"
});
console.log(response.data[0].url);
}
main();
Provider Model
Azure azure/dall-e-3
FAL fal/flux-pro/new
FAL fal/flux/dev
FAL fal/flux/schnell
Vertex AI google/imagegeneration@006
Vertex AI google/imagen-3.0-fast-generate-001
Vertex AI google/imagen-3.0-generate-001
Leonardo AI leonardoai/leonard-diffusion-xl
Leonardo AI leonardoai/leonard-kino-xl
Leonardo AI leonardoai/leonard-lightning-xl
Leonardo AI leonardoai/leonard-vision-xl
OpenAI openai/dall-e-2
OpenAI openai/dall-e-3
cURL:
curl https://api.orq.ai/v2/proxy/moderations \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $ORQ_API_KEY" \
-d '{
"model": "openai/text-moderation-latest",
"input": "I want to check if this text is appropriate."
}'
OpenAI (Python):
from openai import OpenAI
import os
client = OpenAI(
base_url="https://api.orq.ai/v2/proxy",
api_key=os.getenv("ORQ_API_KEY"),
)
response = client.moderations.create(
model="openai/text-moderation-latest",
input="I want to check if this text is appropriate."
)
print(response.results[0])
OpenAI (Node):
import OpenAI from "openai";
const openai = new OpenAI({
baseURL: 'https://api.orq.ai/v2/proxy',
apiKey: process.env.ORQ_API_KEY,
});
async function main() {
const response = await openai.moderations.create({
model: "openai/text-moderation-latest",
input: "I want to check if this text is appropriate.",
});
console.log(response.results[0]);
}
main();
Provider Model
OpenAI openai/omni-moderation-2024-09-26
OpenAI openai/omni-moderation-latest
OpenAI openai/text-moderation-007
OpenAI openai/text-moderation-latest
OpenAI openai/text-moderation-stable
cURL:
curl https://api.orq.ai/v2/proxy/rerank \
-H "Content-Type: application/json" \
-H "Authorization: Bearer $ORQ_API_KEY" \
-d '{
"model": "cohere/rerank-english-v3.0",
"query": "What is machine learning?",
"documents": [
"Machine learning is a branch of AI",
"Machine learning uses data to improve",
"AI is changing the world"
]
}'
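The rerank route is not part of the OpenAI SDKs, so call it directly. A minimal Python sketch using requests, assuming the same JSON body and bearer-token auth as the cURL example above:
import os
import requests

# Same payload as the cURL example, sent with the requests library.
response = requests.post(
    "https://api.orq.ai/v2/proxy/rerank",
    headers={"Authorization": f"Bearer {os.getenv('ORQ_API_KEY')}"},
    json={
        "model": "cohere/rerank-english-v3.0",
        "query": "What is machine learning?",
        "documents": [
            "Machine learning is a branch of AI",
            "Machine learning uses data to improve",
            "AI is changing the world",
        ],
    },
)
response.raise_for_status()
print(response.json())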
Provider Model
Cohere cohere/rerank-english-v3.0
Cohere cohere/rerank-multilingual-v3.0
Cohere cohere/rerank-v3.5
Jina AI jina/jina-colbert-v2
Jina AI jina/jina-reranker-v1-base-en
Jina AI jina/jina-reranker-v1-tiny-en
Jina AI jina/jina-reranker-v1-turbo-en
Jina AI jina/jina-reranker-v2-base-multilingual
cURL:
curl https://api.orq.ai/v2/proxy/audio/transcriptions \
-H "Authorization: Bearer $ORQ_API_KEY" \
-F file="@/path/to/audio.mp3" \
-F model="openai/whisper-1"
OpenAI (Python):
from openai import OpenAI
import os
client = OpenAI(
base_url="https://api.orq.ai/v2/proxy",
api_key=os.getenv("ORQ_API_KEY"),
)
audio_file = open("speech.mp3", "rb")
transcript = client.audio.transcriptions.create(
model="openai/whisper-1",
file=audio_file
)
print(transcript.text)
OpenAI (Node):
import OpenAI from "openai";
import fs from "fs";
const openai = new OpenAI({
baseURL: 'https://api.orq.ai/v2/proxy',
apiKey: process.env.ORQ_API_KEY,
});
async function main() {
const transcript = await openai.audio.transcriptions.create({
file: fs.createReadStream("speech.mp3"),
model: "openai/whisper-1",
});
console.log(transcript.text);
}
main();
Provider Model
Azure azure/whisper
OpenAI openai/whisper-1
cURL:
curl https://api.orq.ai/v2/proxy/audio/speech \
-H "Authorization: Bearer $ORQ_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"model": "openai/tts-1",
"input": "Hello world!",
"voice": "alloy"
}' --output speech.mp3
OpenAI (Python):
from openai import OpenAI
import os
client = OpenAI(
base_url="https://api.orq.ai/v2/proxy",
api_key=os.getenv("ORQ_API_KEY"),
)
response = client.audio.speech.create(
model="openai/tts-1",
voice="alloy",
input="Hello world!"
)
response.stream_to_file("speech.mp3")
OpenAI (Node):
import OpenAI from "openai";
import fs from "fs";
const openai = new OpenAI({
baseURL: 'https://api.orq.ai/v2/proxy',
apiKey: process.env.ORQ_API_KEY,
});
async function main() {
const mp3 = await openai.audio.speech.create({
model: "openai/tts-1",
voice: "alloy",
input: "Hello world!",
});
const buffer = Buffer.from(await mp3.arrayBuffer());
await fs.promises.writeFile("speech.mp3", buffer);
}
main();
Provider Model
Eleven Labs elevenlabs/eleven_flash_v2
Eleven Labs elevenlabs/eleven_flash_v2_5
Eleven Labs elevenlabs/eleven_multilingual_v2
Eleven Labs elevenlabs/eleven_turbo_v2_5
OpenAI openai/tts-1
OpenAI openai/tts-1-hd
The following voices are available for Text-to-Speech models:
alloy: Neutral, versatile voice
echo: Neutral, soft-spoken voice
fable: Expressive, narrative-focused voice
onyx: Deep, authoritative voice
nova: Warm, natural voice
shimmer: Clear, optimistic voice
aria: Neutral, versatile voice
roger: Deep, authoritative voice
sarah: Warm, friendly voice
laura: Soft, gentle voice
charlie: Casual, conversational voice
george: Professional, articulate voice
callum: Youthful, energetic voice
river: Calm, soothing voice
liam: Clear, confident voice
charlotte: Elegant, refined voice
alice: Bright, cheerful voice
matilda: Thoughtful, measured voice
will: Reliable, trustworthy voice
jessica: Engaging, expressive voice
eric: Authoritative, commanding voice
chris: Friendly, approachable voice
brian: Mature, distinguished voice
daniel: Versatile, balanced voice
lily: Sweet, melodious voice
bill: Grounded, authentic voice
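To use a different voice, change the voice field of the speech request. The sketch below pairs an Eleven Labs model with the aria voice; that pairing is an assumption for illustration, so check which voices each model accepts:
from openai import OpenAI
import os

client = OpenAI(
    base_url="https://api.orq.ai/v2/proxy",
    api_key=os.getenv("ORQ_API_KEY"),
)
# Assumption: the listed voice names are accepted as-is on the speech route.
response = client.audio.speech.create(
    model="elevenlabs/eleven_multilingual_v2",
    voice="aria",
    input="Hello from a different voice!",
)
response.stream_to_file("aria.mp3")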