OpenAI - Prompt Management
This article guides you through integrating your SaaS with orq.ai and OpenAI using our Python SDK. By the end of the article, you'll know how to set up a prompt in orq.ai, perform prompt engineering, request a prompt variant using the SDK code generator, map the orq.ai response to OpenAI's expected format, send a payload to OpenAI, and report the response back to orq.ai for observability and monitoring.
This guide shows you how to integrate your products with OpenAI using the orq.ai Python SDK.
Step 1: Install the SDK
# Install the orq.ai (Orquesta) Python SDK
pip install orquesta-sdk
# Install the OpenAI Python SDK
pip install openai
// orquesta sdk
npm install @orquesta/node --save
yarn add @orquesta/node
// OpenAI
npm install --save openai
yarn add openai
Step 2: Enable models in the Model Garden
Orq.ai allows you to pick and enable the models of your choice and work with them. Enabling one or more models is straightforward: navigate to the Model Garden and toggle on the models you want to use.
Step 3: Execute prompt
You can find your orq.ai API key in your workspace https://my.orquesta.dev/<workspace-name>/settings/developers
from orquesta_sdk import OrquestaClientOptions, Orquesta
from openai import OpenAI

# Your orq.ai API key (see workspace settings > developers)
api_key = "ORQUESTA_API_KEY"

# Initialize the Orquesta client
options = OrquestaClientOptions(
    api_key=api_key,
    environment="production",
)
client = Orquesta(options)

# Fetch the deployment configuration matching the given context
config = client.deployments.get_config(
    key="Deployment-with-OpenAI",
    context={ "environments": [ "production" ], "country": [ "NLD", "BEL" ], "locale": [ "en" ], "user-segment": [ "b2c" ] },
    inputs={ "customer_name": "John" },
    metadata={"custom-field-name":"custom-metadata-value"}
)
deployment_config = config.to_dict()

# Send the payload to OpenAI.
# Use a separate variable so the Orquesta client is not shadowed —
# it is still needed to report metrics back to orq.ai (see Step 4).
openai_client = OpenAI(
    api_key="OPENAI_API_KEY",
)
chat_completion = openai_client.chat.completions.create(
    messages=deployment_config['messages'],
    model=deployment_config['model'],
)

# Print the response
print(chat_completion.choices[0].message.content)
import OpenAI from 'openai';

const start = async (): Promise<void> => {
  const { createClient } = await import('@orquesta/node');

  // Initialize the Orquesta client
  const client = createClient({
    apiKey: 'ORQUESTA_API_KEY',
    environment: 'production',
  });

  // Fetch the deployment configuration matching the given context
  const deploymentConfig = await client.deployments.getConfig({
    key: 'Deployment-with-OpenAI',
    context: {
      environments: ['production'],
      country: ['NLD', 'BEL'],
      locale: ['en'],
      'user-segment': ['b2c'],
    },
    inputs: {
      customer_name: 'John',
    },
    metadata: {
      'custom-field-name': 'custom-metadata-value',
    },
  });
  console.log(deploymentConfig);

  // Send the payload to OpenAI
  const openai = new OpenAI({
    apiKey: 'OPENAI_API_KEY', // This is your OpenAI API key
  });
  const chatCompletion = await openai.chat.completions.create({
    messages: deploymentConfig.messages,
    model: deploymentConfig.model,
  });

  // Print the response
  console.log(chatCompletion.choices[0].message.content);
};

// Run the example; report any rejection instead of leaving a floating promise
start().catch((e: unknown) => {
  console.error(e);
});
Step 4: Additional metrics to the request
After receiving your results from OpenAI, add metrics to the transaction using the add_metrics method to complete the missing data for your logging and monitoring.
# Additional information for logging and monitoring.
# `config` is the deployment object returned by client.deployments.get_config() in Step 3.
config.add_metrics(
    chain_id="c4a75b53-62fa-401b-8e97-493f3d299316",
    conversation_id="ee7b0c8c-eeb2-43cf-83e9-a4a49f8f13ea",
    user_id="e3a202a6-461b-447c-abe2-018ba4d04cd0",
    feedback={"score": 90},
    metadata={
        "custom": "custom_metadata",
        "chain_id": "ad1231xsdaABw",
    },
    usage={
        "prompt_tokens": 100,
        "completion_tokens": 900,
        "total_tokens": 1000,
    },
    performance={
        "latency": 9000,
        "time_to_first_token": 250,
    }
)
deployment.addMetrics({
chain_id: "c4a75b53-62fa-401b-8e97-493f3d299316",
conversation_id: "ee7b0c8c-eeb2-43cf-83e9-a4a49f8f13ea",
user_id: "e3a202a6-461b-447c-abe2-018ba4d04cd0",
feedback: {
score: 100
},
metadata: {
custom: "custom_metadata",
chain_id: "ad1231xsdaABw"
},
usage: {
prompt_tokens: 100,
completion_tokens: 900,
total_tokens: 1000
},
performance: {
latency: 9000,
time_to_first_token: 250
}
})
Updated 3 months ago