Integrate DSPy with the AI Router to build optimized LLM programs, using Stanford's framework for automatic prompt optimization and reasoning-based AI systems.
DSPy is a framework for programmatically optimizing LLM prompts and weights through composable modules and signatures. By connecting DSPy to Orq.ai’s AI Router, you get access to 300+ models for your prompt optimization pipelines with a single configuration change.
"""Route a DSPy program through Orq.ai's AI Router (OpenAI-compatible endpoint)."""
import os

import dspy

# Point DSPy's LM client at the Orq.ai router. NOTE(review): the doubled
# "openai/openai/..." prefix is kept from the original snippet — presumably
# the router's provider-prefixed model naming; confirm against router docs.
router_lm = dspy.LM(
    "openai/openai/gpt-4o",
    api_key=os.getenv("ORQ_API_KEY"),
    api_base="https://api.orq.ai/v2/router",
)
dspy.configure(lm=router_lm)


class MathProblem(dspy.Signature):
    """Solve math problems step by step."""

    problem: str = dspy.InputField()
    answer: str = dspy.OutputField()


# ChainOfThought adds intermediate reasoning before producing `answer`.
solver = dspy.ChainOfThought(MathProblem)
prediction = solver(problem="If a train travels 120 miles in 2 hours, what is its speed?")
print(prediction.answer)
Stanford DSPy is a framework for algorithmically optimizing LM prompts and weights through programming rather than prompting. Tracing DSPy with Orq.ai provides comprehensive insights into signature execution, module performance, optimization processes, and few-shot learning effectiveness to optimize your programmatic LLM applications.
"""Instrument DSPy with OpenTelemetry and export spans to Orq.ai via OTLP."""
import os

import dspy
from openinference.instrumentation.dspy import DSPyInstrumentor
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

# Fail fast with a clear message when exporter configuration is missing.
# The original code called .split() on os.getenv(...) directly, which raises
# an opaque AttributeError on None if OTEL_EXPORTER_OTLP_HEADERS is unset,
# and silently built an "None/v1/traces" endpoint if the endpoint was unset.
otlp_endpoint = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT")
otlp_headers = os.getenv("OTEL_EXPORTER_OTLP_HEADERS")
if not otlp_endpoint or not otlp_headers:
    raise RuntimeError(
        "Set OTEL_EXPORTER_OTLP_ENDPOINT and OTEL_EXPORTER_OTLP_HEADERS "
        "(e.g. 'Authorization=Bearer <key>') before running."
    )

# Configure OpenTelemetry with a service name so traces group in Orq.ai.
tracer_provider = TracerProvider(
    resource=Resource({"service.name": "dspy-app"})
)
# OTEL_EXPORTER_OTLP_HEADERS is expected as 'Authorization=<value>'; keep
# only the value part for the Authorization header.
otlp_exporter = OTLPSpanExporter(
    endpoint=f"{otlp_endpoint}/v1/traces",
    headers={"Authorization": otlp_headers.split("=", 1)[1]},
)
tracer_provider.add_span_processor(BatchSpanProcessor(otlp_exporter))

# Instrument DSPy so signature/module executions emit spans automatically.
DSPyInstrumentor().instrument(tracer_provider=tracer_provider)

# Modern DSPy syntax (v2.0+)
lm = dspy.LM('openai/gpt-4', api_key=os.getenv('OPENAI_API_KEY'))
dspy.configure(lm=lm)


# Define signature
class BasicQA(dspy.Signature):
    """Answer questions with helpful and accurate responses"""

    question = dspy.InputField()
    answer = dspy.OutputField(desc="A comprehensive answer to the question")


# Create predictor (automatically traced)
qa = dspy.Predict(BasicQA)

# Execute prediction
result = qa(question="What are the benefits of renewable energy?")
print(result.answer)
All DSPy signature executions and module operations will be automatically instrumented and exported to Orq.ai through the OTLP exporter. For more details, see Traces.
"""Chain-of-thought reasoning example; module executions are auto-traced."""
import os  # fix: os.getenv is used below but the original snippet never imported os

import dspy

# Setup done as shown in Integration section above
lm = dspy.LM('openai/gpt-4', api_key=os.getenv('OPENAI_API_KEY'))
dspy.configure(lm=lm)


# Define signature with reasoning
class ComplexProblem(dspy.Signature):
    """Solve complex problems with step-by-step reasoning"""

    problem = dspy.InputField(desc="The problem to solve")
    reasoning = dspy.OutputField(desc="Step-by-step reasoning process")
    solution = dspy.OutputField(desc="Final solution")


# Use Chain of Thought
class ReasoningModule(dspy.Module):
    """Wrap ChainOfThought so forward() calls appear as traced module spans."""

    def __init__(self):
        super().__init__()
        self.think = dspy.ChainOfThought(ComplexProblem)

    def forward(self, problem):
        return self.think(problem=problem)


# Execute with tracing
reasoner = ReasoningModule()
problems = [
    "A farmer has 17 sheep. All but 9 die. How many are left?",
    "If it takes 5 machines 5 minutes to make 5 widgets, how long for 100 machines to make 100 widgets?",
]
for problem in problems:
    result = reasoner(problem=problem)
    print(f"Problem: {problem}")
    print(f"Reasoning: {result.reasoning}")
    print(f"Solution: {result.solution}\n")