
Commit e3b43b9

fix!: build errors

1 parent 2509ff3

10 files changed: +87 additions, -24 deletions

Cargo.lock

Lines changed: 6 additions & 0 deletions
Generated file; diff not rendered.

open-router-blueprint-template-bin/src/main.rs

Lines changed: 4 additions & 1 deletion
@@ -74,7 +74,10 @@ async fn main() -> Result<(), blueprint_sdk::Error> {
     let result = BlueprintRunner::builder(tangle_config, env)
         .router(
             Router::new()
-                .route(PROCESS_LLM_REQUEST_JOB_ID, process_llm_request.layer(TangleLayer))
+                .route(
+                    PROCESS_LLM_REQUEST_JOB_ID,
+                    process_llm_request.layer(TangleLayer),
+                )
                 .route(REPORT_METRICS_JOB_ID, report_metrics.layer(TangleLayer))
                 .layer(FilterLayer::new(MatchesServiceId(service_id)))
                 .with_context(context),

open-router-blueprint-template-lib/src/context.rs

Lines changed: 3 additions & 1 deletion
@@ -7,10 +7,12 @@ use tracing::info;
 use crate::config::BlueprintConfig;
 use crate::llm::{LlmClient, LocalLlmClient, LocalLlmConfig, NodeMetrics};
 use crate::load_balancer::{LoadBalancer, LoadBalancerConfig};
+use blueprint_sdk::macros::context::{KeystoreContext, ServicesContext, TangleClientContext};
 
 /// Context for the OpenRouter Blueprint
-#[derive(Clone)]
+#[derive(Clone, KeystoreContext, TangleClientContext, ServicesContext)]
 pub struct OpenRouterContext {
+    #[config]
     /// Blueprint environment configuration
     pub env: BlueprintEnvironment,
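For readers unfamiliar with the context derives: the `#[config]` attribute marks which field the generated impls read the `BlueprintEnvironment` from. Below is a hand-rolled sketch of that general pattern; every name is illustrative, not blueprint_sdk's actual macro expansion:

```rust
// Hand-rolled illustration of the derive pattern: the impls a macro like
// KeystoreContext would generate read from the field marked #[config].
// All names here are hypothetical, not blueprint_sdk's real expansion.
struct BlueprintEnvironment {
    keystore_uri: String,
}

struct OpenRouterContext {
    env: BlueprintEnvironment, // the field `#[config]` would mark
}

trait KeystoreContext {
    fn keystore_uri(&self) -> &str;
}

// What `#[derive(KeystoreContext)]` would emit, written by hand:
impl KeystoreContext for OpenRouterContext {
    fn keystore_uri(&self) -> &str {
        &self.env.keystore_uri
    }
}

fn main() {
    let ctx = OpenRouterContext {
        env: BlueprintEnvironment {
            keystore_uri: "file:///tmp/keystore".to_string(),
        },
    };
    println!("{}", ctx.keystore_uri());
}
```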

open-router-blueprint-template-lib/src/jobs.rs

Lines changed: 2 additions & 2 deletions
@@ -6,10 +6,10 @@ use crate::context::OpenRouterContext;
 use crate::llm::{LlmClientExt, LlmRequest, LlmResponse};
 
 /// Job ID for processing LLM requests
-pub const PROCESS_LLM_REQUEST_JOB_ID: u32 = 0;
+pub const PROCESS_LLM_REQUEST_JOB_ID: u8 = 0;
 
 /// Job ID for reporting metrics
-pub const REPORT_METRICS_JOB_ID: u32 = 1;
+pub const REPORT_METRICS_JOB_ID: u8 = 1;
 
 /// Process an LLM request
 ///
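This `u32` to `u8` narrowing is the breaking change the `fix!:` prefix announces: any caller that stored or passed job IDs as `u32` must update. Presumably the SDK's router keys jobs by `u8`. A self-contained sketch of dispatching on the narrowed constants (the `dispatch` helper is hypothetical, not part of the crate):

```rust
// Job IDs narrowed to u8, mirroring jobs.rs; `dispatch` is illustrative only.
pub const PROCESS_LLM_REQUEST_JOB_ID: u8 = 0;
pub const REPORT_METRICS_JOB_ID: u8 = 1;

/// Map a job ID to its handler name, rejecting unknown IDs.
fn dispatch(job_id: u8) -> &'static str {
    match job_id {
        PROCESS_LLM_REQUEST_JOB_ID => "process_llm_request",
        REPORT_METRICS_JOB_ID => "report_metrics",
        _ => "unknown job",
    }
}

fn main() {
    assert_eq!(dispatch(PROCESS_LLM_REQUEST_JOB_ID), "process_llm_request");
    assert_eq!(dispatch(REPORT_METRICS_JOB_ID), "report_metrics");
}
```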

open-router-blueprint-template-lib/src/llm/local_llm.rs

Lines changed: 3 additions & 5 deletions
@@ -150,15 +150,13 @@ impl LlmClient for LocalLlmClient {
     }
 
     /// Template method for embeddings. To use, override this method in your concrete blueprint.
-    async fn embeddings(
-        &self,
-        request: EmbeddingRequest,
-    ) -> Result<EmbeddingResponse> {
+    async fn embeddings(&self, request: EmbeddingRequest) -> Result<EmbeddingResponse> {
         if !self.config.models.iter().any(|m| m.id == request.model) {
             return Err(LlmError::ModelNotSupported(request.model));
         }
         Err(LlmError::NotImplemented(
-            "embeddings must be implemented in your blueprint (see LocalLlmClient in template)".to_string(),
+            "embeddings must be implemented in your blueprint (see LocalLlmClient in template)"
+                .to_string(),
         ))
     }
 }

open-router-blueprint-template-lib/src/llm/mod.rs

Lines changed: 3 additions & 0 deletions
@@ -43,6 +43,7 @@ pub enum LlmError {
 pub type Result<T> = std::result::Result<T, LlmError>;
 
 /// Trait for LLM clients
+#[allow(async_fn_in_trait)]
 #[async_trait]
 pub trait LlmClient: Send + Sync {
     /// Get information about the supported models
 
@@ -137,6 +138,7 @@ pub struct NodeMetrics {
 }
 
 /// Trait for LLM clients that support streaming responses
+#[allow(async_fn_in_trait)]
 #[async_trait::async_trait]
 pub trait StreamingLlmClient: LlmClient {
     /// Process a streaming chat completion request
 
@@ -153,6 +155,7 @@ pub trait StreamingLlmClient: LlmClient {
 }
 
 /// Extension trait for checking if an LlmClient also implements StreamingLlmClient
+#[allow(async_fn_in_trait)]
 pub trait LlmClientExt {
     /// Check if this client supports streaming
     fn supports_streaming(&self) -> bool;
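Background on the new `#[allow(async_fn_in_trait)]` attributes: rustc warns on a public `async fn` in a trait because callers cannot name or bound the returned future (e.g., require `Send`). Where `#[async_trait]` is in play the methods desugar to boxed futures, so the allow there reads as belt-and-braces; on `LlmClientExt` it covers any plain `async fn` members. A minimal reproduction with a hypothetical trait (needs Rust 1.75+ for `async fn` in traits):

```rust
// Minimal reproduction of the async_fn_in_trait lint: without the allow,
// rustc warns that callers can't put bounds on the returned future.
#[allow(async_fn_in_trait)]
pub trait Greeter {
    async fn greet(&self) -> String;
}

pub struct English;

impl Greeter for English {
    async fn greet(&self) -> String {
        "hello".to_string()
    }
}

fn main() {
    // Futures are inert until polled; constructing one needs no runtime.
    let _fut = English.greet();
    println!("compiles cleanly with the lint allowed");
}
```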

open-router-blueprint-template-lib/src/llm/models.rs

Lines changed: 44 additions & 0 deletions
@@ -45,6 +45,20 @@ pub struct ChatCompletionRequest {
     pub additional_params: HashMap<String, serde_json::Value>,
 }
 
+impl Default for ChatCompletionRequest {
+    fn default() -> Self {
+        Self {
+            model: String::new(),
+            messages: Vec::new(),
+            max_tokens: None,
+            temperature: None,
+            top_p: None,
+            stream: None,
+            additional_params: HashMap::new(),
+        }
+    }
+}
+
 /// A chat completion choice
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct ChatCompletionChoice {
 
@@ -110,6 +124,20 @@ pub struct TextCompletionRequest {
     pub additional_params: HashMap<String, serde_json::Value>,
 }
 
+impl Default for TextCompletionRequest {
+    fn default() -> Self {
+        Self {
+            model: String::new(),
+            prompt: String::new(),
+            max_tokens: None,
+            temperature: None,
+            top_p: None,
+            stream: None,
+            additional_params: HashMap::new(),
+        }
+    }
+}
+
 /// A text completion choice
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct TextCompletionChoice {
 
@@ -159,6 +187,16 @@ pub struct EmbeddingRequest {
     pub additional_params: HashMap<String, serde_json::Value>,
 }
 
+impl Default for EmbeddingRequest {
+    fn default() -> Self {
+        Self {
+            model: String::new(),
+            input: Vec::new(),
+            additional_params: HashMap::new(),
+        }
+    }
+}
+
 /// A single embedding result
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct EmbeddingData {
 
@@ -212,6 +250,12 @@ pub enum LlmRequest {
     Embedding(EmbeddingRequest),
 }
 
+impl Default for LlmRequest {
+    fn default() -> Self {
+        Self::ChatCompletion(ChatCompletionRequest::default())
+    }
+}
+
 /// A unified response type that can represent any LLM operation result
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(tag = "type")]
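The point of these `Default` impls is that call sites can use struct-update syntax and spell out only the fields they care about. A trimmed, self-contained mirror of `ChatCompletionRequest` showing the payoff (field types simplified; not the crate's real definitions):

```rust
use std::collections::HashMap;

// Trimmed mirror of ChatCompletionRequest (the real struct uses ChatMessage
// and serde_json::Value) to show why Default pays off at call sites.
#[derive(Debug)]
struct ChatCompletionRequest {
    model: String,
    messages: Vec<String>,
    max_tokens: Option<u32>,
    temperature: Option<f32>,
    top_p: Option<f32>,
    stream: Option<bool>,
    additional_params: HashMap<String, String>,
}

impl Default for ChatCompletionRequest {
    fn default() -> Self {
        Self {
            model: String::new(),
            messages: Vec::new(),
            max_tokens: None,
            temperature: None,
            top_p: None,
            stream: None,
            additional_params: HashMap::new(),
        }
    }
}

fn main() {
    // Struct-update syntax: spell out only the fields you care about.
    let req = ChatCompletionRequest {
        model: "example-model".to_string(),
        messages: vec!["Hello!".to_string()],
        ..Default::default()
    };
    assert!(req.max_tokens.is_none() && req.stream.is_none());
    println!("{req:?}");
}
```

Since every field type here already implements `Default`, `#[derive(Default)]` would also work; the manual impls in the diff just make the defaults explicit.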

open-router-blueprint-template-lib/src/llm/streaming.rs

Lines changed: 17 additions & 11 deletions
@@ -1,13 +1,13 @@
 use std::pin::Pin;
 // use std::task::{Context, Poll};
-use async_trait::async_trait;
+// Removed unused import: async_trait::async_trait
 use futures::stream::{Stream, StreamExt};
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
 
 use super::{
-    ChatCompletionChoice, ChatCompletionRequest, ChatCompletionResponse, ChatMessage, LlmError,
-    Result, TextCompletionChoice, TextCompletionRequest, TextCompletionResponse,
+    ChatCompletionChoice, ChatCompletionResponse, ChatMessage, LlmError, Result,
+    TextCompletionChoice, TextCompletionResponse,
 };
 
 /// A chunk of a streaming chat completion response
 
@@ -185,10 +185,13 @@ pub async fn collect_chat_completion_stream(
         .collect();
 
     Ok(ChatCompletionResponse {
-        // id,
-        // object: "chat.completion".to_string(),
-        // created,
-        // model,
+        id: "stream-collected".to_string(),
+        object: "chat.completion".to_string(),
+        created: std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap_or_default()
+            .as_secs(),
+        model: "unknown".to_string(),
         choices: response_choices,
         usage: None, // Usage information is not available when streaming
     })
 
@@ -250,10 +253,13 @@ pub async fn collect_text_completion_stream(
         .collect();
 
     Ok(TextCompletionResponse {
-        // id,
-        // object: "text_completion".to_string(),
-        // created,
-        // model,
+        id: "stream-collected".to_string(),
+        object: "text_completion".to_string(),
+        created: std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap_or_default()
+            .as_secs(),
+        model: "unknown".to_string(),
        choices: response_choices,
         usage: None, // Usage information is not available when streaming
     })
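The interesting part of this fix is the `created` field: the required response fields were previously commented out (a build error, since the structs demand them), and `created` is now filled with seconds since the UNIX epoch, saturating to zero if the system clock reads earlier than 1970. That expression in isolation:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

// The `created` expression from the diff: seconds since the UNIX epoch,
// falling back to Duration::default() (i.e., 0) when duration_since
// returns Err because the clock is set before 1970.
fn created_now() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}

fn main() {
    println!("created = {}", created_now());
}
```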

open-router-blueprint-template-lib/tests/e2e.rs

Lines changed: 2 additions & 1 deletion
@@ -3,6 +3,7 @@ use blueprint_sdk::tangle::serde::to_field;
 use blueprint_sdk::testing::tempfile;
 use blueprint_sdk::testing::utils::setup_log;
 use blueprint_sdk::testing::utils::tangle::TangleTestHarness;
+use blueprint_sdk::Job;
 use open_router_blueprint_template_lib::{
     process_llm_request, OpenRouterContext, PROCESS_LLM_REQUEST_JOB_ID,
 };
 
@@ -23,7 +24,7 @@ async fn test_blueprint() -> color_eyre::Result<()> {
 
     test_env.initialize().await?;
     test_env
-        .add_job(PROCESS_LLM_REQUEST_JOB_ID.layer(TangleLayer))
+        .add_job(process_llm_request.layer(TangleLayer))
         .await;
 
     test_env.start(context).await?;
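Two related fixes here: the job is registered by its handler (`process_llm_request`) rather than its numeric ID, since an integer constant has no `.layer` method, and `use blueprint_sdk::Job` is added, presumably because `.layer` is provided by that trait and trait methods only resolve when the trait is in scope. A generic, self-contained illustration of that in-scope rule (all names hypothetical, not the SDK's actual types):

```rust
mod sdk {
    // Hypothetical stand-in for a trait like blueprint_sdk::Job that
    // provides a `layer` combinator on handler functions.
    pub trait Job: Sized {
        fn layer<L>(self, layer: L) -> (Self, L) {
            (self, layer)
        }
    }

    // Blanket impl so plain zero-argument functions get `.layer`.
    impl<F: Fn()> Job for F {}

    pub struct TangleLayer;
}

// Without this import, `process_llm_request.layer(...)` fails to resolve:
// trait methods are only callable when the trait is in scope.
use sdk::{Job, TangleLayer};

fn process_llm_request() {}

fn main() {
    let _job = process_llm_request.layer(TangleLayer);
}
```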

open-router-blueprint-template-lib/tests/llm_request_test.rs

Lines changed: 3 additions & 3 deletions
@@ -57,7 +57,7 @@ async fn test_process_chat_completion_request() -> color_eyre::Result<()> {
     let result = process_llm_request(Context(context), TangleArg(llm_request)).await?;
 
     // Convert the result to a JobResult
-    let job_result = result.into_job_result()?;
+    let job_result = result.into_job_result().unwrap();
 
     // Verify that the job was successful
     match job_result {
 
@@ -100,7 +100,7 @@ async fn test_process_text_completion_request() -> color_eyre::Result<()> {
     let result = process_llm_request(Context(context), TangleArg(llm_request)).await?;
 
     // Convert the result to a JobResult
-    let job_result = result.into_job_result()?;
+    let job_result = result.into_job_result().unwrap();
 
     // Verify that the job was successful
     match job_result {
 
@@ -139,7 +139,7 @@ async fn test_process_embedding_request() -> color_eyre::Result<()> {
     let result = process_llm_request(Context(context), TangleArg(llm_request)).await?;
 
     // Convert the result to a JobResult
-    let job_result = result.into_job_result()?;
+    let job_result = result.into_job_result().unwrap();
 
     // Verify that the job was successful
     match job_result {
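On the `?` to `.unwrap()` change: `?` inside a `color_eyre::Result` test only works when the error type converts via `From`; swapping to `.unwrap()` suggests `into_job_result` yields an `Option`, or a `Result` whose error lacks that conversion, and in tests a panic with a readable message is acceptable. A standalone sketch of the pattern with hypothetical types:

```rust
// Hypothetical sketch: when a call returns Option<T>, `?` can't be used in
// a function whose error type has no From<...> for the failure, so the
// test unwraps and lets a None panic loudly instead.
#[derive(Debug)]
enum JobResult {
    Ok(Vec<u8>),
    Err(String),
}

fn into_job_result() -> Option<JobResult> {
    Some(JobResult::Ok(vec![1, 2, 3]))
}

fn main() {
    // `.unwrap()` panics on None, surfacing the failure in a test run.
    let job_result = into_job_result().unwrap();
    match job_result {
        JobResult::Ok(bytes) => assert_eq!(bytes.len(), 3),
        JobResult::Err(e) => panic!("job failed: {e}"),
    }
}
```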
