Commit 61aee52

Author: Gerome El-assaad (committed)
fixed vercel build errors
1 parent 451da24 · commit 61aee52

File tree: 2 files changed (+22, -129 lines)

app/api/chat/route.ts

Lines changed: 13 additions & 129 deletions
@@ -1,7 +1,6 @@
 import { Duration } from '@/lib/duration'
 import {
   getModelClient,
-  getDefaultModelParams,
   LLMModel,
   LLMModelConfig,
 } from '@/lib/models'
@@ -10,8 +9,6 @@ import ratelimit from '@/lib/ratelimit'
 import { fragmentSchema as schema } from '@/lib/schema'
 import { Templates } from '@/lib/templates'
 import { streamObject, LanguageModel, CoreMessage } from 'ai'
-import { ChatPersistence } from '@/lib/chat-persistence'
-import { createServerClient } from '@/lib/supabase-server'

 export const maxDuration = 300

@@ -30,17 +27,13 @@ export async function POST(req: Request) {
     template,
     model,
     config,
-    sessionId,
-    saveToHistory = true,
   }: {
     messages: CoreMessage[]
     userID: string | undefined
     teamID: string | undefined
     template: Templates
     model: LLMModel
     config: LLMModelConfig
-    sessionId?: string
-    saveToHistory?: boolean
   } = await req.json()

   const limit = !config.apiKey
@@ -64,174 +57,65 @@ export async function POST(req: Request) {

   console.log('userID', userID)
   console.log('teamID', teamID)
+  console.log('template', template)
   console.log('model', model)
-  console.log('sessionId', sessionId)
+  console.log('config', config)

   const { model: modelNameString, apiKey: modelApiKey, ...modelParams } = config
-  const modelClient = await getModelClient(model, config)
-
-  // Save user message to history if enabled and user is authenticated
-  let currentSessionId = sessionId
-  if (saveToHistory && userID) {
-    try {
-      // Get the last user message from the messages array
-      const lastMessage = messages[messages.length - 1]
-      if (lastMessage && lastMessage.role === 'user') {
-        // Create new session if no sessionId provided
-        if (!currentSessionId) {
-          const session = await ChatPersistence.createSession(
-            userID,
-            teamID,
-            {
-              role: 'user',
-              content: typeof lastMessage.content === 'string' ? lastMessage.content : JSON.stringify(lastMessage.content),
-              model: model.id,
-              template: template.toString(),
-              metadata: {
-                userID,
-                teamID,
-              }
-            }
-          )
-          currentSessionId = session.sessionId
-        } else {
-          // Add message to existing session
-          await ChatPersistence.addMessage(userID, currentSessionId, {
-            role: 'user',
-            content: typeof lastMessage.content === 'string' ? lastMessage.content : JSON.stringify(lastMessage.content),
-            model: model.id,
-            template: template.toString(),
-            metadata: {
-              userID,
-              teamID,
-            }
-          })
-        }
-      }
-    } catch (historyError) {
-      console.error('Failed to save user message to history:', historyError)
-      // Continue with request even if history save fails
-    }
-  }
+  const modelClient = getModelClient(model, config)

   try {
     const stream = await streamObject({
       model: modelClient as LanguageModel,
       schema,
       system: toPrompt(template),
       messages,
-      maxRetries: 0,
-      ...getDefaultModelParams(model),
+      maxRetries: 0, // do not retry on errors
       ...modelParams,
     })

-    // Create response with session handling
-    const response = stream.toTextStreamResponse()
-
-    // Add session ID to response headers if we created one
-    if (currentSessionId && currentSessionId !== sessionId) {
-      response.headers.set('X-Session-Id', currentSessionId)
-    }
-
-    // Note: Assistant response saving will be implemented in a future update
-    // when we have better streaming completion handling
-
-    return response
+    return stream.toTextStreamResponse()
   } catch (error: any) {
-    console.error('Chat API Error:', {
-      message: error?.message,
-      status: error?.statusCode,
-      provider: model,
-      stack: error?.stack
-    })
-
     const isRateLimitError =
-      error && (error.statusCode === 429 || error.message.includes('limit') || error.message.includes('rate'))
+      error && (error.statusCode === 429 || error.message.includes('limit'))
     const isOverloadedError =
       error && (error.statusCode === 529 || error.statusCode === 503)
     const isAccessDeniedError =
-      error && (error.statusCode === 403 || error.statusCode === 401 || error.message.includes('unauthorized') || error.message.includes('invalid') && error.message.includes('key'))
-    const isModelError =
-      error && (error.statusCode === 404 || error.message.includes('not found') || error.message.includes('model'))
-    const isNetworkError =
-      error && (error.code === 'ECONNREFUSED' || error.code === 'ETIMEDOUT' || error.message.includes('network'))
+      error && (error.statusCode === 403 || error.statusCode === 401)

     if (isRateLimitError) {
       return new Response(
-        JSON.stringify({
-          error: 'Rate limit exceeded. Please try again later or use your own API key.',
-          type: 'rate_limit',
-          retryAfter: 60
-        }),
+        'The provider is currently unavailable due to request limit. Try using your own API key.',
         {
           status: 429,
-          headers: { 'Content-Type': 'application/json' }
         },
       )
     }

     if (isOverloadedError) {
       return new Response(
-        JSON.stringify({
-          error: 'The AI service is currently overloaded. Please try again in a few moments.',
-          type: 'service_overload',
-          retryAfter: 30
-        }),
+        'The provider is currently unavailable. Please try again later.',
         {
-          status: 503,
-          headers: { 'Content-Type': 'application/json' }
+          status: 529,
         },
       )
     }

     if (isAccessDeniedError) {
       return new Response(
-        JSON.stringify({
-          error: 'Invalid API key or access denied. Please check your API key configuration.',
-          type: 'auth_error'
-        }),
+        'Access denied. Please make sure your API key is valid.',
         {
           status: 403,
-          headers: { 'Content-Type': 'application/json' }
         },
       )
     }

-    if (isModelError) {
-      return new Response(
-        JSON.stringify({
-          error: 'The selected AI model is not available. Please try a different model.',
-          type: 'model_error'
-        }),
-        {
-          status: 400,
-          headers: { 'Content-Type': 'application/json' }
-        },
-      )
-    }
-
-    if (isNetworkError) {
-      return new Response(
-        JSON.stringify({
-          error: 'Network connection failed. Please check your internet connection and try again.',
-          type: 'network_error'
-        }),
-        {
-          status: 502,
-          headers: { 'Content-Type': 'application/json' }
-        },
-      )
-    }
+    console.error('Error:', error)

     return new Response(
-      JSON.stringify({
-        error: 'An unexpected error occurred. Please try again. If the problem persists, try using a different AI model.',
-        type: 'unknown_error',
-        details: error?.message || 'Unknown error'
-      }),
+      'An unexpected error has occurred. Please try again later.',
      {
         status: 500,
-        headers: { 'Content-Type': 'application/json' }
       },
     )
   }
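
With this change the error branches return short plain-text bodies (statuses 429, 529, 403, 500) instead of JSON payloads, so a caller should read them with res.text() rather than res.json(). A minimal client sketch under that assumption (the sendChat name and field types are illustrative; the body shape mirrors the handler's destructuring above):

// Illustrative caller for the POST handler above; not part of the commit.
// Error responses are plain-text strings, success is a text stream.
export async function sendChat(body: {
  messages: unknown[]
  userID: string | undefined
  teamID: string | undefined
  template: unknown
  model: unknown
  config: unknown
}): Promise<ReadableStream<Uint8Array> | null> {
  const res = await fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  })

  if (!res.ok) {
    // e.g. 'Access denied. Please make sure your API key is valid.'
    throw new Error(await res.text())
  }

  return res.body
}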

lib/models.ts

Lines changed: 9 additions & 0 deletions
@@ -80,4 +80,13 @@ export function getModelClient(model: LLMModel, config: LLMModelConfig) {
   }

   return createClient()
+}
+
+export function getDefaultModelParams(model: LLMModel) {
+  // Return default parameters for the model
+  // This can be customized per provider/model if needed
+  return {
+    temperature: 0.7,
+    maxTokens: 4096,
+  }
 }
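
The new helper centralizes default sampling parameters, though note that route.ts above stops spreading it into streamObject, leaving it without a call site in this commit. A hypothetical consumer could merge the defaults under per-request settings (sketch only; resolveParams and overrides are illustrative names, while LLMModel and LLMModelConfig are the exports route.ts already imports):

import { getDefaultModelParams, LLMModel, LLMModelConfig } from '@/lib/models'

// Spread defaults first so any per-request settings win.
function resolveParams(model: LLMModel, config: LLMModelConfig) {
  const { model: modelName, apiKey, ...overrides } = config
  return {
    ...getDefaultModelParams(model), // { temperature: 0.7, maxTokens: 4096 }
    ...overrides,
  }
}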
