Commit eccd49b

Update configs for chatgpt-5
1 parent d015b07

File tree (3 files changed, +4 -6 lines):

express-server/src/routes/create.ts
pyserver/config.py
pyserver/main.py

express-server/src/routes/create.ts

Lines changed: 1 addition & 1 deletion
@@ -246,7 +246,7 @@ const buildPipelineJob = (
       cruxes: updatedConfig.cruxesEnabled,
     },
     llm: {
-      model: "gpt-4o-mini", // ! Change when we allow different models
+      model: "gpt-5-mini", // ! Change when we allow different models
     },
   },
   data: updatedConfig.data,

pyserver/config.py

Lines changed: 3 additions & 1 deletion
@@ -8,7 +8,7 @@
 DRY_RUN = False
 
 # cheapest for testing
-MODEL = "gpt-4o-mini" # prod default: "gpt-4-turbo-preview"
+MODEL = "gpt-5-mini" # prod default: "gpt-4-turbo-preview"
 
 COST_BY_MODEL = {
     # GPT-4o mini: Input is $0.150 / 1M tokens, Output is $0.600 / 1M tokens
@@ -17,6 +17,8 @@
     # GPT-4o: Input is $2.50 / 1M tokens, Output is $10.00 / 1M tokens
     # or: input is $0.0025/1K tokens, output is $0.01/1K tokens
     "gpt-4o": {"in_per_1K": 0.0025, "out_per_1K": 0.01},
+    # GPT-5-mini: Input is $0.250 / 1M tokens, Output is $2.00 / 1M tokens
+    "gpt-5-mini": {"in_per_1K": 0.00025, "out_per_1K": 0.002},
 }
 
 # for web-app mode, require at least 3 words in order to extract meaningful claims
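For reference, the COST_BY_MODEL entries are dollar rates per 1K tokens, so $0.250 per 1M input tokens becomes 0.00025 and $2.00 per 1M output tokens becomes 0.002. A minimal sketch of how such a table could be used to estimate the cost of one call (estimate_cost is a hypothetical helper, not part of this commit; it assumes prompt and completion token counts are taken from the API response's usage data):

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    # Hypothetical helper: convert token counts to dollars via the per-1K rates.
    rates = COST_BY_MODEL[model]
    return (prompt_tokens / 1000) * rates["in_per_1K"] + (
        completion_tokens / 1000
    ) * rates["out_per_1K"]

# Example: estimate_cost("gpt-5-mini", 12_000, 3_000) -> about $0.009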

pyserver/main.py

Lines changed: 0 additions & 4 deletions
@@ -277,7 +277,6 @@ def comments_to_tree(
             {"role": "system", "content": req.llm.system_prompt},
             {"role": "user", "content": full_prompt},
         ],
-        temperature=0.0,
         response_format={"type": "json_object"},
     )
     try:
@@ -383,7 +382,6 @@ def comment_to_claims(llm: dict, comment: str, tree: dict, api_key: str) -> dict
             },
             {"role": "user", "content": full_prompt},
         ],
-        temperature=0.0,
         response_format={"type": "json_object"},
     )
     try:
@@ -708,7 +706,6 @@ def dedup_claims(claims: list, llm: LLMConfig, api_key: str) -> dict:
             {"role": "system", "content": llm.system_prompt},
             {"role": "user", "content": full_prompt},
         ],
-        temperature=0.0,
         response_format={"type": "json_object"},
     )
     try:
@@ -1264,7 +1261,6 @@ def cruxes_for_topic(
             {"role": "system", "content": llm.system_prompt},
             {"role": "user", "content": full_prompt},
         ],
-        temperature=0.0,
         response_format={"type": "json_object"},
     )
     crux = response.choices[0].message.content
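The removed temperature=0.0 arguments go hand in hand with the model switch: the gpt-5 family only supports the default temperature, which appears to be why the parameter was dropped from every chat completion call. A minimal sketch of the resulting call shape with the standard openai Python client (the prompts and client setup below are placeholders, not code from this repo):

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

response = client.chat.completions.create(
    model="gpt-5-mini",
    messages=[
        {"role": "system", "content": "Return a JSON object summarizing the user's comment."},
        {"role": "user", "content": "Example comment text."},
    ],
    # No temperature argument: gpt-5-* models only accept the default value.
    response_format={"type": "json_object"},
)
print(response.choices[0].message.content)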
