-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: server.js
More file actions
64 lines (54 loc) · 1.62 KB
/
server.js
File metadata and controls
64 lines (54 loc) · 1.62 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
require('dotenv').config();
const express = require('express');
const cors = require('cors');
const multer = require('multer');
const path = require('path');
const fs = require('fs');
const OpenAI = require('openai');
// Express application instance; PORT comes from the environment,
// falling back to 3001 when PORT is unset or empty.
const app = express();
const PORT = process.env.PORT || 3001;
// Configure disk storage for uploaded audio files.
// Files are written to UPLOAD_DIR with a timestamped name that keeps the
// original file extension (e.g. audio-1712345678901.webm).
const UPLOAD_DIR = './uploads';
const storage = multer.diskStorage({
  destination: (req, file, cb) => {
    // { recursive: true } makes mkdirSync idempotent, eliminating the
    // existsSync/mkdirSync race (TOCTOU) the original had when two
    // uploads arrived before the directory existed.
    try {
      fs.mkdirSync(UPLOAD_DIR, { recursive: true });
      cb(null, UPLOAD_DIR);
    } catch (err) {
      // Surface the filesystem error to multer instead of crashing.
      cb(err);
    }
  },
  filename: (req, file, cb) => {
    cb(null, `audio-${Date.now()}${path.extname(file.originalname)}`);
  }
});
const upload = multer({ storage });
// Initialize OpenAI
// SDK client; the key is read from the OPENAI_API_KEY environment
// variable (loaded from .env by dotenv at the top of the file).
const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY
});
// Middleware
// NOTE: registration order matters — CORS and JSON body parsing are
// installed before the static handler and the routes below.
app.use(cors());
app.use(express.json());
// Serves the front-end assets from ./public.
app.use(express.static('public'));
// Routes
// POST /api/transcribe — accepts a multipart form with an "audio" file,
// transcribes it with OpenAI Whisper, and returns { transcription: text }.
// Responds 400 when no file was uploaded, 500 when transcription fails.
app.post('/api/transcribe', upload.single('audio'), async (req, res) => {
  if (!req.file) {
    return res.status(400).json({ error: 'No audio file provided' });
  }
  try {
    // Revert to Whisper model for transcription
    const transcription = await openai.audio.transcriptions.create({
      file: fs.createReadStream(req.file.path),
      model: 'whisper-1',
    });
    res.json({ transcription: transcription.text });
  } catch (error) {
    console.error('Error transcribing audio:', error);
    res.status(500).json({ error: 'Failed to transcribe audio' });
  } finally {
    // Always delete the temp upload. The original removed it only on
    // success, so every failed transcription leaked a file in ./uploads.
    fs.unlink(req.file.path, (err) => {
      if (err) console.error('Error deleting file:', err);
    });
  }
});
// Start the HTTP server on the configured port and log readiness.
app.listen(PORT, function onListening() {
  console.log(`Server running on port ${PORT}`);
});