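// OpenAI-compatible proxy server: POST /hf/v1/chat/completions is forwarded to
// Cursor's StreamChat API and the reply is returned either as an SSE stream or
// as a single chat.completion object.
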
const express = require('express');
const { v4: uuidv4 } = require('uuid');
const { stringToHex, chunkToUtf8String, getRandomIDPro } = require('./utils.js');

const app = express();

app.use(express.json());
app.use(express.urlencoded({ extended: true }));

app.post('/hf/v1/chat/completions', async (req, res) => {
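  // o1-series models do not support streaming responses, so reject such requests up front.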
  if (req.body.model.startsWith('o1-') && req.body.stream) {
    return res.status(400).json({
      error: 'Model does not support streaming',
    });
  }

  let currentKeyIndex = 0;
  try {
    const { model, messages, stream = false } = req.body;
    let authToken = req.headers.authorization?.replace('Bearer ', '');

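    // The Authorization header may carry several comma-separated keys; one is selected by index.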
    const keys = authToken.split(',').map((key) => key.trim());
    if (keys.length > 0) {
      if (currentKeyIndex >= keys.length) {
        currentKeyIndex = 0;
      }
      authToken = keys[currentKeyIndex];
    }
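    // Tokens containing "%3A%3A" (a URL-encoded "::") keep only the part after the separator.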
    if (authToken && authToken.includes('%3A%3A')) {
      authToken = authToken.split('%3A%3A')[1];
    }
    if (!messages || !Array.isArray(messages) || messages.length === 0 || !authToken) {
      return res.status(400).json({
        error: 'Invalid request. Messages should be a non-empty array and authorization is required',
      });
    }

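    // Encode the messages and model into the request payload expected upstream (see stringToHex in utils.js).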
    const hexData = await stringToHex(messages, model);

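    // Use the checksum from the request header or environment when present; otherwise generate a random one.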
    const checksum =
      req.headers['x-cursor-checksum'] ??
      process.env['x-cursor-checksum'] ??
      `zo${getRandomIDPro({ dictType: 'max', size: 6 })}${getRandomIDPro({ dictType: 'max', size: 64 })}/${getRandomIDPro({ dictType: 'max', size: 64 })}`;

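    // Forward the request to Cursor's StreamChat endpoint over the Connect protocol.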
    const response = await fetch('https://api2.cursor.sh/aiserver.v1.AiService/StreamChat', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/connect+proto',
        authorization: `Bearer ${authToken}`,
        'connect-accept-encoding': 'gzip,br',
        'connect-protocol-version': '1',
        'user-agent': 'connect-es/1.4.0',
        'x-amzn-trace-id': `Root=${uuidv4()}`,
        'x-cursor-checksum': checksum,
        'x-cursor-client-version': '0.42.3',
        'x-cursor-timezone': 'Asia/Shanghai',
        'x-ghost-mode': 'false',
        'x-request-id': uuidv4(),
        Host: 'api2.cursor.sh',
      },
      body: hexData,
    });

    if (stream) {
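      // Relay each upstream chunk to the client as OpenAI-style server-sent events.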
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      const responseId = `chatcmpl-${uuidv4()}`;

      for await (const chunk of response.body) {
        const text = await chunkToUtf8String(chunk);

        if (text.length > 0) {
          res.write(
            `data: ${JSON.stringify({
              id: responseId,
              object: 'chat.completion.chunk',
              created: Math.floor(Date.now() / 1000),
              model,
              choices: [
                {
                  index: 0,
                  delta: {
                    content: text,
                  },
                },
              ],
            })}\n\n`,
          );
        }
      }

      res.write('data: [DONE]\n\n');
      return res.end();
    } else {
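      // Non-streaming: collect the full upstream response before replying once.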
      let text = '';

      for await (const chunk of response.body) {
        text += await chunkToUtf8String(chunk);
      }

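      // Drop everything up to and including the <|END_USER|> marker, then strip a stray leading newline/character.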
      text = text.replace(/^.*<\|END_USER\|>/s, '');
      text = text.replace(/^\n[a-zA-Z]?/, '').trim();

      return res.json({
        id: `chatcmpl-${uuidv4()}`,
        object: 'chat.completion',
        created: Math.floor(Date.now() / 1000),
        model,
        choices: [
          {
            index: 0,
            message: {
              role: 'assistant',
              content: text,
            },
            finish_reason: 'stop',
          },
        ],
        usage: {
          prompt_tokens: 0,
          completion_tokens: 0,
          total_tokens: 0,
        },
      });
    }
  } catch (error) {
    console.error('Error:', error);
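    // Only respond if nothing has been sent yet, in the format the client asked for.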
    if (!res.headersSent) {
      if (req.body.stream) {
        res.write(`data: ${JSON.stringify({ error: 'Internal server error' })}\n\n`);
        return res.end();
      } else {
        return res.status(500).json({ error: 'Internal server error' });
      }
    }
  }
});

const PORT = process.env.PORT || 3000;
app.listen(PORT, () => {
  console.log(`Server running on port ${PORT}`);
});