fix autoprovider
server.js
@@ -377,7 +377,7 @@ app.post("/api/ask-ai", async (req, res) => {
 });
 
 app.put("/api/ask-ai", async (req, res) => {
-  const { prompt, html, previousPrompt, provider } = req.body;
+  const { prompt, html, previousPrompt } = req.body;
   if (!prompt || !html) {
     return res.status(400).send({
       ok: false,
@@ -424,13 +424,6 @@ ${REPLACE_END}
 
   // force to use deepseek-ai/DeepSeek-V3-0324 model, to avoid thinker models.
   const selectedModel = MODELS[0];
-  if (!selectedModel.providers.includes(provider) && provider !== "auto") {
-    return res.status(400).send({
-      ok: false,
-      openSelectProvider: true,
-      message: `The selected model does not support the ${provider} provider.`,
-    });
-  }
 
   let { hf_token } = req.cookies;
   let token = hf_token;
@@ -461,23 +454,7 @@ ${REPLACE_END}
 
   const client = new InferenceClient(token);
 
-
-  // if (previousPrompt) TOKENS_USED += previousPrompt.length;
-  // if (html) TOKENS_USED += html.length;
-
-  const DEFAULT_PROVIDER = PROVIDERS.novita;
-  const selectedProvider =
-    provider === "auto"
-      ? PROVIDERS[selectedModel.autoProvider]
-      : PROVIDERS[provider] ?? DEFAULT_PROVIDER;
-
-  // if (provider !== "auto" && TOKENS_USED >= selectedProvider.max_tokens) {
-  //   return res.status(400).send({
-  //     ok: false,
-  //     openSelectProvider: true,
-  //     message: `Context is too long. ${selectedProvider.name} allow ${selectedProvider.max_tokens} max tokens.`,
-  //   });
-  // }
+  const selectedProvider = PROVIDERS[selectedModel.autoProvider];
 
   try {
     const response = await client.chatCompletion({
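
In short, the PUT handler no longer reads a `provider` field from the request body or validates it against the model's provider list; it always resolves the provider from the model's `autoProvider` key. A minimal sketch of the config shapes this relies on follows; the field values and the `max_tokens` / `name` properties are assumptions for illustration, not the actual definitions in server.js:

// Hypothetical shapes for PROVIDERS and MODELS -- illustrative only;
// the real definitions live elsewhere in server.js.
const PROVIDERS = {
  novita: { name: "Novita", max_tokens: 16000 },
  sambanova: { name: "SambaNova", max_tokens: 8000 },
};

const MODELS = [
  {
    value: "deepseek-ai/DeepSeek-V3-0324",
    providers: ["novita", "sambanova"], // assumed list of supported providers
    autoProvider: "novita",             // key into PROVIDERS used by this fix
  },
];

// After the patch, the provider is derived purely from the model config:
const selectedModel = MODELS[0];
const selectedProvider = PROVIDERS[selectedModel.autoProvider];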