repo stringlengths 5 106 | file_url stringlengths 78 301 | file_path stringlengths 4 211 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:56:49 2026-01-05 02:23:25 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/endpoints/api/document/index.js | server/endpoints/api/document/index.js | const { Telemetry } = require("../../../models/telemetry");
const { validApiKey } = require("../../../utils/middleware/validApiKey");
const { handleAPIFileUpload } = require("../../../utils/files/multer");
const {
viewLocalFiles,
findDocumentInDocuments,
getDocumentsByFolder,
normalizePath,
isWithin,
} = require("../../../utils/files");
const { reqBody, safeJsonParse } = require("../../../utils/http");
const { EventLogs } = require("../../../models/eventLogs");
const { CollectorApi } = require("../../../utils/collectorApi");
const fs = require("fs");
const path = require("path");
const { Document } = require("../../../models/documents");
const { purgeFolder } = require("../../../utils/files/purgeDocument");
const documentsPath =
process.env.NODE_ENV === "development"
? path.resolve(__dirname, "../../../storage/documents")
: path.resolve(process.env.STORAGE_DIR, `documents`);
function apiDocumentEndpoints(app) {
if (!app) return;
app.post(
  "/v1/document/upload",
  [validApiKey, handleAPIFileUpload],
  async (request, response) => {
    /*
    #swagger.tags = ['Documents']
    #swagger.description = 'Upload a new file to AnythingLLM to be parsed and prepared for embedding, with optional metadata.'
    #swagger.requestBody = {
      description: 'File to be uploaded.',
      required: true,
      content: {
        "multipart/form-data": {
          schema: {
            type: 'object',
            required: ['file'],
            properties: {
              file: {
                type: 'string',
                format: 'binary',
                description: 'The file to upload'
              },
              addToWorkspaces: {
                type: 'string',
                description: 'comma-separated text-string of workspace slugs to embed the document into post-upload. eg: workspace1,workspace2',
              },
              metadata: {
                type: 'object',
                description: 'Key:Value pairs of metadata to attach to the document in JSON Object format. Only specific keys are allowed - see example.',
                example: { 'title': 'Custom Title', 'docAuthor': 'Author Name', 'description': 'A brief description', 'docSource': 'Source of the document' }
              }
            },
            required: ['file']
          }
        }
      }
    }
    #swagger.responses[200] = {
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              success: true,
              error: null,
              documents: [
                {
                  "location": "custom-documents/anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json",
                  "name": "anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json",
                  "url": "file:///Users/tim/Documents/anything-llm/collector/hotdir/anythingllm.txt",
                  "title": "anythingllm.txt",
                  "docAuthor": "Unknown",
                  "description": "Unknown",
                  "docSource": "a text file uploaded by the user.",
                  "chunkSource": "anythingllm.txt",
                  "published": "1/16/2024, 3:07:00 PM",
                  "wordCount": 93,
                  "token_count_estimate": 115,
                }
              ]
            }
          }
        }
      }
    }
    #swagger.responses[403] = {
      schema: {
        "$ref": "#/definitions/InvalidAPIKey"
      }
    }
    */
    // Accepts one multipart file, forwards it to the collector service for
    // parsing, and optionally embeds the processed document into workspaces.
    try {
      const Collector = new CollectorApi();
      // handleAPIFileUpload should always attach request.file, but a request
      // missing the "file" field previously blew up on the destructure below
      // and surfaced as a generic 500 - return a descriptive 400 instead.
      if (!request.file)
        return response
          .status(400)
          .json({ success: false, error: "No file found in upload request." })
          .end();
      const { originalname } = request.file;
      const { addToWorkspaces = "", metadata: _metadata = {} } =
        reqBody(request);
      // Multipart form fields arrive as strings, so metadata may be a JSON
      // string or an already-parsed object - normalize to an object.
      const metadata =
        typeof _metadata === "string"
          ? safeJsonParse(_metadata, {})
          : _metadata;

      // Bail early when the collector service is unreachable.
      const processingOnline = await Collector.online();
      if (!processingOnline) {
        response
          .status(500)
          .json({
            success: false,
            error: `Document processing API is not online. Document ${originalname} will not be processed automatically.`,
          })
          .end();
        return;
      }

      const { success, reason, documents } = await Collector.processDocument(
        originalname,
        metadata
      );
      if (!success) {
        return response
          .status(500)
          .json({ success: false, error: reason, documents })
          .end();
      }

      Collector.log(
        `Document ${originalname} uploaded processed and successfully. It is now available in documents.`
      );
      await Telemetry.sendTelemetry("document_uploaded");
      await EventLogs.logEvent("api_document_uploaded", {
        documentName: originalname,
      });
      if (!!addToWorkspaces)
        await Document.api.uploadToWorkspace(
          addToWorkspaces,
          // FIX: carry the optional chain through - `documents?.[0].location`
          // throws a TypeError when documents is an empty array.
          documents?.[0]?.location
        );
      response.status(200).json({ success: true, error: null, documents });
    } catch (e) {
      console.error(e.message, e);
      response.sendStatus(500).end();
    }
  }
);
app.post(
  "/v1/document/upload/:folderName",
  [validApiKey, handleAPIFileUpload],
  async (request, response) => {
    /*
    #swagger.tags = ['Documents']
    #swagger.description = 'Upload a new file to a specific folder in AnythingLLM to be parsed and prepared for embedding. If the folder does not exist, it will be created.'
    #swagger.parameters['folderName'] = {
      in: 'path',
      description: 'Target folder path (defaults to \"custom-documents\" if not provided)',
      required: true,
      type: 'string',
      example: 'my-folder'
    }
    #swagger.requestBody = {
      description: 'File to be uploaded, with optional metadata.',
      required: true,
      content: {
        "multipart/form-data": {
          schema: {
            type: 'object',
            required: ['file'],
            properties: {
              file: {
                type: 'string',
                format: 'binary',
                description: 'The file to upload'
              },
              addToWorkspaces: {
                type: 'string',
                description: 'comma-separated text-string of workspace slugs to embed the document into post-upload. eg: workspace1,workspace2',
              },
              metadata: {
                type: 'object',
                description: 'Key:Value pairs of metadata to attach to the document in JSON Object format. Only specific keys are allowed - see example.',
                example: { 'title': 'Custom Title', 'docAuthor': 'Author Name', 'description': 'A brief description', 'docSource': 'Source of the document' }
              }
            }
          }
        }
      }
    }
    #swagger.responses[200] = {
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              success: true,
              error: null,
              documents: [{
                "location": "custom-documents/anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json",
                "name": "anythingllm.txt-6e8be64c-c162-4b43-9997-b068c0071e8b.json",
                "url": "file:///Users/tim/Documents/anything-llm/collector/hotdir/anythingllm.txt",
                "title": "anythingllm.txt",
                "docAuthor": "Unknown",
                "description": "Unknown",
                "docSource": "a text file uploaded by the user.",
                "chunkSource": "anythingllm.txt",
                "published": "1/16/2024, 3:07:00 PM",
                "wordCount": 93,
                "token_count_estimate": 115
              }]
            }
          }
        }
      }
    }
    #swagger.responses[403] = {
      schema: {
        "$ref": "#/definitions/InvalidAPIKey"
      }
    }
    #swagger.responses[500] = {
      description: "Internal Server Error",
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              success: false,
              error: "Document processing API is not online. Document will not be processed automatically."
            }
          }
        }
      }
    }
    */
    // Same flow as /v1/document/upload, but the processed document is moved
    // into the requested storage folder (created on demand) after parsing.
    try {
      // Guard a request with no "file" field so it fails with a clear 400
      // instead of a TypeError-driven 500 on the destructure below.
      if (!request.file)
        return response
          .status(400)
          .json({ success: false, error: "No file found in upload request." })
          .end();
      const { originalname } = request.file;
      const { addToWorkspaces = "", metadata: _metadata = {} } =
        reqBody(request);
      // Multipart form fields arrive as strings - normalize metadata.
      const metadata =
        typeof _metadata === "string"
          ? safeJsonParse(_metadata, {})
          : _metadata;

      // Sanitize the folder name and refuse anything resolving outside the
      // documents storage root (path traversal guard).
      let folder = request.params?.folderName || "custom-documents";
      folder = normalizePath(folder);
      const targetFolderPath = path.join(documentsPath, folder);
      if (
        !isWithin(path.resolve(documentsPath), path.resolve(targetFolderPath))
      )
        throw new Error("Invalid folder name");
      if (!fs.existsSync(targetFolderPath))
        fs.mkdirSync(targetFolderPath, { recursive: true });

      const Collector = new CollectorApi();
      const processingOnline = await Collector.online();
      if (!processingOnline) {
        return response
          .status(500)
          .json({
            success: false,
            error: `Document processing API is not online. Document ${originalname} will not be processed automatically.`,
          })
          .end();
      }

      // Process the uploaded document with metadata
      const { success, reason, documents } = await Collector.processDocument(
        originalname,
        metadata
      );
      if (!success) {
        return response
          .status(500)
          .json({ success: false, error: reason, documents })
          .end();
      }

      // For each processed document, check if it is already in the desired folder.
      // If not, move it using similar logic as in the move-files endpoint.
      for (const doc of documents) {
        const currentFolder = path.dirname(doc.location);
        if (currentFolder !== folder) {
          const sourcePath = path.join(
            documentsPath,
            normalizePath(doc.location)
          );
          const destinationPath = path.join(
            targetFolderPath,
            path.basename(doc.location)
          );
          if (
            !isWithin(documentsPath, sourcePath) ||
            !isWithin(documentsPath, destinationPath)
          )
            throw new Error("Invalid file location");
          fs.renameSync(sourcePath, destinationPath);
          // Keep the returned record consistent with the file's new home.
          doc.location = path.join(folder, path.basename(doc.location));
          doc.name = path.basename(doc.location);
        }
      }

      Collector.log(
        `Document ${originalname} uploaded, processed, and moved to folder ${folder} successfully.`
      );
      await Telemetry.sendTelemetry("document_uploaded");
      await EventLogs.logEvent("api_document_uploaded", {
        documentName: originalname,
        folder,
      });
      if (!!addToWorkspaces)
        await Document.api.uploadToWorkspace(
          addToWorkspaces,
          // FIX: carry the optional chain through - `documents?.[0].location`
          // throws a TypeError when documents is an empty array.
          documents?.[0]?.location
        );
      response.status(200).json({ success: true, error: null, documents });
    } catch (e) {
      console.error(e.message, e);
      response.sendStatus(500).end();
    }
  }
);
app.post(
  "/v1/document/upload-link",
  [validApiKey],
  async (request, response) => {
    /*
    #swagger.tags = ['Documents']
    #swagger.description = 'Upload a valid URL for AnythingLLM to scrape and prepare for embedding. Optionally, specify a comma-separated list of workspace slugs to embed the document into post-upload.'
    #swagger.requestBody = {
      description: 'Link of web address to be scraped and optionally a comma-separated list of workspace slugs to embed the document into post-upload, and optional metadata.',
      required: true,
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              "link": "https://anythingllm.com",
              "addToWorkspaces": "workspace1,workspace2",
              "scraperHeaders": {
                "Authorization": "Bearer token123",
                "My-Custom-Header": "value"
              },
              "metadata": {
                "title": "Custom Title",
                "docAuthor": "Author Name",
                "description": "A brief description",
                "docSource": "Source of the document"
              }
            }
          }
        }
      }
    }
    #swagger.responses[200] = {
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              success: true,
              error: null,
              documents: [
                {
                  "id": "c530dbe6-bff1-4b9e-b87f-710d539d20bc",
                  "url": "file://useanything_com.html",
                  "title": "useanything_com.html",
                  "docAuthor": "no author found",
                  "description": "No description found.",
                  "docSource": "URL link uploaded by the user.",
                  "chunkSource": "https:anythingllm.com.html",
                  "published": "1/16/2024, 3:46:33 PM",
                  "wordCount": 252,
                  "pageContent": "AnythingLLM is the best....",
                  "token_count_estimate": 447,
                  "location": "custom-documents/url-useanything_com-c530dbe6-bff1-4b9e-b87f-710d539d20bc.json"
                }
              ]
            }
          }
        }
      }
    }
    #swagger.responses[403] = {
      schema: {
        "$ref": "#/definitions/InvalidAPIKey"
      }
    }
    */
    // Hands a URL to the collector service for scraping; custom headers may
    // be forwarded to the scraper (e.g. auth tokens for gated pages).
    try {
      const Collector = new CollectorApi();
      const {
        link,
        addToWorkspaces = "",
        scraperHeaders = {},
        metadata: _metadata = {},
      } = reqBody(request);
      // metadata may arrive as a JSON string or an object - normalize.
      const metadata =
        typeof _metadata === "string"
          ? safeJsonParse(_metadata, {})
          : _metadata;

      const processingOnline = await Collector.online();
      if (!processingOnline) {
        return response
          .status(500)
          .json({
            success: false,
            error: `Document processing API is not online. Link ${link} will not be processed automatically.`,
          })
          .end();
      }

      const { success, reason, documents } = await Collector.processLink(
        link,
        scraperHeaders,
        metadata
      );
      if (!success) {
        return response
          .status(500)
          .json({ success: false, error: reason, documents })
          .end();
      }

      Collector.log(
        `Link ${link} uploaded processed and successfully. It is now available in documents.`
      );
      await Telemetry.sendTelemetry("link_uploaded");
      await EventLogs.logEvent("api_link_uploaded", {
        link,
      });
      if (!!addToWorkspaces)
        await Document.api.uploadToWorkspace(
          addToWorkspaces,
          // FIX: carry the optional chain through - `documents?.[0].location`
          // throws a TypeError when documents is an empty array.
          documents?.[0]?.location
        );
      response.status(200).json({ success: true, error: null, documents });
    } catch (e) {
      console.error(e.message, e);
      response.sendStatus(500).end();
    }
  }
);
app.post(
  "/v1/document/raw-text",
  [validApiKey],
  async (request, response) => {
    /*
    #swagger.tags = ['Documents']
    #swagger.description = 'Upload a file by specifying its raw text content and metadata values without having to upload a file.'
    #swagger.requestBody = {
      description: 'Text content and metadata of the file to be saved to the system. Use metadata-schema endpoint to get the possible metadata keys',
      required: true,
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              "textContent": "This is the raw text that will be saved as a document in AnythingLLM.",
              "addToWorkspaces": "workspace1,workspace2",
              "metadata": {
                "title": "This key is required. See in /server/endpoints/api/document/index.js:287",
                "keyOne": "valueOne",
                "keyTwo": "valueTwo",
                "etc": "etc"
              }
            }
          }
        }
      }
    }
    #swagger.responses[200] = {
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              success: true,
              error: null,
              documents: [
                {
                  "id": "c530dbe6-bff1-4b9e-b87f-710d539d20bc",
                  "url": "file://my-document.txt",
                  "title": "hello-world.txt",
                  "docAuthor": "no author found",
                  "description": "No description found.",
                  "docSource": "My custom description set during upload",
                  "chunkSource": "no chunk source specified",
                  "published": "1/16/2024, 3:46:33 PM",
                  "wordCount": 252,
                  "pageContent": "AnythingLLM is the best....",
                  "token_count_estimate": 447,
                  "location": "custom-documents/raw-my-doc-text-c530dbe6-bff1-4b9e-b87f-710d539d20bc.json"
                }
              ]
            }
          }
        }
      }
    }
    #swagger.responses[403] = {
      schema: {
        "$ref": "#/definitions/InvalidAPIKey"
      }
    }
    */
    // Creates a document straight from raw text. Requires the metadata keys
    // listed in `requiredMetadata` (currently just "title") to be non-empty.
    try {
      const Collector = new CollectorApi();
      const requiredMetadata = ["title"];
      const {
        textContent,
        metadata: _metadata = {},
        addToWorkspaces = "",
      } = reqBody(request);
      // metadata may arrive as a JSON string or an object - normalize.
      const metadata =
        typeof _metadata === "string"
          ? safeJsonParse(_metadata, {})
          : _metadata;

      const processingOnline = await Collector.online();
      if (!processingOnline) {
        return response
          .status(500)
          .json({
            success: false,
            error: `Document processing API is not online. Request will not be processed.`,
          })
          .end();
      }

      // 422 when any required metadata key is absent or falsy.
      if (
        !requiredMetadata.every(
          (reqKey) =>
            Object.keys(metadata).includes(reqKey) && !!metadata[reqKey]
        )
      ) {
        return response
          .status(422)
          .json({
            success: false,
            error: `You are missing required metadata key:value pairs in your request. Required metadata key:values are ${requiredMetadata
              .map((v) => `'${v}'`)
              .join(", ")}`,
          })
          .end();
      }

      if (!textContent || textContent?.length === 0) {
        return response
          .status(422)
          .json({
            success: false,
            error: `The 'textContent' key cannot have an empty value.`,
          })
          .end();
      }

      const { success, reason, documents } = await Collector.processRawText(
        textContent,
        metadata
      );
      if (!success) {
        return response
          .status(500)
          .json({ success: false, error: reason, documents })
          .end();
      }

      Collector.log(
        `Document created successfully. It is now available in documents.`
      );
      await Telemetry.sendTelemetry("raw_document_uploaded");
      await EventLogs.logEvent("api_raw_document_uploaded");
      if (!!addToWorkspaces)
        await Document.api.uploadToWorkspace(
          addToWorkspaces,
          // FIX: carry the optional chain through - `documents?.[0].location`
          // throws a TypeError when documents is an empty array.
          documents?.[0]?.location
        );
      response.status(200).json({ success: true, error: null, documents });
    } catch (e) {
      console.error(e.message, e);
      response.sendStatus(500).end();
    }
  }
);
app.get("/v1/documents", [validApiKey], async (_, response) => {
  /*
  #swagger.tags = ['Documents']
  #swagger.description = 'List of all locally-stored documents in instance'
  #swagger.responses[200] = {
    content: {
      "application/json": {
        schema: {
          type: 'object',
          example: {
            "localFiles": {
              "name": "documents",
              "type": "folder",
              items: [
                {
                  "name": "my-stored-document.json",
                  "type": "file",
                  "id": "bb07c334-4dab-4419-9462-9d00065a49a1",
                  "url": "file://my-stored-document.txt",
                  "title": "my-stored-document.txt",
                  "cached": false
                },
              ]
            }
          }
        }
      }
    }
  }
  #swagger.responses[403] = {
    schema: {
      "$ref": "#/definitions/InvalidAPIKey"
    }
  }
  */
  // Returns the full on-disk document tree as reported by viewLocalFiles.
  try {
    const fileTree = await viewLocalFiles();
    response.status(200).json({ localFiles: fileTree });
  } catch (err) {
    console.error(err.message, err);
    response.sendStatus(500).end();
  }
});
app.get(
  "/v1/documents/folder/:folderName",
  [validApiKey],
  async (request, response) => {
    /*
    #swagger.tags = ['Documents']
    #swagger.description = 'Get all documents stored in a specific folder.'
    #swagger.parameters['folderName'] = {
      in: 'path',
      description: 'Name of the folder to retrieve documents from',
      required: true,
      type: 'string'
    }
    #swagger.responses[200] = {
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              folder: "custom-documents",
              documents: [
                {
                  name: "document1.json",
                  type: "file",
                  cached: false,
                  pinnedWorkspaces: [],
                  watched: false,
                  more: "data",
                },
                {
                  name: "document2.json",
                  type: "file",
                  cached: false,
                  pinnedWorkspaces: [],
                  watched: false,
                  more: "data",
                },
              ]
            }
          }
        }
      }
    }
    #swagger.responses[403] = {
      schema: {
        "$ref": "#/definitions/InvalidAPIKey"
      }
    }
    */
    // Lists the documents inside a single storage folder.
    try {
      const { folderName } = request.params;
      // getDocumentsByFolder returns { code, folder, documents, error } and
      // its `code` doubles as the HTTP status for this response.
      const result = await getDocumentsByFolder(folderName);
      response.status(result.code).json({
        folder: result.folder,
        documents: result.documents,
        error: result.error,
      });
    } catch (e) {
      console.error(e.message, e);
      response.sendStatus(500).end();
    }
  }
);
app.get(
  "/v1/document/accepted-file-types",
  [validApiKey],
  async (_, response) => {
    /*
    #swagger.tags = ['Documents']
    #swagger.description = 'Check available filetypes and MIMEs that can be uploaded.'
    #swagger.responses[200] = {
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              "types": {
                "application/mbox": [
                  ".mbox"
                ],
                "application/pdf": [
                  ".pdf"
                ],
                "application/vnd.oasis.opendocument.text": [
                  ".odt"
                ],
                "application/vnd.openxmlformats-officedocument.wordprocessingml.document": [
                  ".docx"
                ],
                "text/plain": [
                  ".txt",
                  ".md"
                ]
              }
            }
          }
        }
      }
    }
    #swagger.responses[403] = {
      schema: {
        "$ref": "#/definitions/InvalidAPIKey"
      }
    }
    */
    // Proxies the collector service's MIME-type -> extensions map.
    try {
      const types = await new CollectorApi().acceptedFileTypes();
      // A falsy result (e.g. collector unreachable) is surfaced as a 404.
      if (!types) {
        response.sendStatus(404).end();
        return;
      }
      response.status(200).json({ types });
    } catch (e) {
      console.error(e.message, e);
      response.sendStatus(500).end();
    }
  }
);
app.get(
  "/v1/document/metadata-schema",
  [validApiKey],
  async (_, response) => {
    /*
    #swagger.tags = ['Documents']
    #swagger.description = 'Get the known available metadata schema for when doing a raw-text upload and the acceptable type of value for each key.'
    #swagger.responses[200] = {
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              "schema": {
                "keyOne": "string | number | nullable",
                "keyTwo": "string | number | nullable",
                "specialKey": "number",
                "title": "string",
              }
            }
          }
        }
      }
    }
    #swagger.responses[403] = {
      schema: {
        "$ref": "#/definitions/InvalidAPIKey"
      }
    }
    */
    // Static description of the metadata keys accepted by /v1/document/raw-text.
    // The values are human-readable type hints, not a machine-validated schema.
    try {
      response.status(200).json({
        schema: {
          // If you are updating this be sure to update the collector METADATA_KEYS constant in /processRawText.
          url: "string | nullable",
          title: "string",
          docAuthor: "string | nullable",
          description: "string | nullable",
          docSource: "string | nullable",
          chunkSource: "string | nullable",
          published: "epoch timestamp in ms | nullable",
        },
      });
    } catch (e) {
      console.error(e.message, e);
      response.sendStatus(500).end();
    }
  }
);
// Be careful and place as last route to prevent override of the other /document/ GET
// endpoints!
app.get("/v1/document/:docName", [validApiKey], async (request, response) => {
  /*
  #swagger.tags = ['Documents']
  #swagger.description = 'Get a single document by its unique AnythingLLM document name'
  #swagger.parameters['docName'] = {
    in: 'path',
    description: 'Unique document name to find (name in /documents)',
    required: true,
    type: 'string'
  }
  #swagger.responses[200] = {
    content: {
      "application/json": {
        schema: {
          type: 'object',
          example: {
            "localFiles": {
              "name": "documents",
              "type": "folder",
              items: [
                {
                  "name": "my-stored-document.txt-uuid1234.json",
                  "type": "file",
                  "id": "bb07c334-4dab-4419-9462-9d00065a49a1",
                  "url": "file://my-stored-document.txt",
                  "title": "my-stored-document.txt",
                  "cached": false
                },
              ]
            }
          }
        }
      }
    }
  }
  #swagger.responses[403] = {
    schema: {
      "$ref": "#/definitions/InvalidAPIKey"
    }
  }
  */
  // Looks up one stored document by its unique generated filename
  // (the `name` shown in /v1/documents), not the original upload name.
  try {
    const { docName } = request.params;
    const document = await findDocumentInDocuments(docName);
    if (!document) {
      response.sendStatus(404).end();
      return;
    }
    response.status(200).json({ document });
  } catch (e) {
    console.error(e.message, e);
    response.sendStatus(500).end();
  }
});
app.post(
  "/v1/document/create-folder",
  [validApiKey],
  async (request, response) => {
    /*
    #swagger.tags = ['Documents']
    #swagger.description = 'Create a new folder inside the documents storage directory.'
    #swagger.requestBody = {
      description: 'Name of the folder to create.',
      required: true,
      content: {
        "application/json": {
          schema: {
            type: 'string',
            example: {
              "name": "new-folder"
            }
          }
        }
      }
    }
    #swagger.responses[200] = {
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              success: true,
              message: null
            }
          }
        }
      }
    }
    #swagger.responses[403] = {
      schema: {
        "$ref": "#/definitions/InvalidAPIKey"
      }
    }
    */
    // Creates a folder under the documents storage root.
    try {
      const { name } = reqBody(request);
      // normalizePath + isWithin guard against path traversal (e.g. a name
      // like "../../etc") escaping the documents storage root.
      const storagePath = path.join(documentsPath, normalizePath(name));
      if (!isWithin(path.resolve(documentsPath), path.resolve(storagePath)))
        throw new Error("Invalid path name");
      // Duplicate folders are rejected rather than silently reused.
      if (fs.existsSync(storagePath)) {
        response.status(500).json({
          success: false,
          message: "Folder by that name already exists",
        });
        return;
      }
      fs.mkdirSync(storagePath, { recursive: true });
      response.status(200).json({ success: true, message: null });
    } catch (e) {
      console.error(e);
      response.status(500).json({
        success: false,
        message: `Failed to create folder: ${e.message}`,
      });
    }
  }
);
app.delete(
  "/v1/document/remove-folder",
  [validApiKey],
  async (request, response) => {
    /*
    #swagger.tags = ['Documents']
    #swagger.description = 'Remove a folder and all its contents from the documents storage directory.'
    #swagger.requestBody = {
      description: 'Name of the folder to remove.',
      required: true,
      content: {
        "application/json": {
          schema: {
            type: 'object',
            properties: {
              name: {
                type: 'string',
                example: "my-folder"
              }
            }
          }
        }
      }
    }
    #swagger.responses[200] = {
      content: {
        "application/json": {
          schema: {
            type: 'object',
            example: {
              success: true,
              message: "Folder removed successfully"
            }
          }
        }
      }
    }
    #swagger.responses[403] = {
      schema: {
        "$ref": "#/definitions/InvalidAPIKey"
      }
    }
    */
    // Deletes a storage folder. All validation and cleanup is delegated to
    // purgeFolder (utils/files/purgeDocument) - presumably it also removes
    // the folder's documents from workspaces; verify there before relying on it.
    try {
      const { name } = reqBody(request);
      await purgeFolder(name);
      response
        .status(200)
        .json({ success: true, message: "Folder removed successfully" });
    } catch (e) {
      console.error(e);
      response.status(500).json({
        success: false,
        message: `Failed to remove folder: ${e.message}`,
      });
    }
  }
);
app.post(
"/v1/document/move-files",
[validApiKey],
async (request, response) => {
/*
#swagger.tags = ['Documents']
#swagger.description = 'Move files within the documents storage directory.'
#swagger.requestBody = {
description: 'Array of objects containing source and destination paths of files to move.',
required: true,
content: {
"application/json": {
schema: {
type: 'object',
example: {
"files": [
{
"from": "custom-documents/file.txt-fc4beeeb-e436-454d-8bb4-e5b8979cb48f.json",
"to": "folder/file.txt-fc4beeeb-e436-454d-8bb4-e5b8979cb48f.json"
}
]
}
}
}
}
}
#swagger.responses[200] = {
content: {
"application/json": {
schema: {
type: 'object',
example: {
success: true,
message: null
}
}
}
}
}
#swagger.responses[403] = {
schema: {
"$ref": "#/definitions/InvalidAPIKey"
}
}
*/
try {
const { files } = reqBody(request);
const docpaths = files.map(({ from }) => from);
const documents = await Document.where({ docpath: { in: docpaths } });
const embeddedFiles = documents.map((doc) => doc.docpath);
const moveableFiles = files.filter(
({ from }) => !embeddedFiles.includes(from)
);
const movePromises = moveableFiles.map(({ from, to }) => {
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | true |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/endpoints/embed/index.js | server/endpoints/embed/index.js | const { v4: uuidv4 } = require("uuid");
const { reqBody, multiUserMode } = require("../../utils/http");
const { Telemetry } = require("../../models/telemetry");
const { streamChatWithForEmbed } = require("../../utils/chats/embed");
const { EmbedChats } = require("../../models/embedChats");
const {
validEmbedConfig,
canRespond,
setConnectionMeta,
} = require("../../utils/middleware/embedMiddleware");
const {
convertToChatHistory,
writeResponseChunk,
} = require("../../utils/helpers/chat/responses");
function embeddedEndpoints(app) {
  // Routes backing the embeddable chat widget. There is no API key here;
  // the embed middleware chain (validEmbedConfig / setConnectionMeta /
  // canRespond) gates access and places the resolved embed configuration
  // on response.locals.embedConfig for the handlers to read.
  if (!app) return;

  // Stream a chat response to the widget over server-sent events.
  app.post(
    "/embed/:embedId/stream-chat",
    [validEmbedConfig, setConnectionMeta, canRespond],
    async (request, response) => {
      try {
        const embed = response.locals.embedConfig;
        const {
          sessionId,
          message,
          // optional keys for override of defaults if enabled.
          prompt = null,
          model = null,
          temperature = null,
          username = null,
        } = reqBody(request);

        // SSE: all headers must be set and flushed before the first chunk
        // is written to the stream.
        response.setHeader("Cache-Control", "no-cache");
        response.setHeader("Content-Type", "text/event-stream");
        response.setHeader("Access-Control-Allow-Origin", "*");
        response.setHeader("Connection", "keep-alive");
        response.flushHeaders();

        await streamChatWithForEmbed(response, embed, message, sessionId, {
          promptOverride: prompt,
          modelOverride: model,
          temperatureOverride: temperature,
          username,
        });
        await Telemetry.sendTelemetry("embed_sent_chat", {
          multiUserMode: multiUserMode(response),
          LLMSelection: process.env.LLM_PROVIDER || "openai",
          Embedder: process.env.EMBEDDING_ENGINE || "inherit",
          VectorDbSelection: process.env.VECTOR_DB || "lancedb",
        });
        response.end();
      } catch (e) {
        console.error(e);
        // Headers may already be flushed at this point, so the failure is
        // reported as an in-stream "abort" chunk, not an HTTP error status.
        writeResponseChunk(response, {
          id: uuidv4(),
          type: "abort",
          sources: [],
          textResponse: null,
          close: true,
          error: e.message,
        });
        response.end();
      }
    }
  );

  // Fetch the visible chat history for one embed session.
  app.get(
    "/embed/:embedId/:sessionId",
    [validEmbedConfig],
    async (request, response) => {
      try {
        const { sessionId } = request.params;
        const embed = response.locals.embedConfig;
        const history = await EmbedChats.forEmbedByUser(
          embed.id,
          sessionId,
          null,
          null,
          true
        );
        response.status(200).json({ history: convertToChatHistory(history) });
      } catch (e) {
        console.error(e.message, e);
        response.sendStatus(500).end();
      }
    }
  );

  // Reset a session's chat history. markHistoryInvalid suggests a soft
  // delete (rows flagged, not removed) - verify in the EmbedChats model.
  app.delete(
    "/embed/:embedId/:sessionId",
    [validEmbedConfig],
    async (request, response) => {
      try {
        const { sessionId } = request.params;
        const embed = response.locals.embedConfig;
        await EmbedChats.markHistoryInvalid(embed.id, sessionId);
        response.status(200).end();
      } catch (e) {
        console.error(e.message, e);
        response.sendStatus(500).end();
      }
    }
  );
}
module.exports = { embeddedEndpoints };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/endpoints/experimental/index.js | server/endpoints/experimental/index.js | const { liveSyncEndpoints } = require("./liveSync");
const { importedAgentPluginEndpoints } = require("./imported-agent-plugins");
// All endpoints here are not stable and can move around - have breaking changes
// or are opt-in features that are not fully released.
// When a feature is promoted it should be removed from here and added to the appropriate scope.
function experimentalEndpoints(router) {
  // Register every experimental feature's routes on the shared router.
  // Each registrar is defensive about a missing router, so no guard is
  // needed here.
  const registrars = [liveSyncEndpoints, importedAgentPluginEndpoints];
  for (const register of registrars) register(router);
}
module.exports = { experimentalEndpoints };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/endpoints/experimental/liveSync.js | server/endpoints/experimental/liveSync.js | const { DocumentSyncQueue } = require("../../models/documentSyncQueue");
const { Document } = require("../../models/documents");
const { EventLogs } = require("../../models/eventLogs");
const { SystemSettings } = require("../../models/systemSettings");
const { Telemetry } = require("../../models/telemetry");
const { reqBody } = require("../../utils/http");
const {
featureFlagEnabled,
} = require("../../utils/middleware/featureFlagEnabled");
const {
flexUserRoleValid,
ROLES,
} = require("../../utils/middleware/multiUserProtected");
const { validWorkspaceSlug } = require("../../utils/middleware/validWorkspace");
const { validatedRequest } = require("../../utils/middleware/validatedRequest");
function liveSyncEndpoints(app) {
  // Experimental live document sync routes (admin-gated, feature-flagged).
  if (!app) return;

  // Flip the live-sync feature flag on/off and boot or kill the background
  // sync workers to match.
  app.post(
    "/experimental/toggle-live-sync",
    [validatedRequest, flexUserRoleValid([ROLES.admin])],
    async (request, response) => {
      try {
        const { updatedStatus = false } = reqBody(request);
        // Validation coerces the raw request value into "enabled"/"disabled".
        const newStatus =
          SystemSettings.validations.experimental_live_file_sync(updatedStatus);
        const currentStatus =
          (await SystemSettings.get({ label: "experimental_live_file_sync" }))
            ?.value || "disabled";

        // No-op when unchanged - avoids restarting workers needlessly.
        if (currentStatus === newStatus)
          return response
            .status(200)
            .json({ liveSyncEnabled: newStatus === "enabled" });

        // Already validated earlier - so can hot update.
        await SystemSettings._updateSettings({
          experimental_live_file_sync: newStatus,
        });
        if (newStatus === "enabled") {
          await Telemetry.sendTelemetry("experimental_feature_enabled", {
            feature: "live_file_sync",
          });
          await EventLogs.logEvent("experimental_feature_enabled", {
            feature: "live_file_sync",
          });
          DocumentSyncQueue.bootWorkers();
        } else {
          DocumentSyncQueue.killWorkers();
        }
        response.status(200).json({ liveSyncEnabled: newStatus === "enabled" });
      } catch (e) {
        console.error(e);
        response.status(500).end();
      }
    }
  );

  // List all queued sync jobs with their workspace document + workspace.
  app.get(
    "/experimental/live-sync/queues",
    [
      validatedRequest,
      flexUserRoleValid([ROLES.admin]),
      featureFlagEnabled(DocumentSyncQueue.featureKey),
    ],
    async (_, response) => {
      // FIX: this handler had no try/catch, unlike every sibling handler -
      // a rejected query became an unhandled promise rejection and the
      // request never received a response.
      try {
        const queues = await DocumentSyncQueue.where(
          {},
          null,
          { createdAt: "asc" },
          {
            workspaceDoc: {
              include: {
                workspace: true,
              },
            },
          }
        );
        response.status(200).json({ queues });
      } catch (e) {
        console.error(e);
        response.status(500).end();
      }
    }
  );

  // Should be in workspace routes, but is here for now.
  // Toggle whether a workspace document is watched for live sync.
  app.post(
    "/workspace/:slug/update-watch-status",
    [
      validatedRequest,
      flexUserRoleValid([ROLES.admin, ROLES.manager]),
      validWorkspaceSlug,
      featureFlagEnabled(DocumentSyncQueue.featureKey),
    ],
    async (request, response) => {
      try {
        const { docPath, watchStatus = false } = reqBody(request);
        const workspace = response.locals.workspace;
        const document = await Document.get({
          workspaceId: workspace.id,
          docpath: docPath,
        });
        if (!document) return response.sendStatus(404).end();

        await DocumentSyncQueue.toggleWatchStatus(document, watchStatus);
        return response.status(200).end();
      } catch (error) {
        console.error("Error processing the watch status update:", error);
        return response.status(500).end();
      }
    }
  );
}
module.exports = { liveSyncEndpoints };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/endpoints/experimental/imported-agent-plugins.js | server/endpoints/experimental/imported-agent-plugins.js | const ImportedPlugin = require("../../utils/agents/imported");
const { reqBody } = require("../../utils/http");
const {
flexUserRoleValid,
ROLES,
} = require("../../utils/middleware/multiUserProtected");
const { validatedRequest } = require("../../utils/middleware/validatedRequest");
function importedAgentPluginEndpoints(app) {
  // Admin-only management routes for imported (community hub) agent plugins.
  if (!app) return;

  // Enable or disable a plugin without touching the rest of its config.
  app.post(
    "/experimental/agent-plugins/:hubId/toggle",
    [validatedRequest, flexUserRoleValid([ROLES.admin])],
    (request, response) => {
      try {
        const { hubId } = request.params;
        const { active } = reqBody(request);
        const isActive = Boolean(active);
        const newConfig = ImportedPlugin.updateImportedPlugin(hubId, {
          active: isActive,
        });
        response.status(200).json(newConfig);
      } catch (err) {
        console.error(err);
        response.status(500).end();
      }
    }
  );

  // Apply an arbitrary set of config updates to a plugin.
  app.post(
    "/experimental/agent-plugins/:hubId/config",
    [validatedRequest, flexUserRoleValid([ROLES.admin])],
    (request, response) => {
      try {
        const { hubId } = request.params;
        const { updates } = reqBody(request);
        response
          .status(200)
          .json(ImportedPlugin.updateImportedPlugin(hubId, updates));
      } catch (err) {
        console.error(err);
        response.status(500).end();
      }
    }
  );

  // Remove an imported plugin entirely.
  app.delete(
    "/experimental/agent-plugins/:hubId",
    [validatedRequest, flexUserRoleValid([ROLES.admin])],
    async (request, response) => {
      try {
        const outcome = ImportedPlugin.deletePlugin(request.params.hubId);
        response.status(200).json(outcome);
      } catch (err) {
        console.error(err);
        response.status(500).end();
      }
    }
  );
}
module.exports = { importedAgentPluginEndpoints };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/passwordRecovery.js | server/models/passwordRecovery.js | const { v4 } = require("uuid");
const prisma = require("../utils/prisma");
const bcrypt = require("bcryptjs");
const RecoveryCode = {
  tablename: "recovery_codes",
  writable: [],

  /**
   * Hashes a plain-text recovery code with bcrypt and persists it for a user.
   * @param {number} userId - id of the owning user.
   * @param {string} code - plain-text recovery code to hash and store.
   * @returns {Promise<{recoveryCode: object|null, error: string|null}>}
   */
  create: async function (userId, code) {
    try {
      const hashed = await bcrypt.hash(code, 10);
      const created = await prisma.recovery_codes.create({
        data: { user_id: userId, code_hash: hashed },
      });
      return { recoveryCode: created, error: null };
    } catch (error) {
      console.error("FAILED TO CREATE RECOVERY CODE.", error.message);
      return { recoveryCode: null, error: error.message };
    }
  },

  /**
   * Inserts many pre-built recovery code records atomically.
   * @param {Array<object>} data - fully-formed record payloads for `recovery_codes`.
   * @returns {Promise<{recoveryCodes: object[]|null, error: string|null}>}
   */
  createMany: async function (data) {
    try {
      const creates = data.map((record) =>
        prisma.recovery_codes.create({ data: record })
      );
      const recoveryCodes = await prisma.$transaction(creates);
      return { recoveryCodes, error: null };
    } catch (error) {
      console.error("FAILED TO CREATE RECOVERY CODES.", error.message);
      return { recoveryCodes: null, error: error.message };
    }
  },

  /**
   * Finds the first recovery code record matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<object|null>} record or null on miss/error.
   */
  findFirst: async function (clause = {}) {
    try {
      return await prisma.recovery_codes.findFirst({ where: clause });
    } catch (error) {
      console.error("FAILED TO FIND RECOVERY CODE.", error.message);
      return null;
    }
  },

  /**
   * Finds all recovery code records matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<object[]|null>} records, or null on error (not []).
   */
  findMany: async function (clause = {}) {
    try {
      return await prisma.recovery_codes.findMany({ where: clause });
    } catch (error) {
      console.error("FAILED TO FIND RECOVERY CODES.", error.message);
      return null;
    }
  },

  /**
   * Deletes all recovery code records matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  deleteMany: async function (clause = {}) {
    try {
      await prisma.recovery_codes.deleteMany({ where: clause });
      return true;
    } catch (error) {
      console.error("FAILED TO DELETE RECOVERY CODES.", error.message);
      return false;
    }
  },

  /**
   * Convenience: all stored code hashes for a user.
   * @param {number|null} userId
   * @returns {Promise<string[]>} hashes; [] when no userId given.
   */
  hashesForUser: async function (userId = null) {
    if (!userId) return [];
    const records = await this.findMany({ user_id: userId });
    return records.map(({ code_hash }) => code_hash);
  },
};
const PasswordResetToken = {
  tablename: "password_reset_tokens",
  resetExpiryMs: 600_000, // 10 minutes in ms;
  writable: [],

  /**
   * Computes the expiry timestamp for a token issued right now.
   * @returns {Date} now + resetExpiryMs.
   */
  calcExpiry: function () {
    return new Date(Date.now() + this.resetExpiryMs);
  },

  /**
   * Issues a fresh reset token (random uuid) for a user with a 10 minute TTL.
   * @param {number} userId - id of the user requesting a reset.
   * @returns {Promise<{passwordResetToken: object|null, error: string|null}>}
   */
  create: async function (userId) {
    try {
      const record = await prisma.password_reset_tokens.create({
        data: { user_id: userId, token: v4(), expiresAt: this.calcExpiry() },
      });
      return { passwordResetToken: record, error: null };
    } catch (error) {
      console.error("FAILED TO CREATE PASSWORD RESET TOKEN.", error.message);
      return { passwordResetToken: null, error: error.message };
    }
  },

  /**
   * Looks up a single token by a unique clause (eg: the token value).
   * @param {object} clause - prisma unique where clause.
   * @returns {Promise<object|null>} record or null on miss/error.
   */
  findUnique: async function (clause = {}) {
    try {
      return await prisma.password_reset_tokens.findUnique({ where: clause });
    } catch (error) {
      console.error("FAILED TO FIND PASSWORD RESET TOKEN.", error.message);
      return null;
    }
  },

  /**
   * Deletes all tokens matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  deleteMany: async function (clause = {}) {
    try {
      await prisma.password_reset_tokens.deleteMany({ where: clause });
      return true;
    } catch (error) {
      console.error("FAILED TO DELETE PASSWORD RESET TOKEN.", error.message);
      return false;
    }
  },
};
module.exports = {
RecoveryCode,
PasswordResetToken,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/welcomeMessages.js | server/models/welcomeMessages.js | const prisma = require("../utils/prisma");
const WelcomeMessages = {
  /**
   * Finds the first welcome message matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<object|null>} message record or null on miss/error.
   */
  get: async function (clause = {}) {
    try {
      const message = await prisma.welcome_messages.findFirst({
        where: clause,
      });
      return message || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },

  /**
   * Finds all welcome messages matching a clause.
   * @param {object} clause - prisma where clause.
   * @param {number} [limit] - optional max rows to return.
   * @returns {Promise<object[]>} matching records; [] on error.
   */
  where: async function (clause = {}, limit) {
    try {
      const messages = await prisma.welcome_messages.findMany({
        where: clause,
        take: limit || undefined,
      });
      return messages;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },

  /**
   * Replaces the entire welcome message set with `messages`, preserving
   * each message's original array position as its orderIndex (skipped
   * blanks leave gaps, matching prior behavior).
   * Runs the delete + inserts in a single transaction so a mid-write
   * failure cannot wipe the existing messages. Errors are logged, not thrown.
   * @param {Array<{user?: string, response?: string}>} messages
   * @returns {Promise<void>}
   */
  saveAll: async function (messages) {
    try {
      // We create each message individually because prisma
      // with sqlite does not support createMany()
      const creates = [];
      for (const [index, message] of messages.entries()) {
        if (!message.response && !message.user) continue; // skip fully-empty rows
        creates.push(
          prisma.welcome_messages.create({
            data: {
              user: message.user,
              response: message.response,
              orderIndex: index,
            },
          })
        );
      }
      // Delete all existing messages and insert the replacements atomically.
      await prisma.$transaction([
        prisma.welcome_messages.deleteMany({}),
        ...creates,
      ]);
    } catch (error) {
      console.error("Failed to save all messages", error.message);
    }
  },

  /**
   * Returns all welcome messages ordered for display.
   * @returns {Promise<Array<{user: string, response: string}>>} [] on error.
   */
  getMessages: async function () {
    try {
      const messages = await prisma.welcome_messages.findMany({
        orderBy: { orderIndex: "asc" },
        select: { user: true, response: true },
      });
      return messages;
    } catch (error) {
      console.error("Failed to get all messages", error.message);
      return [];
    }
  },
};
module.exports.WelcomeMessages = WelcomeMessages;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/workspaceThread.js | server/models/workspaceThread.js | const prisma = require("../utils/prisma");
const slugifyModule = require("slugify");
const { v4: uuidv4 } = require("uuid");
const WorkspaceThread = {
  // Name given to every new thread until it is (auto-)renamed.
  defaultName: "Thread",
  // Whitelist of fields `update` will accept from callers.
  writable: ["name"],
  /**
   * The default Slugify module requires some additional mapping to prevent downstream issues
   * if the user is able to define a slug externally. We have to block non-escapable URL chars
   * so that if the slug is rendered it doesn't break the URL or UI when visited.
   * @param {...any} args - slugify args for npm package.
   * @returns {string}
   */
  slugify: function (...args) {
    slugifyModule.extend({
      "+": " plus ",
      "!": " bang ",
      "@": " at ",
      "*": " splat ",
      ".": " dot ",
      ":": "",
      "~": "",
      "(": "",
      ")": "",
      "'": "",
      '"': "",
      "|": "",
    });
    return slugifyModule(...args);
  },
  /**
   * Creates a new thread inside a workspace. When no slug is supplied a
   * random uuid is used (guaranteed URL-safe); user-supplied slugs are
   * sanitized through `slugify` above.
   * @param {object} workspace - workspace record the thread belongs to (only `id` is read).
   * @param {number|null} userId - owning user id, or null (eg: single-user mode).
   * @param {{name?: string, slug?: string}} data - optional overrides.
   * @returns {Promise<{thread: object|null, message: string|null}>}
   */
  new: async function (workspace, userId = null, data = {}) {
    try {
      const thread = await prisma.workspace_threads.create({
        data: {
          name: data.name ? String(data.name) : this.defaultName,
          slug: data.slug
            ? this.slugify(data.slug, { lowercase: true })
            : uuidv4(),
          user_id: userId ? Number(userId) : null,
          workspace_id: workspace.id,
        },
      });
      return { thread, message: null };
    } catch (error) {
      console.error(error.message);
      return { thread: null, message: error.message };
    }
  },
  /**
   * Updates a thread, accepting only keys listed in `writable`.
   * NOTE(review): unlike the other methods here, this throws (rather than
   * returning an error object) when prevThread is missing — callers must
   * pass the full previous thread record, not just an id.
   * @param {object|null} prevThread - existing thread record (only `id` is read).
   * @param {object} data - requested changes; non-writable keys are dropped.
   * @returns {Promise<{thread: object|null, message: string|null}>}
   */
  update: async function (prevThread = null, data = {}) {
    if (!prevThread) throw new Error("No thread id provided for update");
    const validData = {};
    Object.entries(data).forEach(([key, value]) => {
      if (!this.writable.includes(key)) return;
      validData[key] = value;
    });
    if (Object.keys(validData).length === 0)
      return { thread: prevThread, message: "No valid fields to update!" };
    try {
      const thread = await prisma.workspace_threads.update({
        where: { id: prevThread.id },
        data: validData,
      });
      return { thread, message: null };
    } catch (error) {
      console.error(error.message);
      return { thread: null, message: error.message };
    }
  },
  /**
   * Finds the first thread matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<object|null>} thread record or null on miss/error.
   */
  get: async function (clause = {}) {
    try {
      const thread = await prisma.workspace_threads.findFirst({
        where: clause,
      });
      return thread || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  /**
   * Deletes all threads matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  delete: async function (clause = {}) {
    try {
      await prisma.workspace_threads.deleteMany({
        where: clause,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  /**
   * Finds all threads matching a clause with optional take/orderBy/include.
   * @param {object} clause - prisma where clause.
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @param {object|null} include
   * @returns {Promise<object[]>} [] on error.
   */
  where: async function (
    clause = {},
    limit = null,
    orderBy = null,
    include = null
  ) {
    try {
      const results = await prisma.workspace_threads.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
        ...(include !== null ? { include } : {}),
      });
      return results;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  // Will fire on first message (included or not) for a thread and rename the thread with the newName prop.
  // Only renames threads still carrying the default name, and only when the
  // thread has exactly one chat (ie: this is its first message).
  // NOTE(review): the return type is inconsistent — `false` for guard
  // failures, `{ renamed: false, thread }` when chatCount !== 1 (a truthy
  // value!), and `true` on success. Callers relying on truthiness should be
  // audited before normalizing this.
  autoRenameThread: async function ({
    workspace = null,
    thread = null,
    user = null,
    newName = null,
    onRename = null,
  }) {
    if (!workspace || !thread || !newName) return false;
    if (thread.name !== this.defaultName) return false; // don't rename if already named.
    const { WorkspaceChats } = require("./workspaceChats");
    const chatCount = await WorkspaceChats.count({
      workspaceId: workspace.id,
      user_id: user?.id || null,
      thread_id: thread.id,
    });
    if (chatCount !== 1) return { renamed: false, thread };
    const { thread: updatedThread } = await this.update(thread, {
      name: newName,
    });
    onRename?.(updatedThread);
    return true;
  },
};
module.exports = { WorkspaceThread };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/eventLogs.js | server/models/eventLogs.js | const prisma = require("../utils/prisma");
const EventLogs = {
  /**
   * Records a single application event. Metadata is stored JSON-stringified.
   * Never throws — failures are logged and surfaced in the return value.
   * @param {string} event - event identifier, eg: "user_updated".
   * @param {object} metadata - arbitrary JSON-serializable context; falsy -> null.
   * @param {number|null} userId - acting user id; falsy -> null.
   * @returns {Promise<{eventLog: object|null, message: string|null}>}
   */
  logEvent: async function (event, metadata = {}, userId = null) {
    try {
      const eventLog = await prisma.event_logs.create({
        data: {
          event,
          metadata: metadata ? JSON.stringify(metadata) : null,
          userId: userId ? Number(userId) : null,
          occurredAt: new Date(),
        },
      });
      console.log(`\x1b[32m[Event Logged]\x1b[0m - ${event}`);
      return { eventLog, message: null };
    } catch (error) {
      console.error(
        `\x1b[31m[Event Logging Failed]\x1b[0m - ${event}`,
        error.message
      );
      return { eventLog: null, message: error.message };
    }
  },
  /**
   * Fetches logs for one event type, newest first unless orderBy overrides.
   * @param {string} event
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @returns {Promise<object[]>} [] on error.
   */
  getByEvent: async function (event, limit = null, orderBy = null) {
    try {
      const logs = await prisma.event_logs.findMany({
        where: { event },
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null
          ? { orderBy }
          : { orderBy: { occurredAt: "desc" } }),
      });
      return logs;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Fetches logs for one user, newest first unless orderBy overrides.
   * @param {number} userId
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @returns {Promise<object[]>} [] on error.
   */
  getByUserId: async function (userId, limit = null, orderBy = null) {
    try {
      const logs = await prisma.event_logs.findMany({
        where: { userId },
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null
          ? { orderBy }
          : { orderBy: { occurredAt: "desc" } }),
      });
      return logs;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Generic paged query; newest first unless orderBy overrides.
   * Parameter order is (clause, limit, orderBy, offset).
   * @param {object} clause - prisma where clause.
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @param {number|null} offset
   * @returns {Promise<object[]>} [] on error.
   */
  where: async function (
    clause = {},
    limit = null,
    orderBy = null,
    offset = null
  ) {
    try {
      const logs = await prisma.event_logs.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(offset !== null ? { skip: offset } : {}),
        ...(orderBy !== null
          ? { orderBy }
          : { orderBy: { occurredAt: "desc" } }),
      });
      return logs;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Like `where`, but attaches `{ user: { username } }` to each result
   * ("unknown user" when no userId / lookup miss).
   * NOTE(review): parameter order here is (clause, limit, OFFSET, ORDER_BY)
   * — swapped relative to `where` — and is re-mapped correctly in the
   * delegation below. Also does one User.get per row (N+1); acceptable for
   * admin log views, but worth batching if used on hot paths.
   * @returns {Promise<object[]>} [] on error.
   */
  whereWithData: async function (
    clause = {},
    limit = null,
    offset = null,
    orderBy = null
  ) {
    const { User } = require("./user");
    try {
      const results = await this.where(clause, limit, orderBy, offset);
      for (const res of results) {
        const user = res.userId ? await User.get({ id: res.userId }) : null;
        res.user = user
          ? { username: user.username }
          : { username: "unknown user" };
      }
      return results;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Counts logs matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<number>} 0 on error.
   */
  count: async function (clause = {}) {
    try {
      const count = await prisma.event_logs.count({
        where: clause,
      });
      return count;
    } catch (error) {
      console.error(error.message);
      return 0;
    }
  },
  /**
   * Deletes logs matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  delete: async function (clause = {}) {
    try {
      await prisma.event_logs.deleteMany({
        where: clause,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
};
module.exports = { EventLogs };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/promptHistory.js | server/models/promptHistory.js | const prisma = require("../utils/prisma");
const PromptHistory = {
  /**
   * Records one prompt-history entry for a workspace.
   * @param {{workspaceId: number, prompt: string, modifiedBy?: number|null}} params
   * @returns {Promise<{history: object|null, message: string|null}>}
   */
  new: async function ({ workspaceId, prompt, modifiedBy = null }) {
    try {
      const record = await prisma.prompt_history.create({
        data: {
          workspaceId: Number(workspaceId),
          prompt: String(prompt),
          modifiedBy: modifiedBy ? Number(modifiedBy) : null,
        },
      });
      return { history: record, message: null };
    } catch (error) {
      console.error(error.message);
      return { history: null, message: error.message };
    }
  },

  /**
   * Get the prompt history for a workspace (newest first by default),
   * including the modifying user's username.
   * @param {number} workspaceId - The ID of the workspace to get prompt history for.
   * @param {number|null} limit - The maximum number of history items to return.
   * @param {string|null} orderBy - The field to order the history by.
   * @returns {Promise<Array<{id: number, prompt: string, modifiedAt: Date, modifiedBy: number, user: {username: string}}>>} A promise that resolves to an array of prompt history objects.
   */
  forWorkspace: async function (
    workspaceId = null,
    limit = null,
    orderBy = null
  ) {
    if (!workspaceId) return [];
    const query = {
      where: { workspaceId: Number(workspaceId) },
      orderBy: orderBy !== null ? orderBy : { modifiedAt: "desc" },
      include: { user: { select: { username: true } } },
    };
    if (limit !== null) query.take = limit;
    try {
      return await prisma.prompt_history.findMany(query);
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },

  /**
   * Finds a single history entry matching a clause, including the
   * modifying user's id/username/role.
   * @param {object} clause - prisma where clause.
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @returns {Promise<object|null>} record or null on miss/error.
   */
  get: async function (clause = {}, limit = null, orderBy = null) {
    const query = {
      where: clause,
      include: { user: { select: { id: true, username: true, role: true } } },
    };
    if (limit !== null) query.take = limit;
    if (orderBy !== null) query.orderBy = orderBy;
    try {
      const record = await prisma.prompt_history.findFirst(query);
      return record || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },

  /**
   * Deletes all history entries matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  delete: async function (clause = {}) {
    try {
      await prisma.prompt_history.deleteMany({ where: clause });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },

  /**
   * Utility method to handle prompt changes and create history entries
   * @param {import('./workspace').Workspace} workspaceData - The workspace object (previous state)
   * @param {{id: number, role: string}|null} user - The user making the change
   * @returns {Promise<void>}
   */
  handlePromptChange: async function (workspaceData, user = null) {
    try {
      // Store the previous prompt as the history entry.
      await this.new({
        workspaceId: workspaceData.id,
        prompt: workspaceData.openAiPrompt,
        modifiedBy: user?.id,
      });
    } catch (error) {
      console.error("Failed to create prompt history:", error.message);
    }
  },
};
module.exports = { PromptHistory };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/vectors.js | server/models/vectors.js | const prisma = require("../utils/prisma");
const { Document } = require("./documents");
const DocumentVectors = {
  /**
   * Inserts doc/vector id pairs in a single transaction.
   * Fix: previously returned `undefined` for an empty input instead of the
   * `{ documentsInserted }` shape every other path returns.
   * @param {Array<{docId: string, vectorId: string}>} vectorRecords
   * @returns {Promise<{documentsInserted: number}>} count inserted; 0 on error or empty input.
   */
  bulkInsert: async function (vectorRecords = []) {
    if (vectorRecords.length === 0) return { documentsInserted: 0 };
    try {
      const inserts = vectorRecords.map((record) =>
        prisma.document_vectors.create({
          data: {
            docId: record.docId,
            vectorId: record.vectorId,
          },
        })
      );
      await prisma.$transaction(inserts);
      return { documentsInserted: inserts.length };
    } catch (error) {
      console.error("Bulk insert failed", error);
      return { documentsInserted: 0 };
    }
  },

  /**
   * Finds vector records matching a clause.
   * @param {object} clause - prisma where clause.
   * @param {number} [limit] - optional max rows.
   * @returns {Promise<object[]>} [] on error.
   */
  where: async function (clause = {}, limit) {
    try {
      const results = await prisma.document_vectors.findMany({
        where: clause,
        take: limit || undefined,
      });
      return results;
    } catch (error) {
      console.error("Where query failed", error);
      return [];
    }
  },

  /**
   * Deletes every vector record belonging to a workspace's documents.
   * @param {number} workspaceId
   * @returns {Promise<boolean>} true on success, false on error.
   */
  deleteForWorkspace: async function (workspaceId) {
    const documents = await Document.forWorkspace(workspaceId);
    // De-dupe docIds since multiple vectors may share one document.
    const docIds = [...new Set(documents.map((doc) => doc.docId))];
    try {
      await prisma.document_vectors.deleteMany({
        where: { docId: { in: docIds } },
      });
      return true;
    } catch (error) {
      console.error("Delete for workspace failed", error);
      return false;
    }
  },

  /**
   * Deletes vector records by primary key.
   * @param {number[]} ids
   * @returns {Promise<boolean>} true on success, false on error.
   */
  deleteIds: async function (ids = []) {
    try {
      await prisma.document_vectors.deleteMany({
        where: { id: { in: ids } },
      });
      return true;
    } catch (error) {
      console.error("Delete IDs failed", error);
      return false;
    }
  },

  /**
   * Deletes vector records matching an arbitrary clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  delete: async function (clause = {}) {
    try {
      await prisma.document_vectors.deleteMany({ where: clause });
      return true;
    } catch (error) {
      console.error("Delete failed", error);
      return false;
    }
  },
};
module.exports = { DocumentVectors };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/user.js | server/models/user.js | const prisma = require("../utils/prisma");
const { EventLogs } = require("./eventLogs");
/**
* @typedef {Object} User
* @property {number} id
* @property {string} username
* @property {string} password
* @property {string} pfpFilename
* @property {string} role
* @property {boolean} suspended
* @property {number|null} dailyMessageLimit
*/
const User = {
  // Allowed username characters: letters, digits, ".", "_", "%", "+", "-", "@".
  // FIX: the previous pattern `[a-zA-Z0-9._%+-@]` contained an accidental
  // character RANGE `+-@` (ASCII 0x2B-0x40) that silently admitted characters
  // like ";", "<", "=", ">", "?", "," and "/" — contradicting the error
  // message below. The hyphen is now escaped so every char is literal.
  usernameRegex: /^[a-zA-Z0-9._%+\-@]+$/,
  writable: [
    // Used for generic updates so we can validate keys in request body
    "username",
    "password",
    "pfpFilename",
    "role",
    "suspended",
    "dailyMessageLimit",
    "bio",
  ],
  // Field-specific validators; each returns the normalized value or throws
  // with a user-facing message.
  validations: {
    username: (newValue = "") => {
      try {
        if (String(newValue).length > 100)
          throw new Error("Username cannot be longer than 100 characters");
        if (String(newValue).length < 2)
          throw new Error("Username must be at least 2 characters");
        return String(newValue);
      } catch (e) {
        throw new Error(e.message);
      }
    },
    role: (role = "default") => {
      const VALID_ROLES = ["default", "admin", "manager"];
      if (!VALID_ROLES.includes(role)) {
        throw new Error(
          `Invalid role. Allowed roles are: ${VALID_ROLES.join(", ")}`
        );
      }
      return String(role);
    },
    dailyMessageLimit: (dailyMessageLimit = null) => {
      if (dailyMessageLimit === null) return null; // null = unlimited
      const limit = Number(dailyMessageLimit);
      if (isNaN(limit) || limit < 1) {
        throw new Error(
          "Daily message limit must be null or a number greater than or equal to 1"
        );
      }
      return limit;
    },
    bio: (bio = "") => {
      if (!bio || typeof bio !== "string") return "";
      if (bio.length > 1000)
        throw new Error("Bio cannot be longer than 1,000 characters");
      return String(bio);
    },
  },
  /**
   * Force-casts an incoming value to the column's storage type.
   * @param {string} key - writable column name.
   * @param {*} value
   * @returns {number|string|null}
   */
  castColumnValue: function (key, value) {
    switch (key) {
      case "suspended":
        return Number(Boolean(value)); // stored as 0/1
      case "dailyMessageLimit":
        return value === null ? null : Number(value);
      default:
        return String(value);
    }
  },
  /**
   * Strips the password hash from a user record before returning it.
   * @param {object} user
   * @returns {object} shallow copy without `password`.
   */
  filterFields: function (user = {}) {
    const { password, ...rest } = user;
    return { ...rest };
  },
  /**
   * Creates a new user after password-complexity and username validation.
   * @param {{username: string, password: string, role?: string, dailyMessageLimit?: number|null, bio?: string}} params
   * @returns {Promise<{user: object|null, error: string|null}>} password-stripped user or error message.
   */
  create: async function ({
    username,
    password,
    role = "default",
    dailyMessageLimit = null,
    bio = "",
  }) {
    const passwordCheck = this.checkPasswordComplexity(password);
    if (!passwordCheck.checkedOK) {
      return { user: null, error: passwordCheck.error };
    }
    try {
      // Do not allow new users to bypass validation
      if (!this.usernameRegex.test(username))
        throw new Error(
          "Username must only contain letters, numbers, periods, underscores, hyphens, and email characters (@, %, +, -) with no spaces"
        );
      const bcrypt = require("bcryptjs");
      const hashedPassword = bcrypt.hashSync(password, 10);
      const user = await prisma.users.create({
        data: {
          username: this.validations.username(username),
          password: hashedPassword,
          role: this.validations.role(role),
          bio: this.validations.bio(bio),
          dailyMessageLimit:
            this.validations.dailyMessageLimit(dailyMessageLimit),
        },
      });
      return { user: this.filterFields(user), error: null };
    } catch (error) {
      console.error("FAILED TO CREATE USER.", error.message);
      return { user: null, error: error.message };
    }
  },
  // Log the changes to a user object, but omit sensitive fields
  // that are not meant to be logged.
  loggedChanges: function (updates, prev = {}) {
    const changes = {};
    const sensitiveFields = ["password"];
    Object.keys(updates).forEach((key) => {
      if (!sensitiveFields.includes(key) && updates[key] !== prev[key]) {
        changes[key] = `${prev[key]} => ${updates[key]}`;
      }
    });
    return changes;
  },
  /**
   * Generic user update from (possibly untrusted) request-body keys.
   * Only `writable` keys are applied, each cast and validated; password
   * changes are complexity-checked and re-hashed; username changes are
   * re-validated against `usernameRegex`. Logs a "user_updated" event.
   * FIX: no longer mutates the caller's `updates` object — a sanitized
   * copy is built instead.
   * @param {number|string} userId
   * @param {object} updates - requested changes keyed by column name.
   * @returns {Promise<{success: boolean, error: string|null}>}
   */
  update: async function (userId, updates = {}) {
    try {
      if (!userId) throw new Error("No user id provided for update");
      const currentUser = await prisma.users.findUnique({
        where: { id: parseInt(userId) },
      });
      if (!currentUser) return { success: false, error: "User not found" };

      // Build a sanitized copy: keep only writable keys, force-cast to the
      // proper type, and run any field-specific validation (which may throw).
      const data = {};
      for (const [key, value] of Object.entries(updates)) {
        if (!this.writable.includes(key)) continue;
        const casted = this.castColumnValue(key, value);
        data[key] = this.validations.hasOwnProperty(key)
          ? this.validations[key](casted)
          : casted;
      }

      if (Object.keys(data).length === 0)
        return { success: false, error: "No valid updates applied." };

      // Handle password specific updates
      if (data.hasOwnProperty("password")) {
        const passwordCheck = this.checkPasswordComplexity(data.password);
        if (!passwordCheck.checkedOK) {
          return { success: false, error: passwordCheck.error };
        }
        const bcrypt = require("bcryptjs");
        data.password = bcrypt.hashSync(data.password, 10);
      }

      // Re-validate a changed username against the allowed character set.
      if (
        data.hasOwnProperty("username") &&
        currentUser.username !== data.username &&
        !this.usernameRegex.test(data.username)
      )
        return {
          success: false,
          error:
            "Username must only contain letters, numbers, periods, underscores, hyphens, and email characters (@, %, +, -) with no spaces",
        };

      const user = await prisma.users.update({
        where: { id: parseInt(userId) },
        data,
      });
      await EventLogs.logEvent(
        "user_updated",
        {
          username: user.username,
          changes: this.loggedChanges(data, currentUser),
        },
        userId
      );
      return { success: true, error: null };
    } catch (error) {
      console.error(error.message);
      return { success: false, error: error.message };
    }
  },
  // Explicit direct update of user object.
  // Only use this method when directly setting a key value
  // that takes no user input for the keys being modified.
  _update: async function (id = null, data = {}) {
    if (!id) throw new Error("No user id provided for update");
    try {
      const user = await prisma.users.update({
        where: { id },
        data,
      });
      return { user, message: null };
    } catch (error) {
      console.error(error.message);
      return { user: null, message: error.message };
    }
  },
  /**
   * Returns a user object based on the clause provided.
   * @param {Object} clause - The clause to use to find the user.
   * @returns {Promise<import("@prisma/client").users|null>} The user object (password stripped) or null if not found.
   */
  get: async function (clause = {}) {
    try {
      const user = await prisma.users.findFirst({ where: clause });
      return user ? this.filterFields({ ...user }) : null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  // Returns user object with all fields (including password hash).
  _get: async function (clause = {}) {
    try {
      const user = await prisma.users.findFirst({ where: clause });
      return user ? { ...user } : null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  /**
   * Counts users matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<number>} 0 on error.
   */
  count: async function (clause = {}) {
    try {
      const count = await prisma.users.count({ where: clause });
      return count;
    } catch (error) {
      console.error(error.message);
      return 0;
    }
  },
  /**
   * Deletes users matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  delete: async function (clause = {}) {
    try {
      await prisma.users.deleteMany({ where: clause });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  /**
   * Finds users matching a clause (passwords stripped).
   * @param {object} clause - prisma where clause.
   * @param {number|null} limit
   * @returns {Promise<object[]>} [] on error.
   */
  where: async function (clause = {}, limit = null) {
    try {
      const users = await prisma.users.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
      });
      return users.map((usr) => this.filterFields(usr));
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Validates a password against the env-configurable complexity policy.
   * @param {string} passwordInput
   * @returns {{checkedOK: boolean, error: string}}
   */
  checkPasswordComplexity: function (passwordInput = "") {
    const passwordComplexity = require("joi-password-complexity");
    // Can be set via ENV variable on boot. No frontend config at this time.
    // Docs: https://www.npmjs.com/package/joi-password-complexity
    const complexityOptions = {
      min: process.env.PASSWORDMINCHAR || 8,
      max: process.env.PASSWORDMAXCHAR || 250,
      lowerCase: process.env.PASSWORDLOWERCASE || 0,
      upperCase: process.env.PASSWORDUPPERCASE || 0,
      numeric: process.env.PASSWORDNUMERIC || 0,
      symbol: process.env.PASSWORDSYMBOL || 0,
      // reqCount should be equal to how many conditions you are testing for (1-4)
      requirementCount: process.env.PASSWORDREQUIREMENTS || 0,
    };
    const complexityCheck = passwordComplexity(
      complexityOptions,
      "password"
    ).validate(passwordInput);
    if (complexityCheck.hasOwnProperty("error")) {
      // Join every violated rule into one comma-separated message.
      let myError = "";
      let prepend = "";
      for (let i = 0; i < complexityCheck.error.details.length; i++) {
        myError += prepend + complexityCheck.error.details[i].message;
        prepend = ", ";
      }
      return { checkedOK: false, error: myError };
    }
    return { checkedOK: true, error: "No error." };
  },
  /**
   * Check if a user can send a chat based on their daily message limit.
   * This limit is system wide and not per workspace and only applies to
   * multi-user mode AND non-admin users.
   * @param {User} user The user object record.
   * @returns {Promise<boolean>} True if the user can send a chat, false otherwise.
   */
  canSendChat: async function (user) {
    const { ROLES } = require("../utils/middleware/multiUserProtected");
    if (!user || user.dailyMessageLimit === null || user.role === ROLES.admin)
      return true;
    const { WorkspaceChats } = require("./workspaceChats");
    const currentChatCount = await WorkspaceChats.count({
      user_id: user.id,
      createdAt: {
        gte: new Date(new Date() - 24 * 60 * 60 * 1000), // 24 hours
      },
    });
    return currentChatCount < user.dailyMessageLimit;
  },
};
module.exports = { User };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/embedChats.js | server/models/embedChats.js | const { safeJsonParse } = require("../utils/http");
const prisma = require("../utils/prisma");
/**
* @typedef {Object} EmbedChat
* @property {number} id
* @property {number} embed_id
* @property {string} prompt
* @property {string} response
* @property {string} connection_information
* @property {string} session_id
* @property {boolean} include
*/
const EmbedChats = {
  /**
   * Records a single embed chat exchange. `response` and
   * `connection_information` are stored JSON-stringified.
   * @param {{embedId: number, prompt: string, response?: object, connection_information?: object, sessionId: string}} params
   * @returns {Promise<{chat: EmbedChat|null, message: string|null}>}
   */
  new: async function ({
    embedId,
    prompt,
    response = {},
    connection_information = {},
    sessionId,
  }) {
    try {
      const chat = await prisma.embed_chats.create({
        data: {
          prompt,
          embed_id: Number(embedId),
          response: JSON.stringify(response),
          connection_information: JSON.stringify(connection_information),
          session_id: String(sessionId),
        },
      });
      return { chat, message: null };
    } catch (error) {
      console.error(error.message);
      return { chat: null, message: error.message };
    }
  },
  /**
   * Loops through each chat and filters out the sources from the response object.
   * We do this when returning /history of an embed to the frontend to prevent inadvertent leaking
   * of private sources the user may not have intended to share with users.
   * NOTE(review): assumes every stored `response` parses to an object —
   * if safeJsonParse returned null/undefined the destructure below would
   * throw; confirm upstream always writes valid JSON.
   * @param {EmbedChat[]} chats
   * @returns {EmbedChat[]} Returns a new array of chats with the sources filtered out of responses
   */
  filterSources: function (chats) {
    return chats.map((chat) => {
      const { response, ...rest } = chat;
      const { sources, ...responseRest } = safeJsonParse(response);
      return { ...rest, response: JSON.stringify(responseRest) };
    });
  },
  /**
   * Fetches chats for a given embed and session id.
   * @param {number} embedId the id of the embed to fetch chats for
   * @param {string} sessionId the id of the session to fetch chats for
   * @param {number|null} limit the maximum number of chats to fetch
   * @param {string|null} orderBy the order to fetch chats in
   * @param {boolean} filterSources whether to filter out the sources from the response (default: false)
   * @returns {Promise<EmbedChat[]>} Returns an array of chats for the given embed and session
   */
  forEmbedByUser: async function (
    embedId = null,
    sessionId = null,
    limit = null,
    orderBy = null,
    filterSources = false
  ) {
    if (!embedId || !sessionId) return [];
    try {
      const chats = await prisma.embed_chats.findMany({
        where: {
          embed_id: Number(embedId),
          session_id: String(sessionId),
          include: true,
        },
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : { orderBy: { id: "asc" } }),
      });
      return filterSources ? this.filterSources(chats) : chats;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Soft-hides a session's chat history by flipping `include` to false —
   * records are kept but excluded from `forEmbedByUser` results.
   * NOTE(review): returns [] on invalid input but undefined otherwise;
   * callers should not rely on the return value.
   * @param {number|null} embedId
   * @param {string|null} sessionId
   * @returns {Promise<void|Array>}
   */
  markHistoryInvalid: async function (embedId = null, sessionId = null) {
    if (!embedId || !sessionId) return [];
    try {
      await prisma.embed_chats.updateMany({
        where: {
          embed_id: Number(embedId),
          session_id: String(sessionId),
        },
        data: {
          include: false,
        },
      });
      return;
    } catch (error) {
      console.error(error.message);
    }
  },
  /**
   * Finds the first chat matching a clause.
   * @param {object} clause - prisma where clause.
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @returns {Promise<EmbedChat|null>} record or null on miss/error.
   */
  get: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const chat = await prisma.embed_chats.findFirst({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return chat || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  /**
   * Deletes chats matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  delete: async function (clause = {}) {
    try {
      await prisma.embed_chats.deleteMany({
        where: clause,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  /**
   * Generic paged query over embed chats.
   * @param {object} clause - prisma where clause.
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @param {number|null} offset
   * @returns {Promise<EmbedChat[]>} [] on error.
   */
  where: async function (
    clause = {},
    limit = null,
    orderBy = null,
    offset = null
  ) {
    try {
      const chats = await prisma.embed_chats.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(offset !== null ? { skip: offset } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return chats;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Like `where`, but each chat also includes its embed's workspace name
   * (for admin chat-log views).
   * @param {object} clause - prisma where clause.
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @param {number|null} offset
   * @returns {Promise<EmbedChat[]>} [] on error.
   */
  whereWithEmbedAndWorkspace: async function (
    clause = {},
    limit = null,
    orderBy = null,
    offset = null
  ) {
    try {
      const chats = await prisma.embed_chats.findMany({
        where: clause,
        include: {
          embed_config: {
            select: {
              workspace: {
                select: {
                  name: true,
                },
              },
            },
          },
        },
        ...(limit !== null ? { take: limit } : {}),
        ...(offset !== null ? { skip: offset } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return chats;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Counts chats matching a clause.
   * @param {object} clause - prisma where clause.
   * @returns {Promise<number>} 0 on error.
   */
  count: async function (clause = {}) {
    try {
      const count = await prisma.embed_chats.count({
        where: clause,
      });
      return count;
    } catch (error) {
      console.error(error.message);
      return 0;
    }
  },
};
module.exports = { EmbedChats };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/systemPromptVariables.js | server/models/systemPromptVariables.js | const prisma = require("../utils/prisma");
const moment = require("moment");
/**
* @typedef {Object} SystemPromptVariable
* @property {number} id
* @property {string} key
* @property {string|function} value
* @property {string} description
* @property {'system'|'user'|'workspace'|'static'} type
* @property {number} userId
* @property {boolean} multiUserRequired
*/
const SystemPromptVariables = {
  VALID_TYPES: ["user", "workspace", "system", "static"],
  DEFAULT_VARIABLES: [
    {
      key: "time",
      value: () => moment().format("LTS"),
      description: "Current time",
      type: "system",
      multiUserRequired: false,
    },
    {
      key: "date",
      value: () => moment().format("LL"),
      description: "Current date",
      type: "system",
      multiUserRequired: false,
    },
    {
      key: "datetime",
      value: () => moment().format("LLLL"),
      description: "Current date and time",
      type: "system",
      multiUserRequired: false,
    },
    {
      key: "user.id",
      value: (userId = null) => {
        if (!userId) return "[User ID]";
        return userId;
      },
      description: "Current user's ID",
      type: "user",
      multiUserRequired: true,
    },
    {
      key: "user.name",
      value: async (userId = null) => {
        if (!userId) return "[User name]";
        try {
          const user = await prisma.users.findUnique({
            where: { id: Number(userId) },
            select: { username: true },
          });
          return user?.username || "[User name is empty or unknown]";
        } catch (error) {
          console.error("Error fetching user name:", error);
          return "[User name is empty or unknown]";
        }
      },
      description: "Current user's username",
      type: "user",
      multiUserRequired: true,
    },
    {
      key: "user.bio",
      value: async (userId = null) => {
        if (!userId) return "[User bio]";
        try {
          const user = await prisma.users.findUnique({
            where: { id: Number(userId) },
            select: { bio: true },
          });
          return user?.bio || "[User bio is empty]";
        } catch (error) {
          console.error("Error fetching user bio:", error);
          return "[User bio is empty]";
        }
      },
      description: "Current user's bio field from their profile",
      type: "user",
      multiUserRequired: true,
    },
    {
      key: "workspace.id",
      value: (workspaceId = null) => {
        if (!workspaceId) return "[Workspace ID]";
        return workspaceId;
      },
      description: "Current workspace's ID",
      type: "workspace",
      multiUserRequired: false,
    },
    {
      key: "workspace.name",
      value: async (workspaceId = null) => {
        if (!workspaceId) return "[Workspace name]";
        const workspace = await prisma.workspaces.findUnique({
          where: { id: Number(workspaceId) },
          select: { name: true },
        });
        return workspace?.name || "[Workspace name is empty or unknown]";
      },
      description: "Current workspace's name",
      type: "workspace",
      multiUserRequired: false,
    },
  ],

  /**
   * Gets a system prompt variable by its key
   * @param {string} key
   * @returns {Promise<SystemPromptVariable>}
   */
  get: async function (key = null) {
    if (!key) return null;
    const variable = await prisma.system_prompt_variables.findUnique({
      where: { key: String(key) },
    });
    return variable;
  },

  /**
   * Retrieves all system prompt variables with dynamic variables as well
   * as user defined variables
   * @param {number|null} userId - the current user ID (determines if in multi-user mode)
   * @returns {Promise<SystemPromptVariable[]>}
   */
  getAll: async function (userId = null) {
    // All user-defined system variables are available to everyone globally since only admins can create them.
    const userDefinedSystemVariables =
      await prisma.system_prompt_variables.findMany();
    const formattedDbVars = userDefinedSystemVariables.map((v) => ({
      id: v.id,
      key: v.key,
      value: v.value,
      description: v.description,
      type: v.type,
      userId: v.userId,
    }));

    // If userId is not provided, filter the default variables to only include non-multiUserRequired variables
    // since we wont be able to dynamically inject user-related content.
    const defaultSystemVariables = !userId
      ? this.DEFAULT_VARIABLES.filter((v) => !v.multiUserRequired)
      : this.DEFAULT_VARIABLES;
    return [...defaultSystemVariables, ...formattedDbVars];
  },

  /**
   * Creates a new system prompt variable
   * @param {{ key: string, value: string, description: string, type: string, userId: number }} data
   * @returns {Promise<SystemPromptVariable>}
   */
  create: async function ({
    key,
    value,
    description = null,
    type = "static",
    userId = null,
  }) {
    await this._checkVariableKey(key, true);
    return await prisma.system_prompt_variables.create({
      data: {
        key: String(key),
        value: String(value),
        description: description ? String(description) : null,
        type: type ? String(type) : "static",
        userId: userId ? Number(userId) : null,
      },
    });
  },

  /**
   * Updates a system prompt variable by its unique database ID
   * @param {number} id
   * @param {{ key: string, value: string, description: string }} data
   * @returns {Promise<SystemPromptVariable>}
   */
  update: async function (id, { key, value, description = null }) {
    if (!id || !key || !value) return null;
    const existingRecord = await prisma.system_prompt_variables.findFirst({
      where: { id: Number(id) },
    });
    if (!existingRecord) throw new Error("System prompt variable not found");

    await this._checkVariableKey(key, false);
    return await prisma.system_prompt_variables.update({
      where: { id: existingRecord.id },
      data: {
        key: String(key),
        value: String(value),
        description: description ? String(description) : null,
      },
    });
  },

  /**
   * Deletes a system prompt variable by its unique database ID
   * @param {number} id
   * @returns {Promise<boolean>}
   */
  delete: async function (id = null) {
    try {
      await prisma.system_prompt_variables.delete({
        where: { id: Number(id) },
      });
      return true;
    } catch (error) {
      console.error("Error deleting variable:", error);
      return false;
    }
  },

  /**
   * Injects variables into a string based on the user ID and workspace ID (if provided) and the variables available
   * @param {string} str - the input string to expand variables into
   * @param {number|null} userId - the user ID to use for dynamic variables
   * @param {number|null} workspaceId - the workspace ID to use for workspace variables
   * @returns {Promise<string>}
   */
  expandSystemPromptVariables: async function (
    str,
    userId = null,
    workspaceId = null
  ) {
    if (!str) return str;

    try {
      const allVariables = await this.getAll(userId);
      let result = str;

      // Find all variable patterns in the string
      const matches = str.match(/\{([^}]+)\}/g) || [];

      // Process each match.
      // NOTE: every String.replace below uses a function replacer (`() => value`)
      // so that `$&`, `$'`, `$1`, etc. inside user-provided values (usernames,
      // bios, DB-stored variable values) are inserted literally instead of being
      // interpreted as special replacement patterns.
      for (const match of matches) {
        const key = match.substring(1, match.length - 1); // Remove { and }

        // Determine if the variable is a class-based variable (workspace.X or user.X)
        const isWorkspaceOrUserVariable = ["workspace.", "user."].some(
          (prefix) => key.startsWith(prefix)
        );

        // Handle class-based variables with current workspace's or user's data
        if (isWorkspaceOrUserVariable) {
          let variableTypeDisplay;
          if (key.startsWith("workspace.")) variableTypeDisplay = "Workspace";
          else if (key.startsWith("user.")) variableTypeDisplay = "User";
          else throw new Error(`Invalid class-based variable: ${key}`);

          // Get the property name after the prefix
          const prop = key.split(".")[1];
          const variable = allVariables.find((v) => v.key === key);

          // If the variable is a function, call it to get the current value
          if (variable && typeof variable.value === "function") {
            // If the variable is an async function, call it to get the current value
            if (variable.value.constructor.name === "AsyncFunction") {
              let value;
              try {
                if (variableTypeDisplay === "Workspace")
                  value = await variable.value(workspaceId);
                else if (variableTypeDisplay === "User")
                  value = await variable.value(userId);
                else throw new Error(`Invalid class-based variable: ${key}`);
              } catch (error) {
                console.error(
                  `Error processing ${variableTypeDisplay} variable ${key}:`,
                  error
                );
                value = `[${variableTypeDisplay} ${prop}]`;
              }
              result = result.replace(match, () => value);
            } else {
              let value;
              try {
                // Call the variable function with the appropriate workspace or user ID
                if (variableTypeDisplay === "Workspace")
                  value = variable.value(workspaceId);
                else if (variableTypeDisplay === "User")
                  value = variable.value(userId);
                else throw new Error(`Invalid class-based variable: ${key}`);
              } catch (error) {
                console.error(
                  `Error processing ${variableTypeDisplay} variable ${key}:`,
                  error
                );
                value = `[${variableTypeDisplay} ${prop}]`;
              }
              result = result.replace(match, () => value);
            }
          } else {
            // If the variable is not a function, replace the match with a placeholder
            result = result.replace(
              match,
              () => `[${variableTypeDisplay} ${prop}]`
            );
          }
          continue;
        }

        // Handle regular variables (static types)
        const variable = allVariables.find((v) => v.key === key);
        if (!variable) continue;

        // For dynamic and system variables, call the function to get the current value
        if (
          ["system"].includes(variable.type) &&
          typeof variable.value === "function"
        ) {
          try {
            if (variable.value.constructor.name === "AsyncFunction") {
              const value = await variable.value(userId);
              result = result.replace(match, () => value);
            } else {
              const value = variable.value();
              result = result.replace(match, () => value);
            }
          } catch (error) {
            console.error(`Error processing dynamic variable ${key}:`, error);
            result = result.replace(match, () => match);
          }
        } else {
          result = result.replace(match, () => variable.value || match);
        }
      }

      return result;
    } catch (error) {
      console.error("Error in expandSystemPromptVariables:", error);
      return str;
    }
  },

  /**
   * Internal function to check if a variable key is valid
   * @param {string} key
   * @param {boolean} checkExisting
   * @returns {Promise<boolean>}
   */
  _checkVariableKey: async function (key = null, checkExisting = true) {
    if (!key) throw new Error("Key is required");
    if (typeof key !== "string") throw new Error("Key must be a string");
    if (!/^[a-zA-Z0-9_]+$/.test(key))
      throw new Error("Key must contain only letters, numbers and underscores");
    if (key.length > 255)
      throw new Error("Key must be less than 255 characters");
    if (key.length < 3) throw new Error("Key must be at least 3 characters");
    // NOTE(review): the two prefix checks below are unreachable — the charset
    // regex above already rejects any key containing ".". Kept for safety.
    if (key.startsWith("user."))
      throw new Error("Key cannot start with 'user.'");
    if (key.startsWith("system."))
      throw new Error("Key cannot start with 'system.'");
    if (checkExisting && (await this.get(key)) !== null)
      throw new Error("System prompt variable with this key already exists");
    return true;
  },
};

module.exports = { SystemPromptVariables };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/cacheData.js | server/models/cacheData.js | const prisma = require("../utils/prisma");
// Thin CRUD wrapper around the `cache_data` table. All methods swallow
// database errors (logging the message) and return a safe fallback value.
const CacheData = {
  // Insert a new cache row. Returns { cache, message } where message is the
  // error text on failure.
  new: async function (inputs = {}) {
    try {
      const record = await prisma.cache_data.create({ data: inputs });
      return { cache: record, message: null };
    } catch (e) {
      console.error(e.message);
      return { cache: null, message: e.message };
    }
  },

  // Fetch the first row matching `clause`, or null.
  get: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const query = { where: clause };
      if (limit !== null) query.take = limit;
      if (orderBy !== null) query.orderBy = orderBy;
      const record = await prisma.cache_data.findFirst(query);
      return record ?? null;
    } catch (e) {
      console.error(e.message);
      return null;
    }
  },

  // Delete every row matching `clause`. Returns success boolean.
  delete: async function (clause = {}) {
    try {
      await prisma.cache_data.deleteMany({ where: clause });
      return true;
    } catch (e) {
      console.error(e.message);
      return false;
    }
  },

  // Fetch all rows matching `clause`, optionally limited/ordered.
  where: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const query = { where: clause };
      if (limit !== null) query.take = limit;
      if (orderBy !== null) query.orderBy = orderBy;
      return await prisma.cache_data.findMany(query);
    } catch (e) {
      console.error(e.message);
      return [];
    }
  },

  // Count rows matching `clause`; 0 on failure.
  count: async function (clause = {}) {
    try {
      return await prisma.cache_data.count({ where: clause });
    } catch (e) {
      console.error(e.message);
      return 0;
    }
  },
};

module.exports = { CacheData };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/temporaryAuthToken.js | server/models/temporaryAuthToken.js | const { makeJWT } = require("../utils/http");
const prisma = require("../utils/prisma");
/**
* Temporary auth tokens are used for simple SSO.
* They simply enable the ability for a time-based token to be used in the query of the /sso/login URL
* to login as a user without the need of a username and password. These tokens are single-use and expire.
*/
const TemporaryAuthToken = {
  // Token lifetime in milliseconds.
  // NOTE(review): 1000 * 60 * 6 is 6 minutes — the prior comment claimed
  // "1 hour"; confirm which lifetime is intended.
  expiry: 1000 * 60 * 6, // 6 minutes
  tablename: "temporary_auth_tokens",
  writable: [],

  // Generates a random single-use token string with a recognizable prefix.
  makeTempToken: () => {
    const uuidAPIKey = require("uuid-apikey");
    return `allm-tat-${uuidAPIKey.create().apiKey}`;
  },

  /**
   * Issues a temporary auth token for a user via its ID.
   * Any previously issued tokens for the user are invalidated first,
   * so at most one token is active per user at a time.
   * @param {number} userId
   * @returns {Promise<{token: string|null, error: string | null}>}
   */
  issue: async function (userId = null) {
    if (!userId)
      throw new Error("User ID is required to issue a temporary auth token.");
    await this.invalidateUserTokens(userId);

    try {
      const token = this.makeTempToken();
      const expiresAt = new Date(Date.now() + this.expiry);
      await prisma.temporary_auth_tokens.create({
        data: {
          token,
          expiresAt,
          userId: Number(userId),
        },
      });
      return { token, error: null };
    } catch (error) {
      console.error("FAILED TO CREATE TEMPORARY AUTH TOKEN.", error.message);
      return { token: null, error: error.message };
    }
  },

  /**
   * Invalidates (deletes) all temporary auth tokens for a user via their ID.
   * @param {number} userId
   * @returns {Promise<boolean>}
   */
  invalidateUserTokens: async function (userId) {
    if (!userId)
      throw new Error(
        "User ID is required to invalidate temporary auth tokens."
      );
    await prisma.temporary_auth_tokens.deleteMany({
      where: { userId: Number(userId) },
    });
    return true;
  },

  /**
   * Validates a temporary auth token and returns the session token
   * to be set in the browser localStorage for authentication.
   * The token is single-use: once fetched from the DB it is always deleted
   * (see `finally` below), even if validation subsequently fails.
   * @param {string} publicToken - the token to validate against
   * @returns {Promise<{sessionToken: string|null, token: import("@prisma/client").temporary_auth_tokens & {user: import("@prisma/client").users} | null, error: string | null}>}
   */
  validate: async function (publicToken = "") {
    /** @type {import("@prisma/client").temporary_auth_tokens & {user: import("@prisma/client").users} | undefined | null} **/
    let token;

    try {
      if (!publicToken)
        throw new Error(
          "Public token is required to validate a temporary auth token."
        );
      token = await prisma.temporary_auth_tokens.findUnique({
        where: { token: String(publicToken) },
        include: { user: true },
      });
      if (!token) throw new Error("Invalid token.");
      if (token.expiresAt < new Date()) throw new Error("Token expired.");
      if (token.user.suspended) throw new Error("User account suspended.");

      // Create a new session token for the user valid for 30 days
      const sessionToken = makeJWT(
        { id: token.user.id, username: token.user.username },
        process.env.JWT_EXPIRY
      );
      return { sessionToken, token, error: null };
    } catch (error) {
      console.error("FAILED TO VALIDATE TEMPORARY AUTH TOKEN.", error.message);
      return { sessionToken: null, token: null, error: error.message };
    } finally {
      // Delete the token after it has been used under all circumstances if it was retrieved
      if (token)
        await prisma.temporary_auth_tokens.delete({ where: { id: token.id } });
    }
  },
};

module.exports = { TemporaryAuthToken };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/slashCommandsPresets.js | server/models/slashCommandsPresets.js | const { v4 } = require("uuid");
const prisma = require("../utils/prisma");
const CMD_REGEX = new RegExp(/[^a-zA-Z0-9_-]/g);
// Model for user-defined slash command presets (`slash_command_presets`).
const SlashCommandPresets = {
  // Normalizes arbitrary user input into a valid `/command` slug.
  // Inputs shorter than 2 chars get a random fallback name.
  formatCommand: function (command = "") {
    if (!command || command.length < 2) return `/${v4().split("-")[0]}`;

    const lowered = command.toLowerCase();
    const body = lowered.startsWith("/") ? lowered.slice(1) : lowered;
    return `/${body.replace(CMD_REGEX, "-")}`; // invalid chars become '-'
  },

  // First preset matching `clause`, or null.
  get: async function (clause = {}) {
    try {
      const record = await prisma.slash_command_presets.findFirst({
        where: clause,
      });
      return record ?? null;
    } catch (e) {
      console.error(e.message);
      return null;
    }
  },

  // All presets matching `clause`, optionally limited.
  where: async function (clause = {}, limit) {
    try {
      return await prisma.slash_command_presets.findMany({
        where: clause,
        take: limit || undefined,
      });
    } catch (e) {
      console.error(e.message);
      return [];
    }
  },

  // Command + userId must be unique combination.
  create: async function (userId = null, presetData = {}) {
    try {
      const existing = await this.get({
        userId: userId ? Number(userId) : null,
        command: String(presetData.command),
      });
      if (existing) {
        console.log(
          "SlashCommandPresets.create - preset already exists - will not create"
        );
        return existing;
      }

      return await prisma.slash_command_presets.create({
        data: {
          ...presetData,
          // uid mirrors userId (or 0 outside multi-user mode) so the
          // @@unique(userId, command) constraint can be enforced even though
          // the relational userId column must stay nullable under
          // prisma + sqlite.
          uid: userId ? Number(userId) : 0,
          userId: userId ? Number(userId) : null,
        },
      });
    } catch (e) {
      console.error("Failed to create preset", e.message);
      return null;
    }
  },

  // All presets belonging to a user (or the null user outside multi-user
  // mode), oldest first, trimmed to the fields the UI needs.
  getUserPresets: async function (userId = null) {
    try {
      const presets = await prisma.slash_command_presets.findMany({
        where: { userId: userId ? Number(userId) : null },
        orderBy: { createdAt: "asc" },
      });
      return presets?.map(({ id, command, prompt, description }) => ({
        id,
        command,
        prompt,
        description,
      }));
    } catch (e) {
      console.error("Failed to get user presets", e.message);
      return [];
    }
  },

  // Update a preset by DB id; null on failure.
  update: async function (presetId = null, presetData = {}) {
    try {
      return await prisma.slash_command_presets.update({
        where: { id: Number(presetId) },
        data: presetData,
      });
    } catch (e) {
      console.error("Failed to update preset", e.message);
      return null;
    }
  },

  // Delete a preset by DB id; success boolean.
  delete: async function (presetId = null) {
    try {
      await prisma.slash_command_presets.delete({
        where: { id: Number(presetId) },
      });
      return true;
    } catch (e) {
      console.error("Failed to delete preset", e.message);
      return false;
    }
  },
};

module.exports.SlashCommandPresets = SlashCommandPresets;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/workspacesSuggestedMessages.js | server/models/workspacesSuggestedMessages.js | const prisma = require("../utils/prisma");
// Model for workspace "suggested message" prompts shown in the chat UI.
const WorkspaceSuggestedMessages = {
  // First suggestion matching `clause`, or null.
  get: async function (clause = {}) {
    try {
      const record = await prisma.workspace_suggested_messages.findFirst({
        where: clause,
      });
      return record ?? null;
    } catch (e) {
      console.error(e.message);
      return null;
    }
  },

  // All suggestions matching `clause`, optionally limited.
  where: async function (clause = {}, limit) {
    try {
      return await prisma.workspace_suggested_messages.findMany({
        where: clause,
        take: limit || undefined,
      });
    } catch (e) {
      console.error(e.message);
      return [];
    }
  },

  // Replaces the full set of suggestions for a workspace (by slug).
  saveAll: async function (messages, workspaceSlug) {
    try {
      const workspace = await prisma.workspaces.findUnique({
        where: { slug: workspaceSlug },
      });
      if (!workspace) throw new Error("Workspace not found");

      // Wipe any previously saved suggestions before writing the new set.
      await prisma.workspace_suggested_messages.deleteMany({
        where: { workspaceId: workspace.id },
      });

      // Inserted one-by-one because prisma with sqlite does not
      // support createMany().
      for (const { heading, message } of messages) {
        await prisma.workspace_suggested_messages.create({
          data: { workspaceId: workspace.id, heading, message },
        });
      }
    } catch (e) {
      console.error("Failed to save all messages", e.message);
    }
  },

  // All suggestions for a workspace (by slug), oldest first, trimmed to
  // the { heading, message } shape the UI consumes.
  getMessages: async function (workspaceSlug) {
    try {
      const workspace = await prisma.workspaces.findUnique({
        where: { slug: workspaceSlug },
      });
      if (!workspace) throw new Error("Workspace not found");

      const records = await prisma.workspace_suggested_messages.findMany({
        where: { workspaceId: workspace.id },
        orderBy: { createdAt: "asc" },
      });
      return records.map(({ heading, message }) => ({ heading, message }));
    } catch (e) {
      console.error("Failed to get all messages", e.message);
      return [];
    }
  },
};

module.exports.WorkspaceSuggestedMessages = WorkspaceSuggestedMessages;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/mobileDevice.js | server/models/mobileDevice.js | const prisma = require("../utils/prisma");
const { v4: uuidv4 } = require("uuid");
const ip = require("ip");
/**
* @typedef {Object} TemporaryMobileDeviceRequest
* @property {number|null} userId - User id to associate creation of key with.
* @property {number} createdAt - Timestamp of when the token was created.
* @property {number} expiresAt - Timestamp of when the token expires.
*/
/**
* Temporary map to store mobile device requests
* that are not yet approved. Generates a simple JWT
* that expires and is tied to the user (if provided)
* This token must be provided during /register event.
* @type {Map<string, TemporaryMobileDeviceRequest>}
*/
const TemporaryMobileDeviceRequests = new Map();
const MobileDevice = {
  platform: "server",
  validDeviceOs: ["android"],
  tablename: "desktop_mobile_devices",
  writable: ["approved"],
  validators: {
    approved: (value) => {
      if (typeof value !== "boolean") return "Must be a boolean";
      return null;
    },
  },

  /**
   * Looks up and consumes a temporary token that was registered
   * Will return null if the token is not found or expired.
   * The token is removed from the map in all cases (single-use).
   * @param {string} token - The temporary token to lookup
   * @returns {TemporaryMobileDeviceRequest|null} Temp token details
   */
  tempToken: (token = null) => {
    try {
      if (!token || !TemporaryMobileDeviceRequests.has(token)) return null;
      const tokenData = TemporaryMobileDeviceRequests.get(token);
      if (tokenData.expiresAt < Date.now()) return null;
      return tokenData;
    } catch (error) {
      return null;
    } finally {
      TemporaryMobileDeviceRequests.delete(token);
    }
  },

  /**
   * Registers a temporary token for a mobile device request
   * This is just using a random token to identify the request
   * @security Note: If we use a JWT the QR code that encodes it becomes extremely complex
   * and noisy as QR codes have byte limits that could be exceeded with JWTs. Since this is
   * a temporary token that is only used to register a device and is short lived we can use UUIDs.
   * @param {import("@prisma/client").users|null} user - User to get connection URL for in Multi-User Mode
   * @returns {string} The temporary token
   */
  registerTempToken: function (user = null) {
    let tokenData = {};
    if (user) tokenData.userId = user.id;
    else tokenData.userId = null;

    // Set short lived expiry to this mapping (3 minutes)
    const createdAt = Date.now();
    tokenData.createdAt = createdAt;
    tokenData.expiresAt = createdAt + 3 * 60_000;
    const tempToken = uuidv4().split("-").slice(0, 3).join("");
    TemporaryMobileDeviceRequests.set(tempToken, tokenData);

    // Run this on register since there is no BG task to do this.
    this.cleanupExpiredTokens();
    return tempToken;
  },

  /**
   * Cleans up expired temporary registration tokens
   * Should run quick since this mapping is wiped often
   * and does not live past restarts.
   */
  cleanupExpiredTokens: function () {
    const now = Date.now();
    for (const [token, data] of TemporaryMobileDeviceRequests.entries()) {
      if (data.expiresAt < now) TemporaryMobileDeviceRequests.delete(token);
    }
  },

  /**
   * Returns the connection URL for the mobile app to use to connect to the backend.
   * Since you have to have a valid session to call /mobile/connect-info we can pre-register
   * a temporary token for the user that is passed back to /mobile/register and can lookup
   * who a device belongs to so we can scope it's access token.
   * @param {import("@prisma/client").users|null} user - User to get connection URL for in Multi-User Mode
   * @returns {string}
   */
  connectionURL: function (user = null) {
    let baseUrl = "/api/mobile";
    if (process.env.NODE_ENV === "production") baseUrl = "/api/mobile";
    else
      baseUrl = `http://${ip.address()}:${process.env.SERVER_PORT || 3001}/api/mobile`;
    const tempToken = this.registerTempToken(user);
    baseUrl = `${baseUrl}?t=${tempToken}`;
    return baseUrl;
  },

  /**
   * Creates a new device for the mobile app
   * @param {object} params - The params to create the device with.
   * @param {string} params.deviceOs - Device os to associate creation of key with.
   * @param {string} params.deviceName - Device name to associate creation of key with.
   * @param {number|null} params.userId - User id to associate creation of key with.
   * @returns {Promise<{device: import("@prisma/client").desktop_mobile_devices|null, error:string|null}>}
   */
  create: async function ({ deviceOs, deviceName, userId = null }) {
    try {
      if (!deviceOs || !deviceName)
        return { device: null, error: "Device OS and name are required" };
      if (!this.validDeviceOs.includes(deviceOs))
        return { device: null, error: `Invalid device OS - ${deviceOs}` };

      const device = await prisma.desktop_mobile_devices.create({
        data: {
          deviceName: String(deviceName),
          deviceOs: String(deviceOs).toLowerCase(),
          token: uuidv4(),
          userId: userId ? Number(userId) : null,
        },
      });
      return { device, error: null };
    } catch (error) {
      console.error("Failed to create mobile device", error);
      return { device: null, error: error.message };
    }
  },

  /**
   * Updates a mobile device with validated writable fields only.
   * @param {string} id - Device id (db id)
   * @param {object} updates - Updates to apply to device
   * @returns {Promise<{device: import("@prisma/client").desktop_mobile_devices|null, error:string|null}>}
   */
  update: async function (id, updates = {}) {
    const device = await this.get({ id: parseInt(id) });
    if (!device) return { device: null, error: "Device not found" };

    const validUpdates = {};
    for (const [key, value] of Object.entries(updates)) {
      if (!this.writable.includes(key)) continue;
      const validation = this.validators[key](value);
      if (validation !== null) return { device: null, error: validation };
      validUpdates[key] = value;
    }

    // If no updates, return the device.
    if (Object.keys(validUpdates).length === 0) return { device, error: null };
    const updatedDevice = await prisma.desktop_mobile_devices.update({
      where: { id: device.id },
      data: validUpdates,
    });
    return { device: updatedDevice, error: null };
  },

  /**
   * Fetches a single mobile device by params.
   * @param {object} clause - Prisma props for search
   * @returns {Promise<import("@prisma/client").desktop_mobile_devices|null>}
   */
  get: async function (clause = {}, include = null) {
    try {
      const device = await prisma.desktop_mobile_devices.findFirst({
        where: clause,
        ...(include !== null ? { include } : {}),
      });
      return device;
    } catch (error) {
      console.error("FAILED TO GET MOBILE DEVICE.", error);
      // Fix: this is a single-record lookup, so the failure fallback is
      // null — previously returned [] which is truthy and broke callers
      // doing `if (!device)` checks.
      return null;
    }
  },

  /**
   * Deletes mobile device by db id.
   * @param {number} id - database id of mobile device
   * @returns {Promise<{success: boolean, error:string|null}>}
   */
  delete: async function (id) {
    try {
      await prisma.desktop_mobile_devices.delete({
        where: { id: parseInt(id) },
      });
      return { success: true, error: null };
    } catch (error) {
      console.error("Failed to delete mobile device", error);
      return { success: false, error: error.message };
    }
  },

  /**
   * Gets mobile devices by params
   * @param {object} clause
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @returns {Promise<import("@prisma/client").desktop_mobile_devices[]>}
   */
  where: async function (
    clause = {},
    limit = null,
    orderBy = null,
    include = null
  ) {
    try {
      const devices = await prisma.desktop_mobile_devices.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
        ...(include !== null ? { include } : {}),
      });
      return devices;
    } catch (error) {
      console.error("FAILED TO GET MOBILE DEVICES.", error.message);
      return [];
    }
  },
};

module.exports = { MobileDevice };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/embedConfig.js | server/models/embedConfig.js | const { v4 } = require("uuid");
const prisma = require("../utils/prisma");
const { VALID_CHAT_MODE } = require("../utils/chats/stream");
const EmbedConfig = {
  writable: [
    // Used for generic updates so we can validate keys in request body
    "enabled",
    "allowlist_domains",
    "allow_model_override",
    "allow_temperature_override",
    "allow_prompt_override",
    "max_chats_per_day",
    "max_chats_per_session",
    "chat_mode",
    "workspace_id",
    "message_limit",
  ],

  /**
   * Creates a new embed config attached to a workspace.
   * All fields are normalized via validatedCreationData before insert.
   * @param {object} data - raw creation fields from the request body
   * @param {number|null} creatorId - user id of the creator, if any
   * @returns {Promise<{embed: object|null, message: string|null}>}
   */
  new: async function (data, creatorId = null) {
    try {
      const embed = await prisma.embed_configs.create({
        data: {
          uuid: v4(),
          enabled: true,
          chat_mode: validatedCreationData(data?.chat_mode, "chat_mode"),
          allowlist_domains: validatedCreationData(
            data?.allowlist_domains,
            "allowlist_domains"
          ),
          allow_model_override: validatedCreationData(
            data?.allow_model_override,
            "allow_model_override"
          ),
          allow_temperature_override: validatedCreationData(
            data?.allow_temperature_override,
            "allow_temperature_override"
          ),
          allow_prompt_override: validatedCreationData(
            data?.allow_prompt_override,
            "allow_prompt_override"
          ),
          max_chats_per_day: validatedCreationData(
            data?.max_chats_per_day,
            "max_chats_per_day"
          ),
          max_chats_per_session: validatedCreationData(
            data?.max_chats_per_session,
            "max_chats_per_session"
          ),
          message_limit: validatedCreationData(
            data?.message_limit,
            "message_limit"
          ),
          // NOTE(review): Number(null) is 0, so the "?? null" here never
          // applies and anonymous creators are stored as 0 — confirm whether
          // 0 or null is the intended sentinel before changing.
          createdBy: Number(creatorId) ?? null,
          workspace: {
            connect: { id: Number(data.workspace_id) },
          },
        },
      });
      return { embed, message: null };
    } catch (error) {
      console.error(error.message);
      return { embed: null, message: error.message };
    }
  },

  /**
   * Updates writable fields on an embed config.
   * @param {number} embedId - db id of the embed config
   * @param {object} data - raw update fields; non-writable keys are ignored
   * @returns {Promise<{success?: boolean, error?: string|null, embed?: object, message?: string}>}
   */
  update: async function (embedId = null, data = {}) {
    if (!embedId) throw new Error("No embed id provided for update");
    const validKeys = Object.keys(data).filter((key) =>
      this.writable.includes(key)
    );
    if (validKeys.length === 0)
      // Fix: previously referenced an undefined `id`, throwing a
      // ReferenceError whenever no writable fields were supplied.
      return {
        embed: { id: Number(embedId) },
        message: "No valid fields to update!",
      };

    const updates = {};
    validKeys.forEach((key) => {
      updates[key] = validatedCreationData(data[key], key);
    });

    try {
      await prisma.embed_configs.update({
        where: { id: Number(embedId) },
        data: updates,
      });
      return { success: true, error: null };
    } catch (error) {
      console.error(error.message);
      return { success: false, error: error.message };
    }
  },

  /** First embed config matching `clause`, or null. */
  get: async function (clause = {}) {
    try {
      const embedConfig = await prisma.embed_configs.findFirst({
        where: clause,
      });
      return embedConfig || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },

  /** Same as get(), but eagerly loads the related workspace record. */
  getWithWorkspace: async function (clause = {}) {
    try {
      const embedConfig = await prisma.embed_configs.findFirst({
        where: clause,
        include: {
          workspace: true,
        },
      });
      return embedConfig || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },

  /** Delete a single embed config matching `clause`; success boolean. */
  delete: async function (clause = {}) {
    try {
      await prisma.embed_configs.delete({
        where: clause,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },

  /** All embed configs matching `clause`, optionally limited/ordered. */
  where: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const results = await prisma.embed_configs.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return results;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },

  /** Like where(), but includes the workspace and the embed chat count. */
  whereWithWorkspace: async function (
    clause = {},
    limit = null,
    orderBy = null
  ) {
    try {
      const results = await prisma.embed_configs.findMany({
        where: clause,
        include: {
          workspace: true,
          _count: {
            select: { embed_chats: true },
          },
        },
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return results;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },

  // Will return null if process should be skipped
  // an empty array means the system will check. This
  // prevents a bad parse from allowing all requests
  parseAllowedHosts: function (embed) {
    if (!embed.allowlist_domains) return null;

    try {
      return JSON.parse(embed.allowlist_domains);
    } catch {
      console.error(`Failed to parse allowlist_domains for Embed ${embed.id}!`);
      return [];
    }
  },
};
// Embed-config fields that validatedCreationData coerces to strict booleans.
const BOOLEAN_KEYS = [
  "allow_model_override",
  "allow_temperature_override",
  "allow_prompt_override",
  "enabled",
];

// Embed-config fields that validatedCreationData coerces to positive numbers
// (anything non-numeric or <= 0 becomes null).
const NUMBER_KEYS = [
  "max_chats_per_day",
  "max_chats_per_session",
  "workspace_id",
  "message_limit",
];
// Helper to validate a data object strictly into the proper format
// Helper to strictly normalize a single embed-config field value into the
// format stored in the database. Unknown fields yield null.
function validatedCreationData(value, field) {
  if (field === "chat_mode")
    // Fall back to "query" mode for anything unrecognized.
    return !value || !VALID_CHAT_MODE.includes(value) ? "query" : value;

  if (field === "allowlist_domains") {
    if (!value) return null;
    try {
      // Force every comma-separated entry through the URL constructor and
      // keep only the ones that parse; store the result as a JSON string.
      const domains = [];
      for (const entry of value.split(",")) {
        const candidate =
          entry.includes("http://") || entry.includes("https://")
            ? entry
            : `https://${entry}`;
        try {
          new URL(candidate);
          domains.push(candidate);
        } catch {
          // Unparseable entries are silently dropped.
        }
      }
      return JSON.stringify(domains);
    } catch {
      return null;
    }
  }

  if (BOOLEAN_KEYS.includes(field))
    // Only literal true/false pass through; everything else becomes false.
    return typeof value === "boolean" ? value : false;

  if (NUMBER_KEYS.includes(field))
    return isNaN(value) || Number(value) <= 0 ? null : Number(value);

  return null;
}
module.exports = { EmbedConfig };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/invite.js | server/models/invite.js | const { safeJsonParse } = require("../utils/http");
const prisma = require("../utils/prisma");
/**
 * Model wrapper for the `invites` table — single-use invitation codes that
 * let a new user register and optionally be auto-joined to workspaces.
 */
const Invite = {
  // Generates a random invite code (uuid-apikey "API key" format).
  makeCode: () => {
    const uuidAPIKey = require("uuid-apikey");
    return uuidAPIKey.create().apiKey;
  },
  /**
   * Creates a new invite record.
   * @param {{createdByUserId?: number, workspaceIds?: number[]}} params -
   *  id of the creating user and workspace ids the claimant should join.
   * @returns {Promise<{invite: object|null, error: string|null}>}
   */
  create: async function ({ createdByUserId = 0, workspaceIds = [] }) {
    try {
      const invite = await prisma.invites.create({
        data: {
          code: this.makeCode(),
          createdBy: createdByUserId,
          // Stored as a JSON string — no array column type available.
          workspaceIds: JSON.stringify(workspaceIds),
        },
      });
      return { invite, error: null };
    } catch (error) {
      console.error("FAILED TO CREATE INVITE.", error.message);
      return { invite: null, error: error.message };
    }
  },
  /**
   * Disables an invite so it can no longer be claimed.
   * @param {number|string|null} inviteId
   * @returns {Promise<{success: boolean, error: string|null}>}
   */
  deactivate: async function (inviteId = null) {
    try {
      await prisma.invites.update({
        where: { id: Number(inviteId) },
        data: { status: "disabled" },
      });
      return { success: true, error: null };
    } catch (error) {
      console.error(error.message);
      return { success: false, error: error.message };
    }
  },
  /**
   * Marks an invite as claimed by `user` and best-effort adds the user to
   * the invite's workspaces. Workspace-join failures are logged but do not
   * fail the claim itself.
   * @param {number|string|null} inviteId
   * @param {object} user - the claiming user (must have an `id`).
   * @returns {Promise<{success: boolean, error: string|null}>}
   */
  markClaimed: async function (inviteId = null, user) {
    try {
      const invite = await prisma.invites.update({
        where: { id: Number(inviteId) },
        data: { status: "claimed", claimedBy: user.id },
      });
      try {
        if (!!invite?.workspaceIds) {
          const { Workspace } = require("./workspace");
          const { WorkspaceUser } = require("./workspaceUsers");
          // Only join workspaces that still exist — invite may reference
          // workspaces deleted since the invite was created.
          const workspaceIds = (await Workspace.where({})).map(
            (workspace) => workspace.id
          );
          const ids = safeJsonParse(invite.workspaceIds)
            .map((id) => Number(id))
            .filter((id) => workspaceIds.includes(id));
          if (ids.length !== 0) await WorkspaceUser.createMany(user.id, ids);
        }
      } catch (e) {
        console.error(
          "Could not add user to workspaces automatically",
          e.message
        );
      }
      return { success: true, error: null };
    } catch (error) {
      console.error(error.message);
      return { success: false, error: error.message };
    }
  },
  // First invite matching `clause`, or null on miss/error.
  get: async function (clause = {}) {
    try {
      const invite = await prisma.invites.findFirst({ where: clause });
      return invite || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  // Count of invites matching `clause`; 0 on error.
  count: async function (clause = {}) {
    try {
      const count = await prisma.invites.count({ where: clause });
      return count;
    } catch (error) {
      console.error(error.message);
      return 0;
    }
  },
  // Deletes all invites matching `clause`; returns success boolean.
  delete: async function (clause = {}) {
    try {
      await prisma.invites.deleteMany({ where: clause });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  // All invites matching `clause`, optionally capped at `limit`.
  where: async function (clause = {}, limit) {
    try {
      const invites = await prisma.invites.findMany({
        where: clause,
        take: limit || undefined,
      });
      return invites;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Like `where`, but replaces the numeric `claimedBy`/`createdBy` ids on
   * each invite with `{id, username}` objects for display purposes.
   * Note: issues one User lookup per id (N+1); acceptable for admin lists.
   */
  whereWithUsers: async function (clause = {}, limit) {
    const { User } = require("./user");
    try {
      const invites = await this.where(clause, limit);
      for (const invite of invites) {
        if (invite.claimedBy) {
          const acceptedUser = await User.get({ id: invite.claimedBy });
          invite.claimedBy = {
            id: acceptedUser?.id,
            username: acceptedUser?.username,
          };
        }
        if (invite.createdBy) {
          const createdUser = await User.get({ id: invite.createdBy });
          invite.createdBy = {
            id: createdUser?.id,
            username: createdUser?.username,
          };
        }
      }
      return invites;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
};
module.exports = { Invite };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/workspaceChats.js | server/models/workspaceChats.js | const prisma = require("../utils/prisma");
const { safeJSONStringify } = require("../utils/helpers/chat/responses");
/**
 * Model wrapper for the `workspace_chats` table — stores every
 * prompt/response exchange for workspaces, threads, and API sessions.
 */
const WorkspaceChats = {
  /**
   * Persists a single prompt/response exchange.
   * @param {object} params
   * @param {number} params.workspaceId - owning workspace id.
   * @param {string} params.prompt - the user's prompt text.
   * @param {object} params.response - response payload; stored as JSON string.
   * @param {object|null} params.user - chat author (id used), if any.
   * @param {number|null} params.threadId - thread id, null = default thread.
   * @param {boolean} params.include - whether the chat counts toward history.
   * @param {string|null} params.apiSessionId - external API session id, if any.
   * @returns {Promise<{chat: object|null, message: string|null}>}
   */
  new: async function ({
    workspaceId,
    prompt,
    response = {},
    user = null,
    threadId = null,
    include = true,
    apiSessionId = null,
  }) {
    try {
      const chat = await prisma.workspace_chats.create({
        data: {
          workspaceId,
          prompt,
          response: safeJSONStringify(response),
          user_id: user?.id || null,
          thread_id: threadId,
          api_session_id: apiSessionId,
          include,
        },
      });
      return { chat, message: null };
    } catch (error) {
      console.error(error.message);
      return { chat: null, message: error.message };
    }
  },
  /**
   * Default-thread chat history for a specific user in a workspace.
   * Excludes thread chats, API-session chats, and soft-deleted (include=false).
   * @returns {Promise<object[]>} chats, oldest-first unless orderBy given.
   */
  forWorkspaceByUser: async function (
    workspaceId = null,
    userId = null,
    limit = null,
    orderBy = null
  ) {
    if (!workspaceId || !userId) return [];
    try {
      const chats = await prisma.workspace_chats.findMany({
        where: {
          workspaceId,
          user_id: userId,
          thread_id: null, // this function is now only used for the default thread on workspaces and users
          api_session_id: null, // do not include api-session chats in the frontend for anyone.
          include: true,
        },
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : { orderBy: { id: "asc" } }),
      });
      return chats;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Chat history for an external API session (no user, no thread).
   * @returns {Promise<object[]>} chats, oldest-first unless orderBy given.
   */
  forWorkspaceByApiSessionId: async function (
    workspaceId = null,
    apiSessionId = null,
    limit = null,
    orderBy = null
  ) {
    if (!workspaceId || !apiSessionId) return [];
    try {
      const chats = await prisma.workspace_chats.findMany({
        where: {
          workspaceId,
          user_id: null,
          api_session_id: String(apiSessionId),
          thread_id: null,
        },
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : { orderBy: { id: "asc" } }),
      });
      return chats;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Default-thread chat history for a workspace regardless of user.
   * @returns {Promise<object[]>} chats, oldest-first unless orderBy given.
   */
  forWorkspace: async function (
    workspaceId = null,
    limit = null,
    orderBy = null
  ) {
    if (!workspaceId) return [];
    try {
      const chats = await prisma.workspace_chats.findMany({
        where: {
          workspaceId,
          thread_id: null, // this function is now only used for the default thread on workspaces
          api_session_id: null, // do not include api-session chats in the frontend for anyone.
          include: true,
        },
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : { orderBy: { id: "asc" } }),
      });
      return chats;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Soft-deletes (include=false) a user's default-thread history in a workspace.
   * @deprecated Use markThreadHistoryInvalidV2 instead.
   */
  markHistoryInvalid: async function (workspaceId = null, user = null) {
    if (!workspaceId) return;
    try {
      await prisma.workspace_chats.updateMany({
        where: {
          workspaceId,
          user_id: user?.id,
          thread_id: null, // this function is now only used for the default thread on workspaces
        },
        data: {
          include: false,
        },
      });
      return;
    } catch (error) {
      console.error(error.message);
    }
  },
  /**
   * Soft-deletes (include=false) a user's history in a specific thread.
   * @deprecated Use markThreadHistoryInvalidV2 instead.
   */
  markThreadHistoryInvalid: async function (
    workspaceId = null,
    user = null,
    threadId = null
  ) {
    if (!workspaceId || !threadId) return;
    try {
      await prisma.workspace_chats.updateMany({
        where: {
          workspaceId,
          thread_id: threadId,
          user_id: user?.id,
        },
        data: {
          include: false,
        },
      });
      return;
    } catch (error) {
      console.error(error.message);
    }
  },
  /**
   * @description This function is used to mark a thread's history as invalid.
   * and works with an arbitrary where clause.
   * @param {Object} whereClause - The where clause to update the chats.
   * @param {Object} data - The data to update the chats with.
   * @returns {Promise<void>}
   */
  markThreadHistoryInvalidV2: async function (whereClause = {}) {
    if (!whereClause) return;
    try {
      await prisma.workspace_chats.updateMany({
        where: whereClause,
        data: {
          include: false,
        },
      });
      return;
    } catch (error) {
      console.error(error.message);
    }
  },
  // First chat matching `clause` (optionally ordered), or null on miss/error.
  get: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const chat = await prisma.workspace_chats.findFirst({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return chat || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  // Hard-deletes all chats matching `clause`; returns success boolean.
  delete: async function (clause = {}) {
    try {
      await prisma.workspace_chats.deleteMany({
        where: clause,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  // Generic paginated finder; returns [] on error.
  where: async function (
    clause = {},
    limit = null,
    orderBy = null,
    offset = null
  ) {
    try {
      const chats = await prisma.workspace_chats.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(offset !== null ? { skip: offset } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return chats;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  // Count of chats matching `clause`; 0 on error.
  count: async function (clause = {}) {
    try {
      const count = await prisma.workspace_chats.count({
        where: clause,
      });
      return count;
    } catch (error) {
      console.error(error.message);
      return 0;
    }
  },
  /**
   * Like `where`, but decorates each chat with `workspace` ({name, slug})
   * and `user` ({username}) display objects. API-session chats are labelled
   * "API", missing workspaces "deleted workspace".
   * Note: issues one Workspace/User lookup per row (N+1).
   */
  whereWithData: async function (
    clause = {},
    limit = null,
    offset = null,
    orderBy = null
  ) {
    const { Workspace } = require("./workspace");
    const { User } = require("./user");
    try {
      const results = await this.where(clause, limit, orderBy, offset);
      for (const res of results) {
        const workspace = await Workspace.get({ id: res.workspaceId });
        res.workspace = workspace
          ? { name: workspace.name, slug: workspace.slug }
          : { name: "deleted workspace", slug: null };
        const user = res.user_id ? await User.get({ id: res.user_id }) : null;
        res.user = user
          ? { username: user.username }
          : { username: res.api_session_id !== null ? "API" : "unknown user" };
      }
      return results;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Sets a chat's feedback score: null clears it, otherwise stored as a
   * boolean (1 => true/thumbs-up, anything else => false).
   */
  updateFeedbackScore: async function (chatId = null, feedbackScore = null) {
    if (!chatId) return;
    try {
      await prisma.workspace_chats.update({
        where: {
          id: Number(chatId),
        },
        data: {
          feedbackScore:
            feedbackScore === null ? null : Number(feedbackScore) === 1,
        },
      });
      return;
    } catch (error) {
      console.error(error.message);
    }
  },
  // Explicit update of settings + key validations.
  // Only use this method when directly setting a key value
  // that takes no user input for the keys being modified.
  _update: async function (id = null, data = {}) {
    if (!id) throw new Error("No workspace chat id provided for update");
    try {
      await prisma.workspace_chats.update({
        where: { id },
        data,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  /**
   * Inserts many chats one-by-one (not atomic — earlier rows persist if a
   * later insert fails).
   * @param {object[]} chatsData - raw row data objects for prisma.create.
   * @returns {Promise<{chats: object[]|null, message: string|null}>}
   */
  bulkCreate: async function (chatsData) {
    // TODO: Replace with createMany when we update prisma to latest version
    // The version of prisma that we are currently using does not support createMany with SQLite
    try {
      const createdChats = [];
      for (const chatData of chatsData) {
        const chat = await prisma.workspace_chats.create({
          data: chatData,
        });
        createdChats.push(chat);
      }
      return { chats: createdChats, message: null };
    } catch (error) {
      console.error(error.message);
      return { chats: null, message: error.message };
    }
  },
};
module.exports = { WorkspaceChats };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/workspaceUsers.js | server/models/workspaceUsers.js | const prisma = require("../utils/prisma");
/**
 * Model wrapper for the `workspace_users` join table — maps which users
 * belong to which workspaces.
 */
const WorkspaceUser = {
  /**
   * Adds one user to many workspaces (transactional — all or nothing).
   * @param {number} userId
   * @param {number[]} workspaceIds
   * @returns {Promise<void>}
   */
  createMany: async function (userId, workspaceIds = []) {
    if (workspaceIds.length === 0) return;
    try {
      await prisma.$transaction(
        workspaceIds.map((workspaceId) =>
          prisma.workspace_users.create({
            data: { user_id: userId, workspace_id: workspaceId },
          })
        )
      );
    } catch (error) {
      console.error(error.message);
    }
    return;
  },
  /**
   * Create many workspace users.
   * @param {Array<number>} userIds - An array of user IDs to create workspace users for.
   * @param {number} workspaceId - The ID of the workspace to create workspace users for.
   * @returns {Promise<void>} A promise that resolves when the workspace users are created.
   */
  createManyUsers: async function (userIds = [], workspaceId) {
    if (userIds.length === 0) return;
    try {
      // Transactional — either every user is added or none are.
      await prisma.$transaction(
        userIds.map((userId) =>
          prisma.workspace_users.create({
            data: {
              user_id: Number(userId),
              workspace_id: Number(workspaceId),
            },
          })
        )
      );
    } catch (error) {
      console.error(error.message);
    }
    return;
  },
  /**
   * Adds a single user to a single workspace.
   * @returns {Promise<boolean>} true on success, false on failure.
   */
  create: async function (userId = 0, workspaceId = 0) {
    try {
      await prisma.workspace_users.create({
        data: { user_id: Number(userId), workspace_id: Number(workspaceId) },
      });
      return true;
    } catch (error) {
      console.error(
        "FAILED TO CREATE WORKSPACE_USER RELATIONSHIP.",
        error.message
      );
      return false;
    }
  },
  // First membership row matching `clause`, or null on miss/error.
  get: async function (clause = {}) {
    try {
      const result = await prisma.workspace_users.findFirst({ where: clause });
      return result || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  // All membership rows matching `clause`, optionally capped at `limit`.
  where: async function (clause = {}, limit = null) {
    try {
      const results = await prisma.workspace_users.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
      });
      return results;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  // Count of membership rows matching `clause`; 0 on error.
  count: async function (clause = {}) {
    try {
      const count = await prisma.workspace_users.count({ where: clause });
      return count;
    } catch (error) {
      console.error(error.message);
      return 0;
    }
  },
  // Removes all membership rows matching `clause`; errors are logged only.
  delete: async function (clause = {}) {
    try {
      await prisma.workspace_users.deleteMany({ where: clause });
    } catch (error) {
      console.error(error.message);
    }
    return;
  },
};
module.exports.WorkspaceUser = WorkspaceUser;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/systemSettings.js | server/models/systemSettings.js | process.env.NODE_ENV === "development"
? require("dotenv").config({ path: `.env.${process.env.NODE_ENV}` })
: require("dotenv").config();
const { default: slugify } = require("slugify");
const { isValidUrl, safeJsonParse } = require("../utils/http");
const prisma = require("../utils/prisma");
const { v4 } = require("uuid");
const { MetaGenerator } = require("../utils/boot/MetaGenerator");
const { PGVector } = require("../utils/vectorDbProviders/pgvector");
const { NativeEmbedder } = require("../utils/EmbeddingEngines/native");
const { getBaseLLMProviderModel } = require("../utils/helpers");
// True when `value` is null or does not coerce to a valid number.
// Deliberately uses the coercing global isNaN so non-numeric strings
// (e.g. "abc") are also rejected by the numeric setting validators below.
function isNullOrNaN(value) {
  return value === null || isNaN(value);
}
/**
 * Model wrapper for the `system_settings` table plus helpers that expose
 * the instance's env-driven configuration to the frontend.
 */
const SystemSettings = {
  /** A default system prompt that is used when no other system prompt is set or available to the function caller. */
  saneDefaultSystemPrompt:
    "Given the following conversation, relevant context, and a follow up question, reply with an answer to the current question the user is asking. Return only your response to the question given the above information following the users instructions as needed.",
  // Labels that require special handling elsewhere — presumably excluded
  // from generic exposure/updates; usage not visible in this file (verify at call sites).
  protectedFields: ["multi_user_mode", "hub_api_key"],
  // Labels safe to expose without authentication (see public endpoints).
  publicFields: [
    "footer_data",
    "support_email",
    "text_splitter_chunk_size",
    "text_splitter_chunk_overlap",
    "max_embed_chunk_size",
    "agent_search_provider",
    "agent_sql_connections",
    "default_agent_skills",
    "disabled_agent_skills",
    "imported_agent_skills",
    "custom_app_name",
    "feature_flags",
    "meta_page_title",
    "meta_page_favicon",
  ],
  // The only labels `updateSettings` will accept from user input; anything
  // else is silently dropped before persistence.
  supportedFields: [
    "logo_filename",
    "telemetry_id",
    "footer_data",
    "support_email",
    "text_splitter_chunk_size",
    "text_splitter_chunk_overlap",
    "agent_search_provider",
    "default_agent_skills",
    "disabled_agent_skills",
    "agent_sql_connections",
    "custom_app_name",
    "default_system_prompt",
    // Meta page customization
    "meta_page_title",
    "meta_page_favicon",
    // beta feature flags
    "experimental_live_file_sync",
    // Hub settings
    "hub_api_key",
  ],
  // Per-label sanitizers run by _updateSettings before a value is persisted.
  // Each returns the normalized value to store (null = store NULL); several
  // have side effects (vector-cache purge, meta config reset) noted inline.
  validations: {
    // Must be a JSON array of {url,...}; invalid URLs dropped, max 3 items.
    footer_data: (updates) => {
      try {
        const array = JSON.parse(updates)
          .filter((setting) => isValidUrl(setting.url))
          .slice(0, 3); // max of 3 items in footer.
        return JSON.stringify(array);
      } catch (e) {
        console.error(`Failed to run validation function on footer_data`);
        return JSON.stringify([]);
      }
    },
    // Positive number required; falls back to 1000. Side effect: purges the
    // entire vector cache since chunking parameters changed.
    text_splitter_chunk_size: (update) => {
      try {
        if (isNullOrNaN(update)) throw new Error("Value is not a number.");
        if (Number(update) <= 0) throw new Error("Value must be non-zero.");
        const { purgeEntireVectorCache } = require("../utils/files");
        purgeEntireVectorCache();
        return Number(update);
      } catch (e) {
        console.error(
          `Failed to run validation function on text_splitter_chunk_size`,
          e.message
        );
        return 1000;
      }
    },
    // Non-negative number required; falls back to 20. Side effect: purges
    // the entire vector cache since chunking parameters changed.
    text_splitter_chunk_overlap: (update) => {
      try {
        if (isNullOrNaN(update)) throw new Error("Value is not a number");
        if (Number(update) < 0) throw new Error("Value cannot be less than 0.");
        const { purgeEntireVectorCache } = require("../utils/files");
        purgeEntireVectorCache();
        return Number(update);
      } catch (e) {
        console.error(
          `Failed to run validation function on text_splitter_chunk_overlap`,
          e.message
        );
        return 20;
      }
    },
    // Must be one of the known SERP providers; "none" or invalid => null.
    agent_search_provider: (update) => {
      try {
        if (update === "none") return null;
        if (
          ![
            "google-search-engine",
            "serpapi",
            "searchapi",
            "serper-dot-dev",
            "bing-search",
            "serply-engine",
            "searxng-engine",
            "tavily-search",
            "duckduckgo-engine",
            "exa-search",
          ].includes(update)
        )
          throw new Error("Invalid SERP provider.");
        return String(update);
      } catch (e) {
        console.error(
          `Failed to run validation function on agent_search_provider`,
          e.message
        );
        return null;
      }
    },
    // Comma-separated skill names -> JSON array string (empties dropped).
    default_agent_skills: (updates) => {
      try {
        const skills = updates.split(",").filter((skill) => !!skill);
        return JSON.stringify(skills);
      } catch (e) {
        console.error(`Could not validate agent skills.`);
        return JSON.stringify([]);
      }
    },
    // Comma-separated skill names -> JSON array string (empties dropped).
    disabled_agent_skills: (updates) => {
      try {
        const skills = updates.split(",").filter((skill) => !!skill);
        return JSON.stringify(skills);
      } catch (e) {
        console.error(`Could not validate disabled agent skills.`);
        return JSON.stringify([]);
      }
    },
    // Async: merges incoming connection list into the stored one via
    // mergeConnections (helper defined elsewhere in this module); keeps the
    // existing list on merge failure.
    agent_sql_connections: async (updates) => {
      const existingConnections = safeJsonParse(
        (await SystemSettings.get({ label: "agent_sql_connections" }))?.value,
        []
      );
      try {
        const updatedConnections = mergeConnections(
          existingConnections,
          safeJsonParse(updates, [])
        );
        return JSON.stringify(updatedConnections);
      } catch (e) {
        console.error(`Failed to merge connections`);
        return JSON.stringify(existingConnections ?? []);
      }
    },
    // Accepts boolean or "enabled"/"disabled"; anything else => "disabled".
    experimental_live_file_sync: (update) => {
      if (typeof update === "boolean")
        return update === true ? "enabled" : "disabled";
      if (!["enabled", "disabled"].includes(update)) return "disabled";
      return String(update);
    },
    // Non-empty string or null. Always resets the cached meta page config.
    meta_page_title: (newTitle) => {
      try {
        if (typeof newTitle !== "string" || !newTitle) return null;
        return String(newTitle);
      } catch {
        return null;
      } finally {
        new MetaGenerator().clearConfig();
      }
    },
    // Must parse as a URL, else null. Always resets the cached meta config.
    meta_page_favicon: (faviconUrl) => {
      if (!faviconUrl) return null;
      try {
        const url = new URL(faviconUrl);
        return url.toString();
      } catch {
        return null;
      } finally {
        new MetaGenerator().clearConfig();
      }
    },
    // Any truthy value is stored as a string; falsy clears the key.
    hub_api_key: (apiKey) => {
      if (!apiKey) return null;
      return String(apiKey);
    },
    // Trimmed non-empty string or null; the sane default is stored verbatim.
    default_system_prompt: (prompt) => {
      if (typeof prompt !== "string" || !prompt) return null;
      if (prompt.trim() === SystemSettings.saneDefaultSystemPrompt)
        return SystemSettings.saneDefaultSystemPrompt;
      return String(prompt.trim());
    },
  },
  /**
   * Snapshot of the instance's env-driven configuration for the frontend.
   * Secrets are masked to booleans (!!) — only presence is revealed, never
   * the value itself.
   * @returns {Promise<object>} flat key/value settings object.
   */
  currentSettings: async function () {
    const { hasVectorCachedFiles } = require("../utils/files");
    const llmProvider = process.env.LLM_PROVIDER;
    const vectorDB = process.env.VECTOR_DB;
    const embeddingEngine = process.env.EMBEDDING_ENGINE ?? "native";
    return {
      // --------------------------------------------------------
      // General Settings
      // --------------------------------------------------------
      RequiresAuth: !!process.env.AUTH_TOKEN,
      AuthToken: !!process.env.AUTH_TOKEN,
      JWTSecret: !!process.env.JWT_SECRET,
      StorageDir: process.env.STORAGE_DIR,
      MultiUserMode: await this.isMultiUserMode(),
      DisableTelemetry: process.env.DISABLE_TELEMETRY || "false",
      // --------------------------------------------------------
      // Embedder Provider Selection Settings & Configs
      // --------------------------------------------------------
      EmbeddingEngine: embeddingEngine,
      HasExistingEmbeddings: await this.hasEmbeddings(), // check if they have any currently embedded documents active in workspaces.
      HasCachedEmbeddings: hasVectorCachedFiles(), // check if they any currently cached embedded docs.
      EmbeddingBasePath: process.env.EMBEDDING_BASE_PATH,
      EmbeddingModelPref:
        embeddingEngine === "native"
          ? NativeEmbedder._getEmbeddingModel()
          : process.env.EMBEDDING_MODEL_PREF,
      EmbeddingModelMaxChunkLength:
        process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH,
      OllamaEmbeddingBatchSize: process.env.OLLAMA_EMBEDDING_BATCH_SIZE || 1,
      VoyageAiApiKey: !!process.env.VOYAGEAI_API_KEY,
      GenericOpenAiEmbeddingApiKey:
        !!process.env.GENERIC_OPEN_AI_EMBEDDING_API_KEY,
      GenericOpenAiEmbeddingMaxConcurrentChunks:
        process.env.GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS || 500,
      GeminiEmbeddingApiKey: !!process.env.GEMINI_EMBEDDING_API_KEY,
      // --------------------------------------------------------
      // VectorDB Provider Selection Settings & Configs
      // --------------------------------------------------------
      VectorDB: vectorDB,
      ...this.vectorDBPreferenceKeys(),
      // --------------------------------------------------------
      // LLM Provider Selection Settings & Configs
      // --------------------------------------------------------
      LLMProvider: llmProvider,
      LLMModel: getBaseLLMProviderModel({ provider: llmProvider }) || null,
      ...this.llmPreferenceKeys(),
      // --------------------------------------------------------
      // Whisper (Audio transcription) Selection Settings & Configs
      // - Currently the only 3rd party is OpenAI, so is OPEN_AI_KEY is set
      // - then it can be shared.
      // --------------------------------------------------------
      WhisperProvider: process.env.WHISPER_PROVIDER || "local",
      WhisperModelPref:
        process.env.WHISPER_MODEL_PREF || "Xenova/whisper-small",
      // --------------------------------------------------------
      // TTS/STT Selection Settings & Configs
      // - Currently the only 3rd party is OpenAI or the native browser-built in
      // --------------------------------------------------------
      TextToSpeechProvider: process.env.TTS_PROVIDER || "native",
      TTSOpenAIKey: !!process.env.TTS_OPEN_AI_KEY,
      TTSOpenAIVoiceModel: process.env.TTS_OPEN_AI_VOICE_MODEL,
      // Eleven Labs TTS
      TTSElevenLabsKey: !!process.env.TTS_ELEVEN_LABS_KEY,
      TTSElevenLabsVoiceModel: process.env.TTS_ELEVEN_LABS_VOICE_MODEL,
      // Piper TTS
      TTSPiperTTSVoiceModel:
        process.env.TTS_PIPER_VOICE_MODEL ?? "en_US-hfc_female-medium",
      // OpenAI Generic TTS
      TTSOpenAICompatibleKey: !!process.env.TTS_OPEN_AI_COMPATIBLE_KEY,
      TTSOpenAICompatibleModel: process.env.TTS_OPEN_AI_COMPATIBLE_MODEL,
      TTSOpenAICompatibleVoiceModel:
        process.env.TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL,
      TTSOpenAICompatibleEndpoint: process.env.TTS_OPEN_AI_COMPATIBLE_ENDPOINT,
      // --------------------------------------------------------
      // Agent Settings & Configs
      // --------------------------------------------------------
      AgentGoogleSearchEngineId: process.env.AGENT_GSE_CTX || null,
      AgentGoogleSearchEngineKey: !!process.env.AGENT_GSE_KEY || null,
      AgentSerpApiKey: !!process.env.AGENT_SERPAPI_API_KEY || null,
      AgentSerpApiEngine: process.env.AGENT_SERPAPI_ENGINE || "google",
      AgentSearchApiKey: !!process.env.AGENT_SEARCHAPI_API_KEY || null,
      AgentSearchApiEngine: process.env.AGENT_SEARCHAPI_ENGINE || "google",
      AgentSerperApiKey: !!process.env.AGENT_SERPER_DEV_KEY || null,
      AgentBingSearchApiKey: !!process.env.AGENT_BING_SEARCH_API_KEY || null,
      AgentSerplyApiKey: !!process.env.AGENT_SERPLY_API_KEY || null,
      AgentSearXNGApiUrl: process.env.AGENT_SEARXNG_API_URL || null,
      AgentTavilyApiKey: !!process.env.AGENT_TAVILY_API_KEY || null,
      AgentExaApiKey: !!process.env.AGENT_EXA_API_KEY || null,
      // --------------------------------------------------------
      // Compliance Settings
      // --------------------------------------------------------
      // Disable View Chat History for the whole instance.
      DisableViewChatHistory:
        "DISABLE_VIEW_CHAT_HISTORY" in process.env || false,
      // --------------------------------------------------------
      // Simple SSO Settings
      // --------------------------------------------------------
      SimpleSSOEnabled: "SIMPLE_SSO_ENABLED" in process.env || false,
      SimpleSSONoLogin: "SIMPLE_SSO_NO_LOGIN" in process.env || false,
      SimpleSSONoLoginRedirect: this.simpleSSO.noLoginRedirect(),
    };
  },
get: async function (clause = {}) {
try {
const setting = await prisma.system_settings.findFirst({ where: clause });
return setting || null;
} catch (error) {
console.error(error.message);
return null;
}
},
getValueOrFallback: async function (clause = {}, fallback = null) {
try {
return (await this.get(clause))?.value ?? fallback;
} catch (error) {
console.error(error.message);
return fallback;
}
},
where: async function (clause = {}, limit) {
try {
const settings = await prisma.system_settings.findMany({
where: clause,
take: limit || undefined,
});
return settings;
} catch (error) {
console.error(error.message);
return [];
}
},
// Can take generic keys and will pre-filter invalid keys
// from the set before sending to the explicit update function
// that will then enforce validations as well.
updateSettings: async function (updates = {}) {
const validFields = Object.keys(updates).filter((key) =>
this.supportedFields.includes(key)
);
Object.entries(updates).forEach(([key]) => {
if (validFields.includes(key)) return;
delete updates[key];
});
return this._updateSettings(updates);
},
  // Explicit update of settings + key validations.
  // Only use this method when directly setting a key value
  // that takes no user input for the keys being modified.
  /**
   * Upserts each label/value pair, running the matching `validations`
   * sanitizer first when one exists. Values are stored as strings
   * (null stays NULL). All upserts run concurrently via Promise.all.
   * @param {Object} updates - label/value pairs to persist.
   * @returns {Promise<{success: boolean, error: string|null}>}
   */
  _updateSettings: async function (updates = {}) {
    try {
      const updatePromises = [];
      for (const key of Object.keys(updates)) {
        let validatedValue = updates[key];
        if (this.validations.hasOwnProperty(key)) {
          // Validators may be sync or async; only await the async ones.
          if (this.validations[key].constructor.name === "AsyncFunction") {
            validatedValue = await this.validations[key](updates[key]);
          } else {
            validatedValue = this.validations[key](updates[key]);
          }
        }
        updatePromises.push(
          prisma.system_settings.upsert({
            where: { label: key },
            update: {
              value: validatedValue === null ? null : String(validatedValue),
            },
            create: {
              label: key,
              value: validatedValue === null ? null : String(validatedValue),
            },
          })
        );
      }
      await Promise.all(updatePromises);
      return { success: true, error: null };
    } catch (error) {
      console.error("FAILED TO UPDATE SYSTEM SETTINGS", error.message);
      return { success: false, error: error.message };
    }
  },
isMultiUserMode: async function () {
try {
const setting = await this.get({ label: "multi_user_mode" });
return setting?.value === "true";
} catch (error) {
console.error(error.message);
return false;
}
},
currentLogoFilename: async function () {
try {
const setting = await this.get({ label: "logo_filename" });
return setting?.value || null;
} catch (error) {
console.error(error.message);
return null;
}
},
hasEmbeddings: async function () {
try {
const { Document } = require("./documents");
const count = await Document.count({}, 1);
return count > 0;
} catch (error) {
console.error(error.message);
return false;
}
},
  /**
   * Vector-DB connection settings exposed to the frontend. API keys,
   * tokens, and passwords are masked to booleans (!!) so only their
   * presence is revealed; endpoints and index names pass through as-is.
   * @returns {object}
   */
  vectorDBPreferenceKeys: function () {
    return {
      // Pinecone DB Keys
      PineConeKey: !!process.env.PINECONE_API_KEY,
      PineConeIndex: process.env.PINECONE_INDEX,
      // Chroma DB Keys
      ChromaEndpoint: process.env.CHROMA_ENDPOINT,
      ChromaApiHeader: process.env.CHROMA_API_HEADER,
      ChromaApiKey: !!process.env.CHROMA_API_KEY,
      // ChromaCloud DB Keys
      ChromaCloudApiKey: !!process.env.CHROMACLOUD_API_KEY,
      ChromaCloudTenant: process.env.CHROMACLOUD_TENANT,
      ChromaCloudDatabase: process.env.CHROMACLOUD_DATABASE,
      // Weaviate DB Keys
      WeaviateEndpoint: process.env.WEAVIATE_ENDPOINT,
      WeaviateApiKey: process.env.WEAVIATE_API_KEY,
      // QDrant DB Keys
      QdrantEndpoint: process.env.QDRANT_ENDPOINT,
      QdrantApiKey: process.env.QDRANT_API_KEY,
      // Milvus DB Keys
      MilvusAddress: process.env.MILVUS_ADDRESS,
      MilvusUsername: process.env.MILVUS_USERNAME,
      MilvusPassword: !!process.env.MILVUS_PASSWORD,
      // Zilliz DB Keys
      ZillizEndpoint: process.env.ZILLIZ_ENDPOINT,
      ZillizApiToken: process.env.ZILLIZ_API_TOKEN,
      // AstraDB Keys
      AstraDBApplicationToken: process?.env?.ASTRA_DB_APPLICATION_TOKEN,
      AstraDBEndpoint: process?.env?.ASTRA_DB_ENDPOINT,
      // PGVector Keys
      PGVectorConnectionString: !!PGVector.connectionString() || false,
      PGVectorTableName: PGVector.tableName(),
    };
  },
llmPreferenceKeys: function () {
return {
// OpenAI Keys
OpenAiKey: !!process.env.OPEN_AI_KEY,
OpenAiModelPref: process.env.OPEN_MODEL_PREF || "gpt-4o",
// Azure + OpenAI Keys
AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
AzureOpenAiModelPref: process.env.OPEN_MODEL_PREF,
AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
AzureOpenAiTokenLimit: process.env.AZURE_OPENAI_TOKEN_LIMIT || 4096,
AzureOpenAiModelType: process.env.AZURE_OPENAI_MODEL_TYPE || "default",
// Anthropic Keys
AnthropicApiKey: !!process.env.ANTHROPIC_API_KEY,
AnthropicModelPref: process.env.ANTHROPIC_MODEL_PREF || "claude-2",
AnthropicCacheControl: process.env.ANTHROPIC_CACHE_CONTROL || "none",
// Gemini Keys
GeminiLLMApiKey: !!process.env.GEMINI_API_KEY,
GeminiLLMModelPref:
process.env.GEMINI_LLM_MODEL_PREF || "gemini-2.0-flash-lite",
GeminiSafetySetting:
process.env.GEMINI_SAFETY_SETTING || "BLOCK_MEDIUM_AND_ABOVE",
// LMStudio Keys
LMStudioBasePath: process.env.LMSTUDIO_BASE_PATH,
LMStudioTokenLimit: process.env.LMSTUDIO_MODEL_TOKEN_LIMIT || null,
LMStudioModelPref: process.env.LMSTUDIO_MODEL_PREF,
// LocalAI Keys
LocalAiApiKey: !!process.env.LOCAL_AI_API_KEY,
LocalAiBasePath: process.env.LOCAL_AI_BASE_PATH,
LocalAiModelPref: process.env.LOCAL_AI_MODEL_PREF,
LocalAiTokenLimit: process.env.LOCAL_AI_MODEL_TOKEN_LIMIT,
// Ollama LLM Keys
OllamaLLMAuthToken: !!process.env.OLLAMA_AUTH_TOKEN,
OllamaLLMBasePath: process.env.OLLAMA_BASE_PATH,
OllamaLLMModelPref: process.env.OLLAMA_MODEL_PREF,
OllamaLLMTokenLimit: process.env.OLLAMA_MODEL_TOKEN_LIMIT || null,
OllamaLLMKeepAliveSeconds: process.env.OLLAMA_KEEP_ALIVE_TIMEOUT ?? 300,
OllamaLLMPerformanceMode: process.env.OLLAMA_PERFORMANCE_MODE ?? "base",
// Novita LLM Keys
NovitaLLMApiKey: !!process.env.NOVITA_LLM_API_KEY,
NovitaLLMModelPref: process.env.NOVITA_LLM_MODEL_PREF,
NovitaLLMTimeout: process.env.NOVITA_LLM_TIMEOUT_MS,
// TogetherAI Keys
TogetherAiApiKey: !!process.env.TOGETHER_AI_API_KEY,
TogetherAiModelPref: process.env.TOGETHER_AI_MODEL_PREF,
// Fireworks AI API Keys
FireworksAiLLMApiKey: !!process.env.FIREWORKS_AI_LLM_API_KEY,
FireworksAiLLMModelPref: process.env.FIREWORKS_AI_LLM_MODEL_PREF,
// Perplexity AI Keys
PerplexityApiKey: !!process.env.PERPLEXITY_API_KEY,
PerplexityModelPref: process.env.PERPLEXITY_MODEL_PREF,
// OpenRouter Keys
OpenRouterApiKey: !!process.env.OPENROUTER_API_KEY,
OpenRouterModelPref: process.env.OPENROUTER_MODEL_PREF,
OpenRouterTimeout: process.env.OPENROUTER_TIMEOUT_MS,
// Mistral AI (API) Keys
MistralApiKey: !!process.env.MISTRAL_API_KEY,
MistralModelPref: process.env.MISTRAL_MODEL_PREF,
// Groq AI API Keys
GroqApiKey: !!process.env.GROQ_API_KEY,
GroqModelPref: process.env.GROQ_MODEL_PREF,
// HuggingFace Dedicated Inference
HuggingFaceLLMEndpoint: process.env.HUGGING_FACE_LLM_ENDPOINT,
HuggingFaceLLMAccessToken: !!process.env.HUGGING_FACE_LLM_API_KEY,
HuggingFaceLLMTokenLimit: process.env.HUGGING_FACE_LLM_TOKEN_LIMIT,
// KoboldCPP Keys
KoboldCPPModelPref: process.env.KOBOLD_CPP_MODEL_PREF,
KoboldCPPBasePath: process.env.KOBOLD_CPP_BASE_PATH,
KoboldCPPTokenLimit: process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT,
KoboldCPPMaxTokens: process.env.KOBOLD_CPP_MAX_TOKENS,
// Text Generation Web UI Keys
TextGenWebUIBasePath: process.env.TEXT_GEN_WEB_UI_BASE_PATH,
TextGenWebUITokenLimit: process.env.TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT,
TextGenWebUIAPIKey: !!process.env.TEXT_GEN_WEB_UI_API_KEY,
// LiteLLM Keys
LiteLLMModelPref: process.env.LITE_LLM_MODEL_PREF,
LiteLLMTokenLimit: process.env.LITE_LLM_MODEL_TOKEN_LIMIT,
LiteLLMBasePath: process.env.LITE_LLM_BASE_PATH,
LiteLLMApiKey: !!process.env.LITE_LLM_API_KEY,
// Moonshot AI Keys
MoonshotAiApiKey: !!process.env.MOONSHOT_AI_API_KEY,
MoonshotAiModelPref:
process.env.MOONSHOT_AI_MODEL_PREF || "moonshot-v1-32k",
// Generic OpenAI Keys
GenericOpenAiBasePath: process.env.GENERIC_OPEN_AI_BASE_PATH,
GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
GenericOpenAiTokenLimit: process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT,
GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY,
GenericOpenAiMaxTokens: process.env.GENERIC_OPEN_AI_MAX_TOKENS,
// Foundry Keys
FoundryBasePath: process.env.FOUNDRY_BASE_PATH,
FoundryModelPref: process.env.FOUNDRY_MODEL_PREF,
FoundryModelTokenLimit: process.env.FOUNDRY_MODEL_TOKEN_LIMIT,
AwsBedrockLLMConnectionMethod:
process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD || "iam",
AwsBedrockLLMAccessKeyId: !!process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
AwsBedrockLLMAccessKey: !!process.env.AWS_BEDROCK_LLM_ACCESS_KEY,
AwsBedrockLLMSessionToken: !!process.env.AWS_BEDROCK_LLM_SESSION_TOKEN,
AwsBedrockLLMAPIKey: !!process.env.AWS_BEDROCK_LLM_API_KEY,
AwsBedrockLLMRegion: process.env.AWS_BEDROCK_LLM_REGION,
AwsBedrockLLMModel: process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE,
AwsBedrockLLMTokenLimit:
process.env.AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT || 8192,
AwsBedrockLLMMaxOutputTokens:
process.env.AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS || 4096,
// Cohere API Keys
CohereApiKey: !!process.env.COHERE_API_KEY,
CohereModelPref: process.env.COHERE_MODEL_PREF,
// DeepSeek API Keys
DeepSeekApiKey: !!process.env.DEEPSEEK_API_KEY,
DeepSeekModelPref: process.env.DEEPSEEK_MODEL_PREF,
// APIPie LLM API Keys
ApipieLLMApiKey: !!process.env.APIPIE_LLM_API_KEY,
ApipieLLMModelPref: process.env.APIPIE_LLM_MODEL_PREF,
// xAI LLM API Keys
XAIApiKey: !!process.env.XAI_LLM_API_KEY,
XAIModelPref: process.env.XAI_LLM_MODEL_PREF,
// NVIDIA NIM Keys
NvidiaNimLLMBasePath: process.env.NVIDIA_NIM_LLM_BASE_PATH,
NvidiaNimLLMModelPref: process.env.NVIDIA_NIM_LLM_MODEL_PREF,
NvidiaNimLLMTokenLimit: process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT,
// PPIO API keys
PPIOApiKey: !!process.env.PPIO_API_KEY,
PPIOModelPref: process.env.PPIO_MODEL_PREF,
// Dell Pro AI Studio Keys
DellProAiStudioBasePath: process.env.DPAIS_LLM_BASE_PATH,
DellProAiStudioModelPref: process.env.DPAIS_LLM_MODEL_PREF,
DellProAiStudioTokenLimit:
process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT ?? 4096,
// CometAPI LLM Keys
CometApiLLMApiKey: !!process.env.COMETAPI_LLM_API_KEY,
CometApiLLMModelPref: process.env.COMETAPI_LLM_MODEL_PREF,
CometApiLLMTimeout: process.env.COMETAPI_LLM_TIMEOUT_MS,
// Z.AI Keys
ZAiApiKey: !!process.env.ZAI_API_KEY,
ZAiModelPref: process.env.ZAI_MODEL_PREF,
// GiteeAI API Keys
GiteeAIApiKey: !!process.env.GITEE_AI_API_KEY,
GiteeAIModelPref: process.env.GITEE_AI_MODEL_PREF,
GiteeAITokenLimit: process.env.GITEE_AI_MODEL_TOKEN_LIMIT || 8192,
};
},
// For special retrieval of a key setting that does not expose any credential information
brief: {
agent_sql_connections: async function () {
const setting = await SystemSettings.get({
label: "agent_sql_connections",
});
if (!setting) return [];
return safeJsonParse(setting.value, []).map((dbConfig) => {
const { connectionString, ...rest } = dbConfig;
return rest;
});
},
},
getFeatureFlags: async function () {
return {
experimental_live_file_sync:
(await SystemSettings.get({ label: "experimental_live_file_sync" }))
?.value === "enabled",
};
},
/**
* Get user configured Community Hub Settings
* Connection key is used to authenticate with the Community Hub API
* for your account.
* @returns {Promise<{connectionKey: string}>}
*/
hubSettings: async function () {
try {
const hubKey = await this.get({ label: "hub_api_key" });
return { connectionKey: hubKey?.value || null };
} catch (error) {
console.error(error.message);
return { connectionKey: null };
}
},
simpleSSO: {
/**
* Gets the no login redirect URL. If the conditions below are not met, this will return null.
* - If simple SSO is not enabled.
* - If simple SSO login page is not disabled.
* - If the no login redirect is not a valid URL or is not set.
* @returns {string | null}
*/
noLoginRedirect: () => {
if (!("SIMPLE_SSO_ENABLED" in process.env)) return null; // if simple SSO is not enabled, return null
if (!("SIMPLE_SSO_NO_LOGIN" in process.env)) return null; // if the no login config is not set, return null
if (!("SIMPLE_SSO_NO_LOGIN_REDIRECT" in process.env)) return null; // if the no login redirect is not set, return null
try {
let url = new URL(process.env.SIMPLE_SSO_NO_LOGIN_REDIRECT);
return url.toString();
} catch {}
// if the no login redirect is not a valid URL or is not set, return null
return null;
},
},
};
/**
 * Merge an existing set of agent SQL connections with a batch of updates.
 * Updates carry an `action` of either "remove" (drop the matching
 * `database_id`) or "add" (append a new connection). Connection strings are
 * NOT validated here, but `database_id`s are made unique across the whole
 * resulting set. Input arrays and their objects are not mutated.
 * @param {Array<{engine: string, database_id: string, connectionString: string}>} existingConnections
 * @param {Array<{action: string, engine?: string, database_id?: string, connectionString?: string}>} updates
 * @returns {Array<{engine: string, database_id: string, connectionString: string}>}
 */
function mergeConnections(existingConnections = [], updates = []) {
  const idsToRemove = new Set(
    updates
      .filter((conn) => conn.action === "remove")
      .map((conn) => conn.database_id)
  );
  const updatedConnections = existingConnections.filter(
    (conn) => !idsToRemove.has(conn.database_id)
  );

  // Track every id already in use — seeded from ALL existing connections
  // (including removed ones, matching prior behavior) and grown as new
  // connections are appended, so two "add" updates sharing the same
  // database_id cannot silently collide with each other.
  const takenDbIds = new Set(
    existingConnections.map((conn) => conn.database_id)
  );

  for (const update of updates) {
    if (update.action !== "add") continue;
    if (!update.connectionString) continue; // invalid connection string

    // Remap the id to be unique to the entire set; never mutate the input.
    const databaseId = takenDbIds.has(update.database_id)
      ? slugify(`${update.database_id}-${v4().slice(0, 4)}`)
      : slugify(update.database_id);
    takenDbIds.add(databaseId);

    updatedConnections.push({
      engine: update.engine,
      database_id: databaseId,
      connectionString: update.connectionString,
    });
  }

  return updatedConnections;
}
module.exports.SystemSettings = SystemSettings;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/documentSyncRun.js | server/models/documentSyncRun.js | const prisma = require("../utils/prisma");
const DocumentSyncRun = {
  // Valid terminal states for a document sync execution record.
  statuses: {
    unknown: "unknown",
    exited: "exited",
    failed: "failed",
    success: "success",
  },

  /**
   * Persist the outcome of a single document sync execution.
   * @param {number|string|null} queueId - id of the owning sync queue entry.
   * @param {string|null} status - must be one of `this.statuses`.
   * @param {object} result - arbitrary result payload; stored as a JSON string.
   * @returns {Promise<object|null>} the created record, or null on any failure
   * (invalid status or DB error — errors are logged, never thrown).
   */
  save: async function (queueId = null, status = null, result = {}) {
    try {
      if (!this.statuses.hasOwnProperty(status))
        throw new Error(
          `DocumentSyncRun status ${status} is not a valid status.`
        );

      const run = await prisma.document_sync_executions.create({
        data: {
          queueId: Number(queueId),
          status: String(status),
          result: JSON.stringify(result),
        },
      });
      return run || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },

  /**
   * Fetch the first sync execution matching the Prisma where-clause.
   * @param {object} clause - Prisma `where` filter.
   * @returns {Promise<object|null>}
   */
  get: async function (clause = {}) {
    try {
      const queue = await prisma.document_sync_executions.findFirst({
        where: clause,
      });
      return queue || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },

  /**
   * Fetch all sync executions matching the clause.
   * NOTE: `include` previously defaulted to `{}` while being guarded with
   * `!== null`, so an empty `include: {}` was always forwarded to Prisma.
   * It now defaults to null and is only forwarded when explicitly provided.
   * @param {object} clause - Prisma `where` filter.
   * @param {number|null} limit - optional `take` limit.
   * @param {object|null} orderBy - optional Prisma `orderBy`.
   * @param {object|null} include - optional Prisma `include` relations.
   * @returns {Promise<object[]>} matching records, or [] on error.
   */
  where: async function (
    clause = {},
    limit = null,
    orderBy = null,
    include = null
  ) {
    try {
      const results = await prisma.document_sync_executions.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
        ...(include !== null ? { include } : {}),
      });
      return results;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },

  /**
   * Count sync executions matching the clause.
   * `orderBy` now defaults to null (was `{}`) so it is only forwarded when set.
   * @param {object} clause - Prisma `where` filter.
   * @param {number|null} limit - optional `take` limit.
   * @param {object|null} orderBy - optional Prisma `orderBy`.
   * @returns {Promise<number>} count, or 0 on error.
   */
  count: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const count = await prisma.document_sync_executions.count({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
      });
      return count;
    } catch (error) {
      console.error("FAILED TO COUNT DOCUMENTS.", error.message);
      return 0;
    }
  },

  /**
   * Delete all sync executions matching the clause.
   * @param {object} clause - Prisma `where` filter.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  delete: async function (clause = {}) {
    try {
      await prisma.document_sync_executions.deleteMany({ where: clause });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
};
module.exports = { DocumentSyncRun };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/workspaceParsedFiles.js | server/models/workspaceParsedFiles.js | const prisma = require("../utils/prisma");
const { EventLogs } = require("./eventLogs");
const { Document } = require("./documents");
const { documentsPath, directUploadsPath } = require("../utils/files");
const { safeJsonParse } = require("../utils/http");
const fs = require("fs");
const path = require("path");
const WorkspaceParsedFiles = {
  /**
   * Create a DB record for a file parsed and attached to a workspace chat,
   * and log a `workspace_file_uploaded` event attributed to the user.
   * @param {object} params
   * @param {string} params.filename - original filename of the parsed file.
   * @param {number|string} params.workspaceId - id of the owning workspace.
   * @param {number|string|null} params.userId - uploading user id, if any.
   * @param {number|string|null} params.threadId - owning thread id, if any.
   * @param {string|null} params.metadata - file metadata stored as given;
   *   parsed later with safeJsonParse, so it is expected to be a JSON string.
   * @param {number} params.tokenCountEstimate - estimated token size of content.
   * @returns {Promise<{file: object|null, error: string|null}>}
   */
  create: async function ({
    filename,
    workspaceId,
    userId = null,
    threadId = null,
    metadata = null,
    tokenCountEstimate = 0,
  }) {
    try {
      const file = await prisma.workspace_parsed_files.create({
        data: {
          filename,
          workspaceId: parseInt(workspaceId),
          userId: userId ? parseInt(userId) : null,
          threadId: threadId ? parseInt(threadId) : null,
          metadata,
          tokenCountEstimate,
        },
      });
      await EventLogs.logEvent(
        "workspace_file_uploaded",
        {
          filename,
          workspaceId,
        },
        userId
      );
      return { file, error: null };
    } catch (error) {
      console.error("FAILED TO CREATE PARSED FILE RECORD.", error.message);
      return { file: null, error: error.message };
    }
  },
  /**
   * Fetch the first parsed-file record matching the Prisma where-clause.
   * @param {object} clause - Prisma `where` filter.
   * @returns {Promise<object|null>} record or null (also null on DB error).
   */
  get: async function (clause = {}) {
    try {
      const file = await prisma.workspace_parsed_files.findFirst({
        where: clause,
      });
      return file;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  /**
   * Fetch all parsed-file records matching the clause.
   * @param {object} clause - Prisma `where` filter.
   * @param {number|null} limit - optional `take` limit.
   * @param {object|null} orderBy - optional Prisma `orderBy`.
   * @param {object|null} select - optional Prisma `select` projection.
   * @returns {Promise<object[]>} matching records, or [] on error.
   */
  where: async function (
    clause = {},
    limit = null,
    orderBy = null,
    select = null
  ) {
    try {
      const files = await prisma.workspace_parsed_files.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
        ...(select !== null ? { select } : {}),
      });
      return files;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Delete all parsed-file records matching the clause.
   * @param {object} clause - Prisma `where` filter.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  delete: async function (clause = {}) {
    try {
      await prisma.workspace_parsed_files.deleteMany({
        where: clause,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  /**
   * Sum the tokenCountEstimate of all records matching the clause.
   * NOTE: unlike the other methods, DB errors here are NOT caught and will
   * propagate to the caller.
   * @param {object} clause - Prisma `where` filter.
   * @returns {Promise<number>} total estimated tokens (0 when no rows match).
   */
  totalTokenCount: async function (clause = {}) {
    const { _sum } = await prisma.workspace_parsed_files.aggregate({
      where: clause,
      _sum: { tokenCountEstimate: true },
    });
    return _sum.tokenCountEstimate || 0;
  },
  /**
   * Promote a direct-upload parsed file into the workspace's document store
   * and embed it: the on-disk file is moved from directUploadsPath into
   * documents/custom-documents, passed to Document.addDocuments for
   * embedding, and the parsed-file DB record is ALWAYS deleted afterwards
   * (success or failure) via the finally block.
   * @param {number|string} fileId - id of the workspace_parsed_files record.
   * @param {object} workspace - workspace record (uses .id and, downstream, .slug).
   * @returns {Promise<{success: boolean, error: string|null, document: object|null}>}
   */
  moveToDocumentsAndEmbed: async function (fileId, workspace) {
    try {
      const parsedFile = await this.get({ id: parseInt(fileId) });
      if (!parsedFile) throw new Error("File not found");
      // Get file location from metadata
      const metadata = safeJsonParse(parsedFile.metadata, {});
      const location = metadata.location;
      if (!location) throw new Error("No file location in metadata");
      // Get file from metadata location
      // basename() guards against path segments in `location` escaping the uploads dir.
      const sourceFile = path.join(directUploadsPath, path.basename(location));
      if (!fs.existsSync(sourceFile)) throw new Error("Source file not found");
      // Move to custom-documents
      const customDocsPath = path.join(documentsPath, "custom-documents");
      if (!fs.existsSync(customDocsPath))
        fs.mkdirSync(customDocsPath, { recursive: true });
      // Copy the file to custom-documents, then remove the original (move).
      const targetPath = path.join(customDocsPath, path.basename(location));
      fs.copyFileSync(sourceFile, targetPath);
      fs.unlinkSync(sourceFile);
      const {
        failedToEmbed = [],
        errors = [],
        embedded = [],
      } = await Document.addDocuments(
        workspace,
        [`custom-documents/${path.basename(location)}`],
        parsedFile.userId
      );
      if (failedToEmbed.length > 0)
        throw new Error(errors[0] || "Failed to embed document");
      const document = await Document.get({
        workspaceId: workspace.id,
        docpath: embedded[0],
      });
      return { success: true, error: null, document };
    } catch (error) {
      console.error("Failed to move and embed file:", error);
      return { success: false, error: error.message, document: null };
    } finally {
      // Always delete the file after processing
      await this.delete({ id: parseInt(fileId) });
    }
  },
  /**
   * Summarize the parsed files currently attached to a workspace/thread
   * context without reading file contents from disk.
   * @param {object} workspace - workspace record (required).
   * @param {object|null} thread - thread record; null matches non-thread files.
   * @param {object|null} user - when given, restricts results to that user's files.
   * @returns {Promise<{files: Array<{id: number, title: string, location: string, token_count_estimate: number}>, contextWindow: number, currentContextTokenCount: number}>}
   *   On error returns an empty list with contextWindow: Infinity.
   */
  getContextMetadataAndLimits: async function (
    workspace,
    thread = null,
    user = null
  ) {
    try {
      if (!workspace) throw new Error("Workspace is required");
      const files = await this.where({
        workspaceId: workspace.id,
        threadId: thread?.id || null,
        ...(user ? { userId: user.id } : {}),
      });
      const results = [];
      let totalTokens = 0;
      for (const file of files) {
        const metadata = safeJsonParse(file.metadata, {});
        totalTokens += file.tokenCountEstimate || 0;
        results.push({
          id: file.id,
          title: metadata.title || metadata.location,
          location: metadata.location,
          token_count_estimate: file.tokenCountEstimate,
        });
      }
      return {
        files: results,
        contextWindow: workspace.contextWindow,
        currentContextTokenCount: totalTokens,
      };
    } catch (error) {
      console.error("Failed to get context metadata:", error);
      return {
        files: [],
        contextWindow: Infinity,
        currentContextTokenCount: 0,
      };
    }
  },
  /**
   * Load the actual page content of each parsed file attached to the
   * workspace/thread context. Files whose metadata lacks a location, whose
   * on-disk source is missing, or whose JSON has no pageContent are skipped
   * silently.
   * @param {object} workspace - workspace record.
   * @param {object|null} thread - thread record; null matches non-thread files.
   * @param {object|null} user - when given, restricts results to that user's files.
   * @returns {Promise<object[]>} entries of {pageContent, token_count_estimate, ...metadata}.
   */
  getContextFiles: async function (workspace, thread = null, user = null) {
    try {
      const files = await this.where({
        workspaceId: workspace.id,
        threadId: thread?.id || null,
        ...(user ? { userId: user.id } : {}),
      });
      const results = [];
      for (const file of files) {
        const metadata = safeJsonParse(file.metadata, {});
        const location = metadata.location;
        if (!location) continue;
        const sourceFile = path.join(
          directUploadsPath,
          path.basename(location)
        );
        if (!fs.existsSync(sourceFile)) continue;
        // Parsed files are stored as JSON documents with a pageContent field.
        const content = fs.readFileSync(sourceFile, "utf-8");
        const data = safeJsonParse(content, null);
        if (!data?.pageContent) continue;
        results.push({
          pageContent: data.pageContent,
          token_count_estimate: file.tokenCountEstimate,
          ...metadata,
        });
      }
      return results;
    } catch (error) {
      console.error("Failed to get context files:", error);
      return [];
    }
  },
};
module.exports = { WorkspaceParsedFiles };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/workspaceAgentInvocation.js | server/models/workspaceAgentInvocation.js | const prisma = require("../utils/prisma");
const { v4: uuidv4 } = require("uuid");
const WorkspaceAgentInvocation = {
  /**
   * Extract every @-handle from a prompt string.
   * A prompt only yields agents when it begins with "@agent".
   * @param {string} promptString
   * @returns {string[]} all whitespace-delimited tokens starting with "@".
   */
  parseAgents: function (promptString) {
    const isAgentInvocation = promptString.startsWith("@agent");
    if (!isAgentInvocation) return [];
    const tokens = promptString.split(/\s+/);
    return tokens.filter((token) => token.startsWith("@"));
  },

  /**
   * Mark an invocation as closed. Best-effort: failures are ignored.
   * @param {string} uuid - invocation uuid.
   */
  close: async function (uuid) {
    if (!uuid) return;
    try {
      await prisma.workspace_agent_invocations.update({
        where: { uuid: String(uuid) },
        data: { closed: true },
      });
    } catch {}
  },

  /**
   * Create a new agent invocation for a workspace (optionally scoped to a
   * user and thread).
   * @param {{prompt: string, workspace: object, user?: object|null, thread?: object|null}} params
   * @returns {Promise<{invocation: object|null, message: string|null}>}
   */
  new: async function ({ prompt, workspace, user = null, thread = null }) {
    try {
      const record = await prisma.workspace_agent_invocations.create({
        data: {
          uuid: uuidv4(),
          workspace_id: workspace.id,
          prompt: String(prompt),
          user_id: user?.id,
          thread_id: thread?.id,
        },
      });
      return { invocation: record, message: null };
    } catch (error) {
      console.error(error.message);
      return { invocation: null, message: error.message };
    }
  },

  /**
   * Fetch the first invocation matching the clause.
   * @param {object} clause - Prisma `where` filter.
   * @returns {Promise<object|null>}
   */
  get: async function (clause = {}) {
    try {
      const record = await prisma.workspace_agent_invocations.findFirst({
        where: clause,
      });
      return record || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },

  /**
   * Fetch the first invocation matching the clause with its workspace
   * relation eagerly loaded.
   * @param {object} clause - Prisma `where` filter.
   * @returns {Promise<object|null>}
   */
  getWithWorkspace: async function (clause = {}) {
    try {
      const record = await prisma.workspace_agent_invocations.findFirst({
        where: clause,
        include: {
          workspace: true,
        },
      });
      return record || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },

  /**
   * Delete a single invocation matching the clause.
   * @param {object} clause - Prisma unique `where` filter.
   * @returns {Promise<boolean>} true on success, false on error.
   */
  delete: async function (clause = {}) {
    try {
      await prisma.workspace_agent_invocations.delete({
        where: clause,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },

  /**
   * Fetch all invocations matching the clause.
   * @param {object} clause - Prisma `where` filter.
   * @param {number|null} limit - optional `take` limit.
   * @param {object|null} orderBy - optional Prisma `orderBy`.
   * @returns {Promise<object[]>} matching records, or [] on error.
   */
  where: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const query = { where: clause };
      if (limit !== null) query.take = limit;
      if (orderBy !== null) query.orderBy = orderBy;
      return await prisma.workspace_agent_invocations.findMany(query);
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
};
module.exports = { WorkspaceAgentInvocation };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/communityHub.js | server/models/communityHub.js | const ImportedPlugin = require("../utils/agents/imported");
/**
* An interface to the AnythingLLM Community Hub external API.
*/
const CommunityHub = {
  importPrefix: "allm-community-id",
  apiBase:
    process.env.NODE_ENV === "development"
      ? "http://127.0.0.1:5001/anythingllm-hub/us-central1/external/v1"
      : "https://hub.external.anythingllm.com/v1",
  supportedStaticItemTypes: ["system-prompt", "agent-flow", "slash-command"],

  /**
   * Validate an import ID and split it into its entity type and entity ID.
   * Expected shape: "<importPrefix>:<entityType>:<entityId>".
   * @param {string} importId - The import ID to validate.
   * @returns {{entityType: string | null, entityId: string | null}}
   */
  validateImportId: function (importId) {
    if (!importId) return { entityType: null, entityId: null };
    if (!importId.startsWith(this.importPrefix))
      return { entityType: null, entityId: null };

    const parts = importId.split(":");
    if (parts.length !== 3) return { entityType: null, entityId: null };

    const [, entityType, entityId] = parts;
    if (!entityType || !entityId) return { entityType: null, entityId: null };
    return {
      entityType: String(entityType).trim(),
      entityId: String(entityId).trim(),
    };
  },

  /**
   * Fetch the publicly available explore items from the community hub.
   * Falls back to empty result sets on any network/parse failure.
   * @returns {Promise<{agentSkills: {items: [], hasMore: boolean, totalCount: number}, systemPrompts: {items: [], hasMore: boolean, totalCount: number}, slashCommands: {items: [], hasMore: boolean, totalCount: number}}>}
   */
  fetchExploreItems: async function () {
    const emptyResultSet = { items: [], hasMore: false, totalCount: 0 };
    try {
      const response = await fetch(`${this.apiBase}/explore`, {
        method: "GET",
      });
      return await response.json();
    } catch (error) {
      console.error("Error fetching explore items:", error);
      return {
        agentSkills: { ...emptyResultSet },
        systemPrompts: { ...emptyResultSet },
        slashCommands: { ...emptyResultSet },
      };
    }
  },

  /**
   * Fetch a bundle item from the community hub.
   * Bundle items are entities that require a downloadURL to be fetched from
   * the hub so we can unzip and import them to the AnythingLLM instance.
   * Sends the hub connection key as a bearer token when one is configured.
   * @param {string} importId - The import ID of the item.
   * @returns {Promise<{url: string | null, item: object | null, error: string | null}>}
   */
  getBundleItem: async function (importId) {
    const { entityType, entityId } = this.validateImportId(importId);
    if (!entityType || !entityId)
      return { item: null, error: "Invalid import ID" };

    const { SystemSettings } = require("./systemSettings");
    const { connectionKey } = await SystemSettings.hubSettings();
    try {
      const response = await fetch(
        `${this.apiBase}/${entityType}/${entityId}/pull`,
        {
          method: "GET",
          headers: {
            "Content-Type": "application/json",
            ...(connectionKey
              ? { Authorization: `Bearer ${connectionKey}` }
              : {}),
          },
        }
      );
      const { url, item, error } = await response.json();
      return { url, item, error };
    } catch (error) {
      console.error(
        `Error fetching bundle item for import ID ${importId}:`,
        error
      );
      return { url: null, item: null, error: error.message };
    }
  },

  /**
   * Apply a simple (static) item to this AnythingLLM instance — system
   * prompts are written onto a workspace, slash commands become presets.
   * @param {object} item - The item to apply.
   * @param {object} options - Additional options for applying the item.
   * @param {object|null} options.currentUser - The current user object.
   * @returns {Promise<{success: boolean, error: string | null}>}
   */
  applyItem: async function (item, options = {}) {
    if (!item) return { success: false, error: "Item is required" };

    switch (item.itemType) {
      case "system-prompt": {
        if (!options?.workspaceSlug)
          return { success: false, error: "Workspace slug is required" };

        const { Workspace } = require("./workspace");
        const workspace = await Workspace.get({
          slug: String(options.workspaceSlug),
        });
        if (!workspace) return { success: false, error: "Workspace not found" };
        await Workspace.update(workspace.id, { openAiPrompt: item.prompt });
        return { success: true, error: null };
      }
      case "slash-command": {
        const { SlashCommandPresets } = require("./slashCommandsPresets");
        await SlashCommandPresets.create(options?.currentUser?.id, {
          command: SlashCommandPresets.formatCommand(String(item.command)),
          prompt: String(item.prompt),
          description: String(item.description),
        });
        return { success: true, error: null };
      }
      default:
        return {
          success: false,
          error: "Unsupported item type. Nothing to apply.",
        };
    }
  },

  /**
   * Import a bundle item into this instance by downloading its zip and
   * delegating to the matching importer (currently agent skills only).
   * @param {{url: string, item: object}} params
   * @returns {Promise<{success: boolean, error: string | null}>}
   */
  importBundleItem: async function ({ url, item }) {
    if (item.itemType !== "agent-skill")
      return {
        success: false,
        error: "Unsupported item type. Nothing to import.",
      };

    const { success, error } = await ImportedPlugin.importCommunityItemFromUrl(
      url,
      item
    );
    return { success, error };
  },

  /**
   * Fetch the hub items owned by or shared with the authenticated user.
   * @param {string} connectionKey - The hub connection key.
   * @returns {Promise<{createdByMe: object, teamItems: Array}>}
   */
  fetchUserItems: async function (connectionKey) {
    if (!connectionKey) return { createdByMe: {}, teamItems: [] };

    try {
      const response = await fetch(`${this.apiBase}/items`, {
        method: "GET",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${connectionKey}`,
        },
      });
      return await response.json();
    } catch (error) {
      console.error("Error fetching user items:", error);
      return { createdByMe: {}, teamItems: [] };
    }
  },

  /**
   * Create a new item in the community hub - Only supports STATIC items for now.
   * @param {string} itemType - The type of item to create
   * @param {object} data - The item data
   * @param {string} connectionKey - The hub connection key
   * @returns {Promise<{success: boolean, error: string | null}>}
   */
  createStaticItem: async function (itemType, data, connectionKey) {
    if (!connectionKey)
      return { success: false, error: "Connection key is required" };
    if (!this.supportedStaticItemTypes.includes(itemType))
      return { success: false, error: "Unsupported item type" };

    // If the item has special considerations or preprocessing, we can
    // delegate that here before sending the request (eg: agent flow files).
    try {
      const response = await fetch(`${this.apiBase}/${itemType}/create`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${connectionKey}`,
        },
        body: JSON.stringify(data),
      });
      const result = await response.json();
      if (result.error) throw new Error(result.error || "Unknown error");
      return { success: true, error: null, itemId: result.item.id };
    } catch (error) {
      console.error(`Error creating ${itemType}:`, error);
      return { success: false, error: error.message };
    }
  },
};
module.exports = { CommunityHub };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/browserExtensionApiKey.js | server/models/browserExtensionApiKey.js | const prisma = require("../utils/prisma");
const { SystemSettings } = require("./systemSettings");
const { ROLES } = require("../utils/middleware/multiUserProtected");
const BrowserExtensionApiKey = {
  /**
   * Creates a new secret for a browser extension API key.
   * @returns {string} brx-*** API key to use with extension
   */
  makeSecret: () => {
    const uuidAPIKey = require("uuid-apikey");
    return `brx-${uuidAPIKey.create().apiKey}`;
  },

  /**
   * Creates a new api key for the browser Extension
   * @param {number|null} userId - User id to associate creation of key with.
   * @returns {Promise<{apiKey: import("@prisma/client").browser_extension_api_keys|null, error:string|null}>}
   */
  create: async function (userId = null) {
    try {
      const record = await prisma.browser_extension_api_keys.create({
        data: {
          key: this.makeSecret(),
          user_id: userId,
        },
      });
      return { apiKey: record, error: null };
    } catch (error) {
      console.error("Failed to create browser extension API key", error);
      return { apiKey: null, error: error.message };
    }
  },

  /**
   * Validated existing API key
   * @param {string} key
   * @returns {Promise<{apiKey: import("@prisma/client").browser_extension_api_keys|boolean}>}
   */
  validate: async function (key) {
    if (!key.startsWith("brx-")) return false;

    const record = await prisma.browser_extension_api_keys.findUnique({
      where: { key: key.toString() },
      include: { user: true },
    });
    if (!record) return false;

    const inMultiUserMode = await SystemSettings.isMultiUserMode();
    if (!inMultiUserMode) return record; // In single-user mode, all keys are valid
    // In multi-user mode, check if the key is associated with a user
    return record.user_id ? record : false;
  },

  /**
   * Fetches browser api key by params.
   * @param {object} clause - Prisma props for search
   * @returns {Promise<{apiKey: import("@prisma/client").browser_extension_api_keys|boolean}>}
   */
  get: async function (clause = {}) {
    try {
      return await prisma.browser_extension_api_keys.findFirst({
        where: clause,
      });
    } catch (error) {
      console.error("FAILED TO GET BROWSER EXTENSION API KEY.", error.message);
      return null;
    }
  },

  /**
   * Deletes browser api key by db id.
   * @param {number} id - database id of browser key
   * @returns {Promise<{success: boolean, error:string|null}>}
   */
  delete: async function (id) {
    try {
      await prisma.browser_extension_api_keys.delete({
        where: { id: parseInt(id) },
      });
      return { success: true, error: null };
    } catch (error) {
      console.error("Failed to delete browser extension API key", error);
      return { success: false, error: error.message };
    }
  },

  /**
   * Gets browser keys by params
   * @param {object} clause
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @returns {Promise<import("@prisma/client").browser_extension_api_keys[]>}
   */
  where: async function (clause = {}, limit = null, orderBy = null) {
    try {
      const query = {
        where: clause,
        include: { user: true },
      };
      if (limit !== null) query.take = limit;
      if (orderBy !== null) query.orderBy = orderBy;
      return await prisma.browser_extension_api_keys.findMany(query);
    } catch (error) {
      console.error("FAILED TO GET BROWSER EXTENSION API KEYS.", error.message);
      return [];
    }
  },

  /**
   * Get browser API keys for user
   * @param {import("@prisma/client").users} user
   * @param {object} clause
   * @param {number|null} limit
   * @param {object|null} orderBy
   * @returns {Promise<import("@prisma/client").browser_extension_api_keys[]>}
   */
  whereWithUser: async function (
    user,
    clause = {},
    limit = null,
    orderBy = null
  ) {
    // Admin can view and use any keys
    if (user.role === ROLES.admin)
      return await this.where(clause, limit, orderBy);

    try {
      const query = {
        where: {
          ...clause,
          user_id: user.id,
        },
        include: { user: true },
      };
      if (limit !== null) query.take = limit;
      if (orderBy !== null) query.orderBy = orderBy;
      return await prisma.browser_extension_api_keys.findMany(query);
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },

  /**
   * Updates owner of all DB ids to new admin.
   * @param {number} userId
   * @returns {Promise<void>}
   */
  migrateApiKeysToMultiUser: async function (userId) {
    try {
      await prisma.browser_extension_api_keys.updateMany({
        where: { user_id: null },
        data: { user_id: userId },
      });
      console.log("Successfully migrated API keys to multi-user mode");
    } catch (error) {
      console.error("Error migrating API keys to multi-user mode:", error);
    }
  },
};
module.exports = { BrowserExtensionApiKey };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/documents.js | server/models/documents.js | const { v4: uuidv4 } = require("uuid");
const { getVectorDbClass } = require("../utils/helpers");
const prisma = require("../utils/prisma");
const { Telemetry } = require("./telemetry");
const { EventLogs } = require("./eventLogs");
const { safeJsonParse } = require("../utils/http");
const { getModelTag } = require("../endpoints/utils");
const Document = {
writable: ["pinned", "watched", "lastUpdatedAt"],
/**
* @param {import("@prisma/client").workspace_documents} document - Document PrismaRecord
* @returns {{
* metadata: (null|object),
* type: import("./documentSyncQueue.js").validFileType,
* source: string
* }}
*/
parseDocumentTypeAndSource: function (document) {
const metadata = safeJsonParse(document.metadata, null);
if (!metadata) return { metadata: null, type: null, source: null };
// Parse the correct type of source and its original source path.
const idx = metadata.chunkSource.indexOf("://");
const [type, source] = [
metadata.chunkSource.slice(0, idx),
metadata.chunkSource.slice(idx + 3),
];
return { metadata, type, source: this._stripSource(source, type) };
},
forWorkspace: async function (workspaceId = null) {
if (!workspaceId) return [];
return await prisma.workspace_documents.findMany({
where: { workspaceId },
});
},
delete: async function (clause = {}) {
try {
await prisma.workspace_documents.deleteMany({ where: clause });
return true;
} catch (error) {
console.error(error.message);
return false;
}
},
get: async function (clause = {}) {
try {
const document = await prisma.workspace_documents.findFirst({
where: clause,
});
return document || null;
} catch (error) {
console.error(error.message);
return null;
}
},
where: async function (
clause = {},
limit = null,
orderBy = null,
include = null,
select = null
) {
try {
const results = await prisma.workspace_documents.findMany({
where: clause,
...(limit !== null ? { take: limit } : {}),
...(orderBy !== null ? { orderBy } : {}),
...(include !== null ? { include } : {}),
...(select !== null ? { select: { ...select } } : {}),
});
return results;
} catch (error) {
console.error(error.message);
return [];
}
},
addDocuments: async function (workspace, additions = [], userId = null) {
const VectorDb = getVectorDbClass();
if (additions.length === 0) return { failed: [], embedded: [] };
const { fileData } = require("../utils/files");
const embedded = [];
const failedToEmbed = [];
const errors = new Set();
for (const path of additions) {
const data = await fileData(path);
if (!data) continue;
const docId = uuidv4();
const { pageContent, ...metadata } = data;
const newDoc = {
docId,
filename: path.split("/")[1],
docpath: path,
workspaceId: workspace.id,
metadata: JSON.stringify(metadata),
};
const { vectorized, error } = await VectorDb.addDocumentToNamespace(
workspace.slug,
{ ...data, docId },
path
);
if (!vectorized) {
console.error(
"Failed to vectorize",
metadata?.title || newDoc.filename
);
failedToEmbed.push(metadata?.title || newDoc.filename);
errors.add(error);
continue;
}
try {
await prisma.workspace_documents.create({ data: newDoc });
embedded.push(path);
} catch (error) {
console.error(error.message);
}
}
await Telemetry.sendTelemetry("documents_embedded_in_workspace", {
LLMSelection: process.env.LLM_PROVIDER || "openai",
Embedder: process.env.EMBEDDING_ENGINE || "inherit",
VectorDbSelection: process.env.VECTOR_DB || "lancedb",
TTSSelection: process.env.TTS_PROVIDER || "native",
LLMModel: getModelTag(),
});
await EventLogs.logEvent(
"workspace_documents_added",
{
workspaceName: workspace?.name || "Unknown Workspace",
numberOfDocumentsAdded: additions.length,
},
userId
);
return { failedToEmbed, errors: Array.from(errors), embedded };
},
removeDocuments: async function (workspace, removals = [], userId = null) {
const VectorDb = getVectorDbClass();
if (removals.length === 0) return;
for (const path of removals) {
const document = await this.get({
docpath: path,
workspaceId: workspace.id,
});
if (!document) continue;
await VectorDb.deleteDocumentFromNamespace(
workspace.slug,
document.docId
);
try {
await prisma.workspace_documents.delete({
where: { id: document.id, workspaceId: workspace.id },
});
await prisma.document_vectors.deleteMany({
where: { docId: document.docId },
});
} catch (error) {
console.error(error.message);
}
}
await EventLogs.logEvent(
"workspace_documents_removed",
{
workspaceName: workspace?.name || "Unknown Workspace",
numberOfDocuments: removals.length,
},
userId
);
return true;
},
count: async function (clause = {}, limit = null) {
try {
const count = await prisma.workspace_documents.count({
where: clause,
...(limit !== null ? { take: limit } : {}),
});
return count;
} catch (error) {
console.error("FAILED TO COUNT DOCUMENTS.", error.message);
return 0;
}
},
update: async function (id = null, data = {}) {
if (!id) throw new Error("No workspace document id provided for update");
const validKeys = Object.keys(data).filter((key) =>
this.writable.includes(key)
);
if (validKeys.length === 0)
return { document: { id }, message: "No valid fields to update!" };
try {
const document = await prisma.workspace_documents.update({
where: { id },
data,
});
return { document, message: null };
} catch (error) {
console.error(error.message);
return { document: null, message: error.message };
}
},
_updateAll: async function (clause = {}, data = {}) {
try {
await prisma.workspace_documents.updateMany({
where: clause,
data,
});
return true;
} catch (error) {
console.error(error.message);
return false;
}
},
content: async function (docId) {
if (!docId) throw new Error("No workspace docId provided!");
const document = await this.get({ docId: String(docId) });
if (!document) throw new Error(`Could not find a document by id ${docId}`);
const { fileData } = require("../utils/files");
const data = await fileData(document.docpath);
return { title: data.title, content: data.pageContent };
},
contentByDocPath: async function (docPath) {
const { fileData } = require("../utils/files");
const data = await fileData(docPath);
return { title: data.title, content: data.pageContent };
},
// Some data sources have encoded params in them we don't want to log - so strip those details.
_stripSource: function (sourceString, type) {
if (["confluence", "github"].includes(type)) {
const _src = new URL(sourceString);
_src.search = ""; // remove all search params that are encoded for resync.
return _src.toString();
}
return sourceString;
},
/**
* Functions for the backend API endpoints - not to be used by the frontend or elsewhere.
* @namespace api
*/
api: {
/**
* Process a document upload from the API and upsert it into the database. This
* functionality should only be used by the backend /v1/documents/upload endpoints for post-upload embedding.
* @param {string} wsSlugs - The slugs of the workspaces to embed the document into, will be comma-separated list of workspace slugs
* @param {string} docLocation - The location/path of the document that was uploaded
* @returns {Promise<boolean>} - True if the document was uploaded successfully, false otherwise
*/
uploadToWorkspace: async function (wsSlugs = "", docLocation = null) {
if (!docLocation)
return console.log(
"No document location provided for embedding",
docLocation
);
const slugs = wsSlugs
.split(",")
.map((slug) => String(slug)?.trim()?.toLowerCase());
if (slugs.length === 0)
return console.log(`No workspaces provided got: ${wsSlugs}`);
const { Workspace } = require("./workspace");
const workspaces = await Workspace.where({ slug: { in: slugs } });
if (workspaces.length === 0)
return console.log("No valid workspaces found for slugs: ", slugs);
// Upsert the document into each workspace - do this sequentially
// because the document may be large and we don't want to overwhelm the embedder, plus on the first
// upsert we will then have the cache of the document - making n+1 embeds faster. If we parallelize this
// we will have to do a lot of extra work to ensure that the document is not embedded more than once.
for (const workspace of workspaces) {
const { failedToEmbed = [], errors = [] } = await Document.addDocuments(
workspace,
[docLocation]
);
if (failedToEmbed.length > 0)
return console.log(
`Failed to embed document into workspace ${workspace.slug}`,
errors
);
console.log(`Document embedded into workspace ${workspace.slug}...`);
}
return true;
},
},
};
module.exports = { Document };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/telemetry.js | server/models/telemetry.js | const { v4 } = require("uuid");
const { SystemSettings } = require("./systemSettings");
// Map of events and last sent time to check if the event is on cooldown
// This will be cleared on server restart - but that is fine since it is mostly to just
// prevent spamming the logs.
const TelemetryCooldown = new Map();
const Telemetry = {
  // Write-only key. It can't read events or any of your other data, so it's safe to use in public apps.
  pubkey: "phc_9qu7QLpV8L84P3vFmEiZxL020t2EqIubP7HHHxrSsqS",
  stubDevelopmentEvents: true, // [DO NOT TOUCH] Core team only.
  label: "telemetry_id",
  /*
   Key value pairs of events that should be debounced to prevent spamming the logs.
   This should be used for events that could be triggered in rapid succession that are not useful to atomically log.
   The value is the number of seconds to debounce the event
  */
  debounced: {
    sent_chat: 1800,
    agent_chat_sent: 1800,
    agent_chat_started: 1800,
    agent_tool_call: 1800,
    // Document mgmt events
    document_uploaded: 30,
    documents_embedded_in_workspace: 30,
    link_uploaded: 30,
    raw_document_uploaded: 30,
    document_parsed: 30,
  },
  /** Fetch the persisted anonymous telemetry id (null when not yet created). */
  id: async function () {
    const result = await SystemSettings.get({ label: this.label });
    return result?.value || null;
  },
  /**
   * Build the PostHog client (null when telemetry is disabled) alongside the
   * anonymous distinct id for this install.
   * @returns {Promise<{client: object|null, distinctId: string}>}
   */
  connect: async function () {
    const client = this.client();
    const distinctId = await this.findOrCreateId();
    return { client, distinctId };
  },
  /** True when running in development with event stubbing enabled. */
  isDev: function () {
    return process.env.NODE_ENV === "development" && this.stubDevelopmentEvents;
  },
  /** Construct a PostHog client, or null when telemetry is disabled or stubbed in dev. */
  client: function () {
    if (process.env.DISABLE_TELEMETRY === "true" || this.isDev()) return null;
    const { PostHog } = require("posthog-node");
    return new PostHog(this.pubkey);
  },
  /** Best-effort runtime descriptor attached to every event. */
  runtime: function () {
    if (process.env.ANYTHING_LLM_RUNTIME === "docker") return "docker";
    if (process.env.NODE_ENV === "production") return "production";
    return "other";
  },
  /**
   * Checks if the event is on cooldown
   * @param {string} event - The event to check
   * @returns {boolean} - True if the event is on cooldown, false otherwise
   */
  isOnCooldown: function (event) {
    // If the event is not debounced, return false
    if (!this.debounced[event]) return false;
    // If the event is not in the cooldown map, return false
    const lastSent = TelemetryCooldown.get(event);
    if (!lastSent) return false;
    // If the event is in the cooldown map, check if it has expired
    const now = Date.now();
    const cooldown = this.debounced[event] * 1000;
    return now - lastSent < cooldown;
  },
  /**
   * Marks the event as on cooldown - will check if the event is debounced first
   * @param {string} event - The event to mark
   */
  markOnCooldown: function (event) {
    if (!this.debounced[event]) return;
    TelemetryCooldown.set(event, Date.now());
  },
  /**
   * Capture a telemetry event. No-op when telemetry is disabled or the event
   * is on cooldown. Debounced events refresh their cooldown window on every
   * attempt (sent or skipped) via the finally block, so rapid-fire events
   * stay quiet until a full cooldown of inactivity has passed.
   * @param {string} event - event name
   * @param {object} eventProperties - extra properties to attach to the event
   * @param {string|number|null} subUserId - optional sub-user to scope the distinct id
   * @param {boolean} silent - suppress the local console log of the event
   */
  sendTelemetry: async function (
    event,
    eventProperties = {},
    subUserId = null,
    silent = false
  ) {
    try {
      // Check the cooldown before any connection work so debounced events
      // skip the (comparatively expensive) PostHog client construction and
      // the settings lookup for the distinct id entirely.
      if (this.isOnCooldown(event)) return;
      const { client, distinctId: systemId } = await this.connect();
      if (!client) return;
      const distinctId = !!subUserId ? `${systemId}::${subUserId}` : systemId;
      const properties = { ...eventProperties, runtime: this.runtime() };
      // Silence some events to keep logs from being too messy in production
      // eg: Tool calls from agents spamming the logs.
      if (!silent) {
        console.log(`\x1b[32m[TELEMETRY SENT]\x1b[0m`, {
          event,
          distinctId,
          properties,
        });
      }
      client.capture({
        event,
        distinctId,
        properties,
      });
    } catch {
      // Telemetry must never break the caller - swallow intentionally.
      return;
    } finally {
      // Mark the event as on cooldown if needed
      this.markOnCooldown(event);
    }
  },
  /** Flush any queued PostHog events and shut the client down. */
  flush: async function () {
    const client = this.client();
    if (!client) return;
    await client.shutdownAsync();
  },
  /** Create and persist a fresh anonymous telemetry id. */
  setUid: async function () {
    const newId = v4();
    await SystemSettings._updateSettings({ [this.label]: newId });
    return newId;
  },
  /** Return the existing telemetry id or create one if missing. */
  findOrCreateId: async function () {
    let currentId = await this.id();
    if (currentId) return currentId;
    currentId = await this.setUid();
    return currentId;
  },
};
module.exports = { Telemetry };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/apiKeys.js | server/models/apiKeys.js | const prisma = require("../utils/prisma");
const ApiKey = {
  tablename: "api_keys",
  writable: [],
  /**
   * Generate a new random API key secret.
   * @returns {string} a uuid-apikey formatted secret string
   */
  makeSecret: () => {
    const uuidAPIKey = require("uuid-apikey");
    return uuidAPIKey.create().apiKey;
  },
  /**
   * Create a new API key record.
   * @param {number|null} createdByUserId - user id to attribute the key to
   * @returns {Promise<{apiKey: object|null, error: string|null}>}
   */
  create: async function (createdByUserId = null) {
    try {
      const apiKey = await prisma.api_keys.create({
        data: {
          secret: this.makeSecret(),
          createdBy: createdByUserId,
        },
      });
      return { apiKey, error: null };
    } catch (error) {
      console.error("FAILED TO CREATE API KEY.", error.message);
      return { apiKey: null, error: error.message };
    }
  },
  /**
   * Find the first API key matching a clause.
   * @param {object} clause - prisma where clause
   * @returns {Promise<object|null>}
   */
  get: async function (clause = {}) {
    try {
      const apiKey = await prisma.api_keys.findFirst({ where: clause });
      return apiKey;
    } catch (error) {
      console.error("FAILED TO GET API KEY.", error.message);
      return null;
    }
  },
  /**
   * Count API keys matching a clause.
   * @param {object} clause - prisma where clause
   * @returns {Promise<number>} 0 on error
   */
  count: async function (clause = {}) {
    try {
      const count = await prisma.api_keys.count({ where: clause });
      return count;
    } catch (error) {
      console.error("FAILED TO COUNT API KEYS.", error.message);
      return 0;
    }
  },
  /**
   * Delete all API keys matching a clause.
   * @param {object} clause - prisma where clause
   * @returns {Promise<boolean>} true on success
   */
  delete: async function (clause = {}) {
    try {
      await prisma.api_keys.deleteMany({ where: clause });
      return true;
    } catch (error) {
      console.error("FAILED TO DELETE API KEY.", error.message);
      return false;
    }
  },
  /**
   * Find all API keys matching a clause.
   * @param {object} clause - prisma where clause
   * @param {number} [limit] - optional `take` cap
   * @returns {Promise<object[]>} empty array on error
   */
  where: async function (clause = {}, limit) {
    try {
      const apiKeys = await prisma.api_keys.findMany({
        where: clause,
        take: limit,
      });
      return apiKeys;
    } catch (error) {
      console.error("FAILED TO GET API KEYS.", error.message);
      return [];
    }
  },
  /**
   * Find API keys with their creator's `createdBy` field expanded to
   * `{id, username, role}` when the creating user still exists.
   * @param {object} clause - prisma where clause
   * @param {number} [limit] - optional `take` cap
   * @returns {Promise<object[]>} empty array on error
   */
  whereWithUser: async function (clause = {}, limit) {
    try {
      const { User } = require("./user");
      const apiKeys = await this.where(clause, limit);
      // Batch the creator lookup into a single query instead of one
      // User.get per api key (avoids N+1 queries on the keys listing).
      const creatorIds = [
        ...new Set(apiKeys.map((key) => key.createdBy).filter((id) => !!id)),
      ];
      if (creatorIds.length === 0) return apiKeys;
      const users = await User.where({ id: { in: creatorIds } });
      const usersById = new Map(users.map((user) => [user.id, user]));
      for (const apiKey of apiKeys) {
        const user = usersById.get(apiKey.createdBy);
        if (!user) continue; // deleted users keep the raw id, as before
        apiKey.createdBy = {
          id: user.id,
          username: user.username,
          role: user.role,
        };
      }
      return apiKeys;
    } catch (error) {
      console.error("FAILED TO GET API KEYS WITH USER.", error.message);
      return [];
    }
  },
};
module.exports = { ApiKey };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/documentSyncQueue.js | server/models/documentSyncQueue.js | const { BackgroundService } = require("../utils/BackgroundWorkers");
const prisma = require("../utils/prisma");
const { SystemSettings } = require("./systemSettings");
const { Telemetry } = require("./telemetry");
/**
 * @typedef {('link'|'youtube'|'confluence'|'github'|'gitlab'|'drupalwiki')} validFileType
 */
const DocumentSyncQueue = {
  featureKey: "experimental_live_file_sync",
  // update the validFileTypes and .canWatch properties when adding elements here.
  validFileTypes: [
    "link",
    "youtube",
    "confluence",
    "github",
    "gitlab",
    "drupalwiki",
  ],
  defaultStaleAfter: 604800000, // ms before a watched doc is considered stale (7 days)
  maxRepeatFailures: 5, // How many times a run can fail in a row before pruning.
  writable: [],
  /** Boot the background worker service that processes watched-document sync queues. */
  bootWorkers: function () {
    new BackgroundService().boot();
  },
  /** Stop the background worker service. */
  killWorkers: function () {
    new BackgroundService().stop();
  },
  /** Check is the Document Sync/Watch feature is enabled and can be used. */
  enabled: async function () {
    return (
      (await SystemSettings.get({ label: this.featureKey }))?.value ===
      "enabled"
    );
  },
  /**
   * Compute the next sync time for a queue record from its staleAfterMs.
   * @param {import("@prisma/client").document_sync_queues} queueRecord - queue record to calculate for
   * @returns {Date} timestamp of the next scheduled sync
   */
  calcNextSync: function (queueRecord) {
    return new Date(Number(new Date()) + queueRecord.staleAfterMs);
  },
  /**
   * Check if the document can be watched based on the metadata fields
   * @param {object} metadata - metadata to check
   * @param {string} metadata.title - title of the document
   * @param {string} metadata.chunkSource - chunk source of the document
   * @returns {boolean} - true if the document can be watched, false otherwise
   */
  canWatch: function ({ title, chunkSource = null } = {}) {
    if (!chunkSource) return false;
    if (chunkSource.startsWith("link://") && title.endsWith(".html"))
      return true; // If is web-link material (prior to feature most chunkSources were links://)
    if (chunkSource.startsWith("youtube://")) return true; // If is a youtube link
    if (chunkSource.startsWith("confluence://")) return true; // If is a confluence document link
    if (chunkSource.startsWith("github://")) return true; // If is a GitHub file reference
    if (chunkSource.startsWith("gitlab://")) return true; // If is a GitLab file reference
    if (chunkSource.startsWith("drupalwiki://")) return true; // If is a DrupalWiki document link
    return false;
  },
  /**
   * Creates Queue record and updates document watch status to true on Document record
   * @param {import("@prisma/client").workspace_documents} document - document record to watch, must have `id`
   * @returns {Promise<object|boolean|null>} the created queue record, null on failure, false when no document given
   */
  watch: async function (document = null) {
    if (!document) return false;
    try {
      const { Document } = require("./documents");
      // Get all documents that are watched and share the same unique filename. If this value is
      // non-zero then we exit early so that we do not have duplicated watch queues for the same file
      // across many workspaces.
      const workspaceDocIds = (
        await Document.where({ filename: document.filename, watched: true })
      ).map((rec) => rec.id);
      const hasRecords =
        (await this.count({ workspaceDocId: { in: workspaceDocIds } })) > 0;
      if (hasRecords)
        throw new Error(
          `Cannot watch this document again - it already has a queue set.`
        );
      const queue = await prisma.document_sync_queues.create({
        data: {
          workspaceDocId: document.id,
          nextSyncAt: new Date(Number(new Date()) + this.defaultStaleAfter),
        },
      });
      // Flip `watched` on for every copy of this file, across all workspaces.
      await Document._updateAll(
        { filename: document.filename },
        { watched: true }
      );
      return queue || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  /**
   * Deletes Queue record and updates document watch status to false on Document record
   * @param {import("@prisma/client").workspace_documents} document - document record to unwatch, must have `id`
   * @returns {Promise<boolean>} true on success
   */
  unwatch: async function (document = null) {
    if (!document) return false;
    try {
      const { Document } = require("./documents");
      // We could have been given a document to unwatch which is a clone of one that is already being watched but by another workspaceDocument id.
      // so in this instance we need to delete any queues related to this document by any WorkspaceDocumentId it is referenced by.
      const workspaceDocIds = (
        await Document.where({ filename: document.filename, watched: true })
      ).map((rec) => rec.id);
      await this.delete({ workspaceDocId: { in: workspaceDocIds } });
      await Document._updateAll(
        { filename: document.filename },
        { watched: false }
      );
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  /**
   * Update a queue record by id. No field validation is applied - internal use only.
   * @param {number|null} id - queue record id
   * @param {object} data - fields to persist
   * @returns {Promise<boolean>} true on success
   * @throws {Error} when no id is provided
   */
  _update: async function (id = null, data = {}) {
    if (!id) throw new Error("No id provided for update");
    try {
      await prisma.document_sync_queues.update({
        where: { id },
        data,
      });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  /**
   * Find the first queue record matching a clause.
   * @param {object} clause - prisma where clause
   * @returns {Promise<object|null>}
   */
  get: async function (clause = {}) {
    try {
      const queue = await prisma.document_sync_queues.findFirst({
        where: clause,
      });
      return queue || null;
    } catch (error) {
      console.error(error.message);
      return null;
    }
  },
  /**
   * Find all queue records matching a clause.
   * @param {object} clause - prisma where clause
   * @param {number|null} limit - optional `take` cap
   * @param {object|null} orderBy - optional prisma orderBy
   * @param {object|null} include - prisma include; note the `{}` default is
   *  always spread in unless explicitly passed as null
   * @returns {Promise<object[]>} empty array on error
   */
  where: async function (
    clause = {},
    limit = null,
    orderBy = null,
    include = {}
  ) {
    try {
      const results = await prisma.document_sync_queues.findMany({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
        ...(orderBy !== null ? { orderBy } : {}),
        ...(include !== null ? { include } : {}),
      });
      return results;
    } catch (error) {
      console.error(error.message);
      return [];
    }
  },
  /**
   * Count queue records matching a clause, optionally capped by `limit`.
   * @param {object} clause - prisma where clause
   * @param {number|null} limit - optional `take` cap
   * @returns {Promise<number>} 0 on error
   */
  count: async function (clause = {}, limit = null) {
    try {
      const count = await prisma.document_sync_queues.count({
        where: clause,
        ...(limit !== null ? { take: limit } : {}),
      });
      return count;
    } catch (error) {
      console.error("FAILED TO COUNT DOCUMENTS.", error.message);
      return 0;
    }
  },
  /**
   * Delete all queue records matching a clause.
   * @param {object} clause - prisma where clause
   * @returns {Promise<boolean>} true on success
   */
  delete: async function (clause = {}) {
    try {
      await prisma.document_sync_queues.deleteMany({ where: clause });
      return true;
    } catch (error) {
      console.error(error.message);
      return false;
    }
  },
  /**
   * Gets the "stale" queues where the queue's nextSyncAt is less than the current time
   * @returns {Promise<(
   *  import("@prisma/client").document_sync_queues &
   *  { workspaceDoc: import("@prisma/client").workspace_documents &
   *    { workspace: import("@prisma/client").workspaces }
   *  }
   * )[]>}
   */
  staleDocumentQueues: async function () {
    const queues = await this.where(
      {
        nextSyncAt: {
          lte: new Date().toISOString(),
        },
      },
      null,
      null,
      {
        workspaceDoc: {
          include: {
            workspace: true,
          },
        },
      }
    );
    return queues;
  },
  /**
   * Persist the outcome of a sync run for a queue - delegates to DocumentSyncRun.
   * @param {number|null} queueId - id of the queue the run belongs to
   * @param {string|null} status - run status
   * @param {object} result - arbitrary result payload for the run
   */
  saveRun: async function (queueId = null, status = null, result = {}) {
    const { DocumentSyncRun } = require("./documentSyncRun");
    return DocumentSyncRun.save(queueId, status, result);
  },
  /**
   * Updates document to be watched/unwatched & creates or deletes any queue records and updated Document record `watched` status
   * @param {import("@prisma/client").workspace_documents} documentRecord
   * @param {boolean} watchStatus - indicate if queue record should be created or not.
   * @returns
   */
  toggleWatchStatus: async function (documentRecord, watchStatus = false) {
    if (!watchStatus) {
      await Telemetry.sendTelemetry("document_unwatched");
      await this.unwatch(documentRecord);
      return;
    }
    await this.watch(documentRecord);
    await Telemetry.sendTelemetry("document_watched");
    return;
  },
};
module.exports = { DocumentSyncQueue };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/models/workspace.js | server/models/workspace.js | const prisma = require("../utils/prisma");
const slugifyModule = require("slugify");
const { Document } = require("./documents");
const { WorkspaceUser } = require("./workspaceUsers");
const { ROLES } = require("../utils/middleware/multiUserProtected");
const { v4: uuidv4 } = require("uuid");
const { User } = require("./user");
const { PromptHistory } = require("./promptHistory");
const { SystemSettings } = require("./systemSettings");
/**
 * True when `value` is null or is NaN-like under the coercing global isNaN
 * (so undefined and non-numeric strings also qualify). Used by the workspace
 * field validators after parseFloat/parseInt.
 * @param {*} value - candidate value
 * @returns {boolean}
 */
function isNullOrNaN(value) {
  return value === null ? true : isNaN(value);
}
/**
* @typedef {Object} Workspace
* @property {number} id - The ID of the workspace
* @property {string} name - The name of the workspace
* @property {string} slug - The slug of the workspace
* @property {string} openAiPrompt - The OpenAI prompt of the workspace
* @property {string} openAiTemp - The OpenAI temperature of the workspace
* @property {number} openAiHistory - The OpenAI history of the workspace
* @property {number} similarityThreshold - The similarity threshold of the workspace
* @property {string} chatProvider - The chat provider of the workspace
* @property {string} chatModel - The chat model of the workspace
* @property {number} topN - The top N of the workspace
* @property {string} chatMode - The chat mode of the workspace
* @property {string} agentProvider - The agent provider of the workspace
* @property {string} agentModel - The agent model of the workspace
* @property {string} queryRefusalResponse - The query refusal response of the workspace
* @property {string} vectorSearchMode - The vector search mode of the workspace
*/
const Workspace = {
defaultPrompt: SystemSettings.saneDefaultSystemPrompt,
// Used for generic updates so we can validate keys in request body
// commented fields are not writable, but are available on the db object
writable: [
  "name",
  // "slug",
  // "vectorTag",
  "openAiTemp",
  "openAiHistory",
  "lastUpdatedAt",
  "openAiPrompt",
  "similarityThreshold",
  "chatProvider",
  "chatModel",
  "topN",
  "chatMode",
  // "pfpFilename",
  "agentProvider",
  "agentModel",
  "queryRefusalResponse",
  "vectorSearchMode",
],
// Per-field sanitizers for the writable fields above. Each validator takes the
// raw incoming value and returns a safe value to persist (or null to unset).
validations: {
  name: (value) => {
    // If the name is not provided or is not a string then we will use a default name.
    // as the name field is not nullable in the db schema or has a default value.
    if (!value || typeof value !== "string") return "My Workspace";
    return String(value).slice(0, 255);
  },
  // Temperature: non-negative float, or null to fall back to provider default.
  openAiTemp: (value) => {
    if (value === null || value === undefined) return null;
    const temp = parseFloat(value);
    if (isNullOrNaN(temp) || temp < 0) return null;
    return temp;
  },
  // Chat history length: non-negative integer, default 20.
  openAiHistory: (value) => {
    if (value === null || value === undefined) return 20;
    const history = parseInt(value);
    if (isNullOrNaN(history)) return 20;
    if (history < 0) return 0;
    return history;
  },
  // Similarity threshold: clamped to [0, 1], default 0.25.
  similarityThreshold: (value) => {
    if (value === null || value === undefined) return 0.25;
    const threshold = parseFloat(value);
    if (isNullOrNaN(threshold)) return 0.25;
    if (threshold < 0) return 0.0;
    if (threshold > 1) return 1.0;
    return threshold;
  },
  // Top-N retrieved chunks: integer >= 1, default 4.
  topN: (value) => {
    if (value === null || value === undefined) return 4;
    const n = parseInt(value);
    if (isNullOrNaN(n)) return 4;
    if (n < 1) return 1;
    return n;
  },
  chatMode: (value) => {
    if (!value || !["chat", "query"].includes(value)) return "chat";
    return value;
  },
  // "none" is the UI sentinel for "no explicit provider" - store null instead.
  chatProvider: (value) => {
    if (!value || typeof value !== "string" || value === "none") return null;
    return String(value);
  },
  chatModel: (value) => {
    if (!value || typeof value !== "string") return null;
    return String(value);
  },
  agentProvider: (value) => {
    if (!value || typeof value !== "string" || value === "none") return null;
    return String(value);
  },
  agentModel: (value) => {
    if (!value || typeof value !== "string") return null;
    return String(value);
  },
  queryRefusalResponse: (value) => {
    if (!value || typeof value !== "string") return null;
    return String(value);
  },
  openAiPrompt: (value) => {
    if (!value || typeof value !== "string") return null;
    return String(value);
  },
  // Only "default" and "rerank" are supported; anything else becomes "default".
  vectorSearchMode: (value) => {
    if (
      !value ||
      typeof value !== "string" ||
      !["default", "rerank"].includes(value)
    )
      return "default";
    return value;
  },
},
/**
 * The default Slugify module requires some additional mapping to prevent downstream issues
 * with some vector db providers and instead of building a normalization method for every provider
 * we can capture this on the table level to not have to worry about it.
 * @param {...any} args - slugify args for npm package.
 * @returns {string}
 */
slugify: function (...args) {
  // extend() mutates the slugify module's shared charmap; re-applying the
  // same mapping on every call is redundant but harmless.
  slugifyModule.extend({
    "+": " plus ",
    "!": " bang ",
    "@": " at ",
    "*": " splat ",
    ".": " dot ",
    ":": "",
    "~": "",
    "(": "",
    ")": "",
    "'": "",
    '"': "",
    "|": "",
  });
  return slugifyModule(...args);
},
/**
* Validate the fields for a workspace update.
* @param {Object} updates - The updates to validate - should be writable fields
* @returns {Object} The validated updates. Only valid fields are returned.
*/
validateFields: function (updates = {}) {
const validatedFields = {};
for (const [key, value] of Object.entries(updates)) {
if (!this.writable.includes(key)) continue;
if (this.validations[key]) {
validatedFields[key] = this.validations[key](value);
} else {
// If there is no validation for the field then we will just pass it through.
validatedFields[key] = value;
}
}
return validatedFields;
},
/**
 * Create a new workspace.
 * @param {string} name - The name of the workspace.
 * @param {number} creatorId - The ID of the user creating the workspace.
 * @param {Object} additionalFields - Additional fields to apply to the workspace - will be validated.
 *  NOTE(review): this object is mutated in place (openAiPrompt is assigned) - callers should not reuse it.
 * @returns {Promise<{workspace: Object | null, message: string | null}>} A promise that resolves to an object containing the created workspace and an error message if applicable.
 */
new: async function (name = null, creatorId = null, additionalFields = {}) {
  if (!name) return { workspace: null, message: "name cannot be null" };
  var slug = this.slugify(name, { lower: true });
  slug = slug || uuidv4(); // fallback when the name slugifies to an empty string
  const existingBySlug = await this.get({ slug });
  if (existingBySlug !== null) {
    // Slug collision: salt with a random 8-digit seed to keep slugs unique.
    const slugSeed = Math.floor(10000000 + Math.random() * 90000000);
    slug = this.slugify(`${name}-${slugSeed}`, { lower: true });
  }
  // Get the default system prompt
  const defaultSystemPrompt = await SystemSettings.get({
    label: "default_system_prompt",
  });
  if (!!defaultSystemPrompt?.value)
    additionalFields.openAiPrompt = defaultSystemPrompt.value;
  else additionalFields.openAiPrompt = this.defaultPrompt;
  try {
    const workspace = await prisma.workspaces.create({
      data: {
        name: this.validations.name(name),
        ...this.validateFields(additionalFields),
        slug,
      },
    });
    // If created with a user then we need to create the relationship as well.
    // If creating with an admin User it wont change anything because admins can
    // view all workspaces anyway.
    if (!!creatorId) await WorkspaceUser.create(creatorId, workspace.id);
    return { workspace, message: null };
  } catch (error) {
    console.error(error.message);
    return { workspace: null, message: error.message };
  }
},
/**
 * Update the settings for a workspace. Applies validations to the updates provided.
 * @param {number} id - The ID of the workspace to update.
 * @param {Object} updates - The data to update.
 * @returns {Promise<{workspace: Object | null, message: string | null}>} A promise that resolves to an object containing the updated workspace and an error message if applicable.
 * @throws {Error} when no id is provided
 */
update: async function (id = null, updates = {}) {
  if (!id) throw new Error("No workspace id provided for update");
  const validatedUpdates = this.validateFields(updates);
  if (Object.keys(validatedUpdates).length === 0)
    return { workspace: { id }, message: "No valid fields to update!" };
  // If the user unset the chatProvider we will need
  // to then clear the chatModel as well to prevent confusion during
  // LLM loading.
  // ("default" passes through validations.chatProvider untouched, so it is
  // handled here rather than in the validator.)
  if (validatedUpdates?.chatProvider === "default") {
    validatedUpdates.chatProvider = null;
    validatedUpdates.chatModel = null;
  }
  return this._update(id, validatedUpdates);
},
/**
* Direct update of workspace settings without any validation.
* @param {number} id - The ID of the workspace to update.
* @param {Object} data - The data to update.
* @returns {Promise<{workspace: Object | null, message: string | null}>} A promise that resolves to an object containing the updated workspace and an error message if applicable.
*/
_update: async function (id = null, data = {}) {
if (!id) throw new Error("No workspace id provided for update");
try {
const workspace = await prisma.workspaces.update({
where: { id },
data,
});
return { workspace, message: null };
} catch (error) {
console.error(error.message);
return { workspace: null, message: error.message };
}
},
/**
 * Get a single workspace honoring user access. Admins/managers can fetch any
 * workspace (delegates to `get`); other roles must be workspace members.
 * The result carries `documents`, `contextWindow` and `currentContextTokenCount`.
 * NOTE(review): despite the `user = null` default, a null user throws on
 * `user.role` below - callers appear to always pass a user; confirm before relying on it.
 * @param {{id: number, role: string}|null} user - requesting user
 * @param {Object} clause - prisma where clause for workspaces
 * @returns {Promise<Object|null>} enriched workspace or null when not found/error
 */
getWithUser: async function (user = null, clause = {}) {
  if ([ROLES.admin, ROLES.manager].includes(user.role))
    return this.get(clause);
  try {
    const workspace = await prisma.workspaces.findFirst({
      where: {
        ...clause,
        workspace_users: {
          some: {
            user_id: user?.id,
          },
        },
      },
      include: {
        workspace_users: true,
        documents: true,
      },
    });
    if (!workspace) return null;
    return {
      ...workspace,
      // Replace the raw prisma documents with the enriched per-workspace view.
      documents: await Document.forWorkspace(workspace.id),
      contextWindow: this._getContextWindow(workspace),
      currentContextTokenCount: await this._getCurrentContextTokenCount(
        workspace.id
      ),
    };
  } catch (error) {
    console.error(error.message);
    return null;
  }
},
/**
* Get the total token count of all parsed files in a workspace/thread
* @param {number} workspaceId - The ID of the workspace
* @param {number|null} threadId - Optional thread ID to filter by
* @returns {Promise<number>} Total token count of all files
* @private
*/
async _getCurrentContextTokenCount(workspaceId, threadId = null) {
const { WorkspaceParsedFiles } = require("./workspaceParsedFiles");
return await WorkspaceParsedFiles.totalTokenCount({
workspaceId: Number(workspaceId),
threadId: threadId ? Number(threadId) : null,
});
},
/**
 * Get the context window size for a workspace based on its provider and model settings.
 * If the workspace has no provider/model set, falls back to system defaults.
 * @param {Workspace} workspace - The workspace to get context window for
 * @returns {number|null} The context window size in tokens (defaults to null if no provider/model found)
 * @private
 */
_getContextWindow: function (workspace) {
  const {
    getLLMProviderClass,
    getBaseLLMProviderModel,
  } = require("../utils/helpers");
  const provider = workspace.chatProvider || process.env.LLM_PROVIDER || null;
  // NOTE: the provider class is resolved before the null guard below - keep
  // this ordering; getLLMProviderClass is called even when provider is null.
  const LLMProvider = getLLMProviderClass({ provider });
  const model =
    workspace.chatModel || getBaseLLMProviderModel({ provider }) || null;
  if (!provider || !model) return null;
  // Optional chaining: providers without promptWindowLimit yield null.
  return LLMProvider?.promptWindowLimit?.(model) || null;
},
get: async function (clause = {}) {
try {
const workspace = await prisma.workspaces.findFirst({
where: clause,
include: {
documents: true,
},
});
if (!workspace) return null;
return {
...workspace,
contextWindow: this._getContextWindow(workspace),
currentContextTokenCount: await this._getCurrentContextTokenCount(
workspace.id
),
};
} catch (error) {
console.error(error.message);
return null;
}
},
delete: async function (clause = {}) {
try {
await prisma.workspaces.delete({
where: clause,
});
return true;
} catch (error) {
console.error(error.message);
return false;
}
},
where: async function (clause = {}, limit = null, orderBy = null) {
try {
const results = await prisma.workspaces.findMany({
where: clause,
...(limit !== null ? { take: limit } : {}),
...(orderBy !== null ? { orderBy } : {}),
});
return results;
} catch (error) {
console.error(error.message);
return [];
}
},
/**
 * Find all workspaces matching `clause` that the given user can access.
 * Admins/managers bypass the membership filter entirely; all other roles are
 * restricted to workspaces they are members of.
 * @param {{id: number, role: string}} user - requesting user
 * @param {object} clause - prisma where clause
 * @param {number|null} limit - optional `take` cap
 * @param {object|null} orderBy - optional prisma orderBy
 * @returns {Promise<object[]>} empty array on error
 */
whereWithUser: async function (
  user,
  clause = {},
  limit = null,
  orderBy = null
) {
  if ([ROLES.admin, ROLES.manager].includes(user.role))
    return await this.where(clause, limit, orderBy);
  try {
    const workspaces = await prisma.workspaces.findMany({
      where: {
        ...clause,
        workspace_users: {
          some: {
            user_id: user.id,
          },
        },
      },
      ...(limit !== null ? { take: limit } : {}),
      ...(orderBy !== null ? { orderBy } : {}),
    });
    return workspaces;
  } catch (error) {
    console.error(error.message);
    return [];
  }
},
/**
 * Find workspaces matching `clause` with a `userIds` array (member user ids)
 * attached to each result.
 * NOTE(review): runs one WorkspaceUser query per workspace (N+1) - acceptable
 * for modest workspace counts, revisit if listings grow large.
 * @param {object} clause - prisma where clause
 * @param {number|null} limit - optional `take` cap
 * @param {object|null} orderBy - optional prisma orderBy
 * @returns {Promise<object[]>} empty array on error
 */
whereWithUsers: async function (clause = {}, limit = null, orderBy = null) {
  try {
    const workspaces = await this.where(clause, limit, orderBy);
    for (const workspace of workspaces) {
      const userIds = (
        await WorkspaceUser.where({ workspace_id: Number(workspace.id) })
      ).map((rel) => rel.user_id);
      workspace.userIds = userIds;
    }
    return workspaces;
  } catch (error) {
    console.error(error.message);
    return [];
  }
},
/**
* Get all users for a workspace.
* @param {number} workspaceId - The ID of the workspace to get users for.
* @returns {Promise<Array<{userId: number, username: string, role: string}>>} A promise that resolves to an array of user objects.
*/
workspaceUsers: async function (workspaceId) {
try {
const users = (
await WorkspaceUser.where({ workspace_id: Number(workspaceId) })
).map((rel) => rel);
const usersById = await User.where({
id: { in: users.map((user) => user.user_id) },
});
const userInfo = usersById.map((user) => {
const workspaceUser = users.find((u) => u.user_id === user.id);
return {
userId: user.id,
username: user.username,
role: user.role,
lastUpdatedAt: workspaceUser.lastUpdatedAt,
};
});
return userInfo;
} catch (error) {
console.error(error.message);
return [];
}
},
/**
* Update the users for a workspace. Will remove all existing users and replace them with the new list.
* @param {number} workspaceId - The ID of the workspace to update.
* @param {number[]} userIds - An array of user IDs to add to the workspace.
* @returns {Promise<{success: boolean, error: string | null}>} A promise that resolves to an object containing the success status and an error message if applicable.
*/
updateUsers: async function (workspaceId, userIds = []) {
try {
await WorkspaceUser.delete({ workspace_id: Number(workspaceId) });
await WorkspaceUser.createManyUsers(userIds, workspaceId);
return { success: true, error: null };
} catch (error) {
console.error(error.message);
return { success: false, error: error.message };
}
},
trackChange: async function (prevData, newData, user) {
try {
await this._trackWorkspacePromptChange(prevData, newData, user);
return;
} catch (error) {
console.error("Error tracking workspace change:", error.message);
return;
}
},
/**
 * We are tracking this change to determine the need to a prompt library or
 * prompt assistant feature. If this is something you would like to see - tell us on GitHub!
 * We now track the prompt change in the PromptHistory model.
 * which is a sub-model of the Workspace model.
 * @param {Workspace} prevData - The previous data of the workspace.
 * @param {Workspace} newData - The new data of the workspace.
 * @param {{id: number, role: string}|null} user - The user who made the change.
 * @returns {Promise<void>}
 */
_trackWorkspacePromptChange: async function (prevData, newData, user = null) {
  // History is only recorded for a real prompt-to-prompt change (both sides
  // set, previous prompt not the default, and the value actually changed).
  if (
    !!newData?.openAiPrompt && // new prompt is set
    !!prevData?.openAiPrompt && // previous prompt was not null (default)
    prevData?.openAiPrompt !== this.defaultPrompt && // previous prompt was not default
    newData?.openAiPrompt !== prevData?.openAiPrompt // previous and new prompt are not the same
  )
    await PromptHistory.handlePromptChange(prevData, user); // log the change to the prompt history
  const { Telemetry } = require("./telemetry");
  const { EventLogs } = require("./eventLogs");
  // Telemetry/event logging uses a looser condition than history above: it
  // also fires when the previous prompt was unset/default.
  if (
    !newData?.openAiPrompt || // no prompt change
    newData?.openAiPrompt === this.defaultPrompt || // new prompt is default prompt
    newData?.openAiPrompt === prevData?.openAiPrompt // same prompt
  )
    return;
  await Telemetry.sendTelemetry("workspace_prompt_changed");
  await EventLogs.logEvent(
    "workspace_prompt_changed",
    {
      workspaceName: prevData?.name,
      prevSystemPrompt: prevData?.openAiPrompt || this.defaultPrompt,
      newSystemPrompt: newData?.openAiPrompt,
    },
    user?.id
  );
  return;
},
// Direct DB queries for API use only.
/**
* Generic prisma FindMany query for workspaces collections
* @param {import("../node_modules/.prisma/client/index.d.ts").Prisma.TypeMap['model']['workspaces']['operations']['findMany']['args']} prismaQuery
* @returns
*/
_findMany: async function (prismaQuery = {}) {
try {
const results = await prisma.workspaces.findMany(prismaQuery);
return results;
} catch (error) {
console.error(error.message);
return null;
}
},
/**
* Generic prisma query for .get of workspaces collections
* @param {import("../node_modules/.prisma/client/index.d.ts").Prisma.TypeMap['model']['workspaces']['operations']['findFirst']['args']} prismaQuery
* @returns
*/
_findFirst: async function (prismaQuery = {}) {
try {
const results = await prisma.workspaces.findFirst(prismaQuery);
return results;
} catch (error) {
console.error(error.message);
return null;
}
},
/**
* Get the prompt history for a workspace.
* @param {Object} options - The options to get prompt history for.
* @param {number} options.workspaceId - The ID of the workspace to get prompt history for.
* @returns {Promise<Array<{id: number, prompt: string, modifiedAt: Date, modifiedBy: number, user: {id: number, username: string, role: string}}>>} A promise that resolves to an array of prompt history objects.
*/
promptHistory: async function ({ workspaceId }) {
try {
const results = await PromptHistory.forWorkspace(workspaceId);
return results;
} catch (error) {
console.error(error.message);
return [];
}
},
/**
* Delete the prompt history for a workspace.
* @param {Object} options - The options to delete the prompt history for.
* @param {number} options.workspaceId - The ID of the workspace to delete prompt history for.
* @returns {Promise<boolean>} A promise that resolves to a boolean indicating the success of the operation.
*/
deleteAllPromptHistory: async function ({ workspaceId }) {
try {
return await PromptHistory.delete({ workspaceId });
} catch (error) {
console.error(error.message);
return false;
}
},
/**
* Delete the prompt history for a workspace.
* @param {Object} options - The options to delete the prompt history for.
* @param {number} options.workspaceId - The ID of the workspace to delete prompt history for.
* @param {number} options.id - The ID of the prompt history to delete.
* @returns {Promise<boolean>} A promise that resolves to a boolean indicating the success of the operation.
*/
deletePromptHistory: async function ({ workspaceId, id }) {
try {
return await PromptHistory.delete({ id, workspaceId });
} catch (error) {
console.error(error.message);
return false;
}
},
};
module.exports = { Workspace };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/swagger/index.js | server/swagger/index.js | function waitForElm(selector) {
  // Resolves with the first element matching `selector`, waiting for it to be
  // added to the DOM if it does not exist yet. The returned promise never
  // rejects — if the element never appears, it simply never settles.
  return new Promise(resolve => {
    // Fast path: element already present.
    if (document.querySelector(selector)) {
      return resolve(document.querySelector(selector));
    }
    // Otherwise watch the whole document subtree for added nodes until a match
    // shows up, then stop observing.
    const observer = new MutationObserver(mutations => {
      if (document.querySelector(selector)) {
        resolve(document.querySelector(selector));
        observer.disconnect();
      }
    });
    observer.observe(document.body, {
      childList: true,
      subtree: true
    });
  });
}
// Force change the Swagger logo in the header
// SWAGGER_DOCS_ENV is injected via `customJsStr` when this script is embedded
// into the Swagger UI page (see server/swagger/utils.js); in development the
// image is served from the local frontend dev server instead of the app origin.
// NOTE(review): `href` is not a valid attribute on <img> — presumably a link
// wrapper was intended; confirm before relying on the logo being clickable.
waitForElm('.topbar-wrapper').then((elm) => {
  if (window.SWAGGER_DOCS_ENV === 'development') {
    elm.innerHTML = `<img href='${window.location.origin}' src='http://localhost:3000/public/anything-llm-light.png' width='200'/>`
  } else {
    elm.innerHTML = `<img href='${window.location.origin}' src='${window.location.origin}/anything-llm-light.png' width='200'/>`
  }
}); | javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/swagger/init.js | server/swagger/init.js | const swaggerAutogen = require("swagger-autogen")({ openapi: "3.0.0" });
const fs = require("fs");
const path = require("path");
// Top-level OpenAPI document skeleton consumed by swagger-autogen; the
// generated operations are merged into this object.
const doc = {
  info: {
    version: "1.0.0",
    title: "AnythingLLM Developer API",
    description:
      "API endpoints that enable programmatic reading, writing, and updating of your AnythingLLM instance. UI supplied by Swagger.io.",
  },
  // Swagger-autogen does not allow us to use relative paths as these will resolve to
  // http:///api in the openapi.json file, so we need to monkey-patch this post-generation.
  host: "/api",
  schemes: ["http"],
  // All endpoints authenticate with a bearer API key (JWT format).
  securityDefinitions: {
    BearerAuth: {
      type: "http",
      scheme: "bearer",
      bearerFormat: "JWT",
    },
  },
  security: [{ BearerAuth: [] }],
  definitions: {
    InvalidAPIKey: {
      message: "Invalid API Key",
    },
  },
};
// Destination for the generated spec, next to this script.
const outputFile = path.resolve(__dirname, "./openapi.json");
// Every endpoint module that swagger-autogen should scan for #swagger comments.
const endpointsFiles = [
  "../endpoints/api/auth/index.js",
  "../endpoints/api/admin/index.js",
  "../endpoints/api/document/index.js",
  "../endpoints/api/workspace/index.js",
  "../endpoints/api/system/index.js",
  "../endpoints/api/workspaceThread/index.js",
  "../endpoints/api/userManagement/index.js",
  "../endpoints/api/openai/index.js",
  "../endpoints/api/embed/index.js",
];
// Generate the OpenAPI spec, then post-process it:
//  1. strip the `Authorization` header from every operation's parameter list
//     (auth is already expressed via the BearerAuth security scheme), and
//  2. patch in a `servers` entry so the UI issues requests against /api.
swaggerAutogen(outputFile, endpointsFiles, doc).then(({ data }) => {
  // Remove Authorization parameters from arguments.
  // NOTE: the loop variable is deliberately NOT named `path` — that would
  // shadow the `path` module required at the top of this file.
  for (const apiPath of Object.keys(data.paths)) {
    for (const method of ["get", "post", "delete"]) {
      if (!data.paths[apiPath].hasOwnProperty(method)) continue;
      const parameters = (data.paths[apiPath][method]?.parameters || []).filter(
        (arg) => arg.name !== "Authorization"
      );
      data.paths[apiPath][method].parameters = parameters;
    }
  }
  const openApiSpec = {
    ...data,
    servers: [
      {
        url: "/api",
      },
    ],
  };
  fs.writeFileSync(outputFile, JSON.stringify(openApiSpec, null, 2), {
    encoding: "utf-8",
    flag: "w",
  });
  console.log(`Swagger-autogen: \x1b[32mPatched servers.url β\x1b[0m`);
});
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/swagger/utils.js | server/swagger/utils.js | const fs = require('fs');
const path = require('path');
const swaggerUi = require('swagger-ui-express');
/**
 * Resolve the favicon URL for the Swagger UI page.
 * In production the asset is served by the app itself; otherwise it comes
 * from the local frontend dev server.
 * @returns {string} root-relative (production) or absolute (dev) favicon URL
 */
function faviconUrl() {
  const isProduction = process.env.NODE_ENV === "production";
  if (isProduction) return '/public/favicon.png';
  return 'http://localhost:3000/public/favicon.png';
}
/**
 * Mount the Swagger UI at /api/docs on the given express app.
 * Honors the DISABLE_SWAGGER_DOCS env flag. In production the HTML is built
 * once at mount time; in development it is regenerated per request so that
 * hot-reloaded changes to openapi.json/index.js are picked up.
 * @param {import('express').Express} app - express application to mount onto
 */
function useSwagger(app) {
  if (process.env.DISABLE_SWAGGER_DOCS === "true") {
    console.log(
      `\x1b[33m[SWAGGER DISABLED]\x1b[0m Swagger documentation is disabled via DISABLE_SWAGGER_DOCS environment variable.`
    );
    return;
  }
  app.use('/api/docs', swaggerUi.serve);
  // Shared UI options: custom stylesheet (light + dark), page title, favicon.
  const options = {
    customCss: [
      fs.readFileSync(path.resolve(__dirname, 'index.css')),
      fs.readFileSync(path.resolve(__dirname, 'dark-swagger.css'))
    ].join('\n\n\n'),
    customSiteTitle: 'AnythingLLM Developer API Documentation',
    customfavIcon: faviconUrl(),
  }
  if (process.env.NODE_ENV === "production") {
    const swaggerDocument = require('./openapi.json');
    app.get('/api/docs', swaggerUi.setup(
      swaggerDocument,
      {
        ...options,
        // customJsStr injects window.SWAGGER_DOCS_ENV for the logo-swap script.
        customJsStr: 'window.SWAGGER_DOCS_ENV = "production";\n\n' + fs.readFileSync(path.resolve(__dirname, 'index.js'), 'utf8'),
      },
    ));
  } else {
    // we regenerate the html page only in development mode to ensure it is up-to-date when the code is hot-reloaded.
    app.get(
      "/api/docs",
      async (_, response) => {
        // #swagger.ignore = true
        const swaggerDocument = require('./openapi.json');
        return response.send(
          swaggerUi.generateHTML(
            swaggerDocument,
            {
              ...options,
              customJsStr: 'window.SWAGGER_DOCS_ENV = "development";\n\n' + fs.readFileSync(path.resolve(__dirname, 'index.js'), 'utf8'),
            }
          )
        );
      }
    );
  }
}
module.exports = { faviconUrl, useSwagger } | javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/togetherAi/index.js | server/utils/AiProviders/togetherAi/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const cacheFolder = path.resolve(
process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "models", "togetherAi")
: path.resolve(__dirname, `../../../storage/models/togetherAi`)
);
/**
 * Fetch the list of chat-capable Together AI models, caching the result on
 * disk for one week. On network failure a stale cache (if any) is returned.
 * @param {string|null} apiKey - optional key override; falls back to TOGETHER_AI_API_KEY
 * @returns {Promise<Array<{id: string, name: string, organization: string, type: string, maxLength: number}>>}
 */
async function togetherAiModels(apiKey = null) {
  const cacheModelPath = path.resolve(cacheFolder, "models.json");
  const cacheAtPath = path.resolve(cacheFolder, ".cached_at");
  // If cache exists and is less than 1 week old, use it
  if (fs.existsSync(cacheModelPath) && fs.existsSync(cacheAtPath)) {
    const now = Number(new Date());
    const timestampMs = Number(fs.readFileSync(cacheAtPath));
    if (now - timestampMs <= 6.048e8) {
      // 1 Week in MS
      return safeJsonParse(
        fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
        []
      );
    }
  }
  try {
    const { OpenAI: OpenAIApi } = require("openai");
    const openai = new OpenAIApi({
      baseURL: "https://api.together.xyz/v1",
      apiKey: apiKey || process.env.TOGETHER_AI_API_KEY || null,
    });
    const response = await openai.models.list();
    // Filter and transform models into the expected format
    // Only include chat models
    // NOTE(review): reads `.body` rather than `.data` from the SDK list()
    // response — confirm this matches the installed openai SDK version.
    const validModels = response.body
      .filter((model) => ["chat"].includes(model.type))
      .map((model) => ({
        id: model.id,
        name: model.display_name || model.id,
        organization: model.organization || "Unknown",
        type: model.type,
        maxLength: model.context_length || 4096,
      }));
    // Cache the results
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    fs.writeFileSync(cacheModelPath, JSON.stringify(validModels), {
      encoding: "utf-8",
    });
    fs.writeFileSync(cacheAtPath, String(Number(new Date())), {
      encoding: "utf-8",
    });
    return validModels;
  } catch (error) {
    console.error("Error fetching Together AI models:", error);
    // If cache exists but is stale, still use it as fallback
    if (fs.existsSync(cacheModelPath)) {
      return safeJsonParse(
        fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
        []
      );
    }
    return [];
  }
}
/**
 * LLM provider for Together AI's OpenAI-compatible chat completion API.
 * Model metadata (context windows, chat capability) comes from the cached
 * togetherAiModels() listing above.
 */
class TogetherAiLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.TOGETHER_AI_API_KEY)
      throw new Error("No TogetherAI API key was set.");
    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: "https://api.together.xyz/v1",
      apiKey: process.env.TOGETHER_AI_API_KEY ?? null,
    });
    this.model = modelPreference || process.env.TOGETHER_AI_MODEL_PREF;
    // FIXME(review): promptWindowLimit() is declared async on this class
    // (see below), so each expression here multiplies a Promise and yields
    // NaN — these limits are effectively unset. Confirm intended behavior.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = !embedder ? new NativeEmbedder() : embedder;
    this.defaultTemp = 0.7;
  }
  // Wrap retrieved context snippets in delimited sections appended to the
  // system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
  // Build OpenAI-style message content: plain string without attachments,
  // otherwise a mixed text + image_url content array.
  // `attachment.contentString` is presumably a data-URI image — TODO confirm.
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }
    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }
  // Map of model id -> model metadata for every known Together AI model.
  async allModelInformation() {
    const models = await togetherAiModels();
    return models.reduce((acc, model) => {
      acc[model.id] = model;
      return acc;
    }, {});
  }
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
  // Context window for a given model id; 4096 when the model is unknown.
  static async promptWindowLimit(modelName) {
    const models = await togetherAiModels();
    const model = models.find((m) => m.id === modelName);
    return model?.maxLength || 4096;
  }
  async promptWindowLimit() {
    const models = await togetherAiModels();
    const model = models.find((m) => m.id === this.model);
    return model?.maxLength || 4096;
  }
  // A model is chat-valid only if it appears in the listing with type "chat".
  async isValidChatCompletionModel(model = "") {
    const models = await togetherAiModels();
    const foundModel = models.find((m) => m.id === model);
    return foundModel && foundModel.type === "chat";
  }
  // Assemble the full message array: system prompt (+context), prior history,
  // then the current user turn.
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...chatHistory,
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }
  /**
   * Non-streaming chat completion with performance metrics.
   * @returns {Promise<{textResponse: string, metrics: object}|null>} null when
   * the API returns no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `TogetherAI chat: ${this.model} is not valid for chat completion!`
      );
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );
    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;
    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage?.prompt_tokens || 0,
        completion_tokens: result.output.usage?.completion_tokens || 0,
        total_tokens: result.output.usage?.total_tokens || 0,
        outputTps: result.output.usage?.completion_tokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }
  // Streaming chat completion, wrapped for token/latency measurement.
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `TogetherAI chat: ${this.model} is not valid for chat completion!`
      );
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }
  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
  // Compress the prompt + history into the model's context window.
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
module.exports = {
TogetherAiLLM,
togetherAiModels,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/moonshotAi/index.js | server/utils/AiProviders/moonshotAi/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
const { MODEL_MAP } = require("../modelMap");
/**
 * LLM provider for Moonshot AI's OpenAI-compatible chat completion API.
 * Context windows come from the static MODEL_MAP lookup.
 */
class MoonshotAiLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.MOONSHOT_AI_API_KEY)
      throw new Error("No Moonshot AI API key was set.");
    this.className = "MoonshotAiLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: "https://api.moonshot.ai/v1",
      apiKey: process.env.MOONSHOT_AI_API_KEY,
    });
    this.model =
      modelPreference ||
      process.env.MOONSHOT_AI_MODEL_PREF ||
      "moonshot-v1-32k";
    // Token-budget split of the context window across history/system/user.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(
      `Initialized ${this.model} with context window ${this.promptWindowLimit()}`
    );
  }
  // Namespaced console logger for this provider.
  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }
  // Wrap retrieved context snippets in delimited sections appended to the
  // system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
  /**
   * Generates appropriate content array for a message + attachments.
   * Returns the plain prompt string when there are no attachments; otherwise
   * an OpenAI-style mixed content array of text + image_url entries.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }
    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }
  streamingEnabled() {
    return true;
  }
  // Context window for the configured model; falls back to 8,192 tokens.
  promptWindowLimit() {
    return MODEL_MAP.get("moonshot", this.model) ?? 8_192;
  }
  // Assemble the full message array: system prompt (+context), formatted
  // prior history, then the current user turn.
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }
  // Compress the prompt + history into the model's context window.
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
  /**
   * Non-streaming chat completion with performance metrics.
   * @returns {Promise<{textResponse: string, metrics: object}|null>} null when
   * the API returns no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );
    if (
      !Object.prototype.hasOwnProperty.call(result.output, "choices") ||
      result.output.choices.length === 0
    )
      return null;
    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage.prompt_tokens || 0,
        completion_tokens: result.output.usage.completion_tokens || 0,
        total_tokens: result.output.usage.total_tokens || 0,
        outputTps: result.output.usage.completion_tokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }
  // Streaming chat completion, wrapped for token/latency measurement.
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }
  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
}
module.exports = { MoonshotAiLLM };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/dellProAiStudio/index.js | server/utils/AiProviders/dellProAiStudio/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
// hybrid of openAi LLM chat completion for Dell Pro AI Studio
class DellProAiStudioLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.DPAIS_LLM_BASE_PATH)
      throw new Error("No Dell Pro AI Studio Base Path was set.");
    this.className = "DellProAiStudioLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.dpais = new OpenAIApi({
      baseURL: DellProAiStudioLLM.parseBasePath(),
      // Local DPAIS endpoint — presumably no API key is required; verify.
      apiKey: null,
    });
    this.model = modelPreference || process.env.DPAIS_LLM_MODEL_PREF;
    // Token-budget split of the context window across history/system/user.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(
      `Dell Pro AI Studio LLM initialized with ${this.model}. ctx: ${this.promptWindowLimit()}`
    );
  }
  /**
   * Parse the base path for the Dell Pro AI Studio API
   * so we can use it for inference requests.
   * Normalizes any provided URL down to `<origin>/v1/openai`.
   * @param {string} providedBasePath
   * @returns {string|null} null when the provided value is not a valid URL
   */
  static parseBasePath(providedBasePath = process.env.DPAIS_LLM_BASE_PATH) {
    try {
      const baseURL = new URL(providedBasePath);
      const basePath = `${baseURL.origin}/v1/openai`;
      return basePath;
    } catch (e) {
      return null;
    }
  }
  // Namespaced console logger for this provider.
  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }
  // Wrap retrieved context snippets in delimited sections appended to the
  // system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
  static promptWindowLimit(_modelName) {
    const limit = process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No Dell Pro AI Studio token context limit was set.");
    return Number(limit);
  }
  // Ensure the user set a value for the token limit
  // and if undefined - assume 4096 window.
  promptWindowLimit() {
    const limit = process.env.DPAIS_LLM_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No Dell Pro AI Studio token context limit was set.");
    return Number(limit);
  }
  // DPAIS exposes no model listing to validate against, so accept any model.
  async isValidChatCompletionModel(_ = "") {
    return true;
  }
  /**
   * Generates appropriate content array for a message + attachments.
   * Returns the plain prompt string when there are no attachments; otherwise
   * an OpenAI-style mixed content array of text + image_url entries.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) return userPrompt;
    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "auto",
        },
      });
    }
    return content.flat();
  }
  /**
   * Construct the user prompt for this model.
   * NOTE: the `_attachments` key passed below never matches the `attachments`
   * destructure in #generateContent, so attachments always fall back to [] —
   * consistent with the stated intent that attachments are ignored here.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    _attachments = [], // not used for Dell Pro AI Studio - `attachments` passed in is ignored
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, _attachments }),
      },
    ];
  }
  /**
   * Non-streaming chat completion with performance metrics.
   * @returns {Promise<{textResponse: string, metrics: object}|null>} null when
   * the API returns no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        `Dell Pro AI Studio chat: ${this.model} is not valid or defined model for chat completion!`
      );
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.dpais.chat.completions.create({
        model: this.model,
        messages,
        temperature,
      })
    );
    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;
    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage?.prompt_tokens || 0,
        completion_tokens: result.output.usage?.completion_tokens || 0,
        total_tokens: result.output.usage?.total_tokens || 0,
        outputTps: result.output.usage?.completion_tokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }
  // Streaming chat completion, wrapped for token/latency measurement.
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        `Dell Pro AI Studio chat: ${this.model} is not valid or defined model for chat completion!`
      );
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.dpais.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }
  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
  // Compress the prompt + history into the model's context window.
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
module.exports = {
DellProAiStudioLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/cohere/index.js | server/utils/AiProviders/cohere/index.js | const { v4 } = require("uuid");
const { writeResponseChunk } = require("../../helpers/chat/responses");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { MODEL_MAP } = require("../modelMap");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
/**
 * LLM provider for Cohere's native chat API. Unlike the OpenAI-compatible
 * providers, Cohere takes the latest message separately from the prior
 * history, and uses its own role names (SYSTEM/USER/CHATBOT).
 */
class CohereLLM {
  constructor(embedder = null) {
    this.className = "CohereLLM";
    const { CohereClient } = require("cohere-ai");
    if (!process.env.COHERE_API_KEY)
      throw new Error("No Cohere API key was set.");
    const cohere = new CohereClient({
      token: process.env.COHERE_API_KEY,
    });
    this.cohere = cohere;
    this.model = process.env.COHERE_MODEL_PREF;
    // Token-budget split of the context window across history/system/user.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.#log(
      `Initialized with model ${this.model}. ctx: ${this.promptWindowLimit()}`
    );
  }
  // Namespaced console logger for this provider.
  #log(text, ...args) {
    console.log(`\x1b[32m[${this.className}]\x1b[0m ${text}`, ...args);
  }
  // Wrap retrieved context snippets in delimited sections appended to the
  // system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
  // Translate OpenAI-style {role, content} history into Cohere's
  // {role: SYSTEM|USER|CHATBOT, message} shape. Unknown roles are dropped.
  #convertChatHistoryCohere(chatHistory = []) {
    let cohereHistory = [];
    chatHistory.forEach((message) => {
      switch (message.role) {
        case "system":
          cohereHistory.push({ role: "SYSTEM", message: message.content });
          break;
        case "user":
          cohereHistory.push({ role: "USER", message: message.content });
          break;
        case "assistant":
          cohereHistory.push({ role: "CHATBOT", message: message.content });
          break;
      }
    });
    return cohereHistory;
  }
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
  // Context window for a given model name; 4,096 when unknown.
  static promptWindowLimit(modelName) {
    return MODEL_MAP.get("cohere", modelName) ?? 4_096;
  }
  promptWindowLimit() {
    return MODEL_MAP.get("cohere", this.model) ?? 4_096;
  }
  // No model listing is consulted — any configured model is accepted.
  async isValidChatCompletionModel() {
    return true;
  }
  // Assemble the OpenAI-style message array; converted to Cohere's shape at
  // request time in getChatCompletion/streamGetChatCompletion.
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }
  /**
   * Non-streaming chat completion with performance metrics.
   * @returns {Promise<{textResponse: string, metrics: object}|null>} null when
   * the API returns no text.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const message = messages[messages.length - 1].content; // Get the last message
    const cohereHistory = this.#convertChatHistoryCohere(messages.slice(0, -1)); // Remove the last message and convert to Cohere
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.cohere.chat({
        model: this.model,
        message: message,
        chatHistory: cohereHistory,
        temperature,
      })
    );
    if (
      !result.output.hasOwnProperty("text") ||
      result.output.text.length === 0
    )
      return null;
    const promptTokens = result.output.meta?.tokens?.inputTokens || 0;
    const completionTokens = result.output.meta?.tokens?.outputTokens || 0;
    return {
      textResponse: result.output.text,
      metrics: {
        prompt_tokens: promptTokens,
        completion_tokens: completionTokens,
        total_tokens: promptTokens + completionTokens,
        outputTps: completionTokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }
  // Streaming chat completion, wrapped for token/latency measurement.
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const message = messages[messages.length - 1].content; // Get the last message
    const cohereHistory = this.#convertChatHistoryCohere(messages.slice(0, -1)); // Remove the last message and convert to Cohere
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.cohere.chatStream({
        model: this.model,
        message: message,
        chatHistory: cohereHistory,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }
  /**
   * Handles the stream response from the Cohere API.
   * Forwards "text-generation" events to the client as response chunks,
   * captures token usage from the final "stream-end" event, and resolves
   * with the accumulated text. Client disconnects abort cleanly.
   * @param {Object} response - the response object
   * @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream - the stream response from the Cohere API w/tracking
   * @param {Object} responseProps - the response properties
   * @returns {Promise<string>}
   */
  async handleStream(response, stream, responseProps) {
    return new Promise(async (resolve) => {
      const { uuid = v4(), sources = [] } = responseProps;
      let fullText = "";
      let usage = {
        prompt_tokens: 0,
        completion_tokens: 0,
      };
      // If the client closes the connection mid-stream, flush what we have,
      // finalize measurement, and resolve with the partial text.
      const handleAbort = () => {
        writeResponseChunk(response, {
          uuid,
          sources,
          type: "abort",
          textResponse: fullText,
          close: true,
          error: false,
        });
        response.removeListener("close", handleAbort);
        stream.endMeasurement(usage);
        resolve(fullText);
      };
      response.on("close", handleAbort);
      try {
        for await (const chat of stream) {
          if (chat.eventType === "stream-end") {
            const usageMetrics = chat?.response?.meta?.tokens || {};
            usage.prompt_tokens = usageMetrics.inputTokens || 0;
            usage.completion_tokens = usageMetrics.outputTokens || 0;
          }
          if (chat.eventType === "text-generation") {
            const text = chat.text;
            fullText += text;
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: text,
              close: false,
              error: false,
            });
          }
        }
        // Send the final, closing chunk once the stream is exhausted.
        writeResponseChunk(response, {
          uuid,
          sources,
          type: "textResponseChunk",
          textResponse: "",
          close: true,
          error: false,
        });
        response.removeListener("close", handleAbort);
        stream.endMeasurement(usage);
        resolve(fullText);
      } catch (error) {
        // Surface the stream error to the client but still resolve (never reject).
        writeResponseChunk(response, {
          uuid,
          sources,
          type: "abort",
          textResponse: null,
          close: true,
          error: error.message,
        });
        response.removeListener("close", handleAbort);
        stream.endMeasurement(usage);
        resolve(fullText);
      }
    });
  }
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
  // Compress the prompt + history into the model's context window.
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
module.exports = {
CohereLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/textGenWebUI/index.js | server/utils/AiProviders/textGenWebUI/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
class TextGenWebUILLM {
  /**
   * Connector for an Oobabooga Text Generation WebUI instance exposing an
   * OpenAI-compatible API.
   * @param {object|null} embedder - Embedding engine; defaults to NativeEmbedder.
   * @throws {Error} when TEXT_GEN_WEB_UI_BASE_PATH is not set.
   */
  constructor(embedder = null) {
    const { OpenAI: OpenAIApi } = require("openai");
    if (!process.env.TEXT_GEN_WEB_UI_BASE_PATH)
      throw new Error(
        "TextGenWebUI must have a valid base path to use for the api."
      );
    this.className = "TextGenWebUILLM";
    this.basePath = process.env.TEXT_GEN_WEB_UI_BASE_PATH;
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: process.env.TEXT_GEN_WEB_UI_API_KEY ?? null,
    });
    // TextGenWebUI serves whatever model is loaded server-side, so no model
    // name is tracked client-side.
    this.model = null;
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  // Render retrieved context snippets into a delimited text section appended
  // to the system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Token context window from TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT (default 4096).
   * @throws {Error} when the env value is set but not numeric.
   */
  static promptWindowLimit(_modelName) {
    const limit = process.env.TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No token context limit was set.");
    return Number(limit);
  }

  // Ensure the user set a value for the token limit
  // and if undefined - assume 4096 window.
  promptWindowLimit() {
    const limit = process.env.TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No token context limit was set.");
    return Number(limit);
  }

  // Short circuit since we have no idea if the model is valid or not
  // in pre-flight for generic endpoints
  isValidChatCompletionModel(_modelName = "") {
    return true;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]} plain string when no attachments, else an
   * OpenAI-style multimodal content array.
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the full message array (system + history + user turn).
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns {object[]} OpenAI-format messages
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Run a blocking (non-streamed) chat completion.
   * @returns {Promise<object|null>} text + metrics, or null when the server
   * returned no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    // Generic endpoints may omit `usage` entirely; default every field to 0
    // so outputTps cannot become NaN (undefined / duration).
    const usage = result.output.usage ?? {};
    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: usage.prompt_tokens || 0,
        completion_tokens: usage.completion_tokens || 0,
        total_tokens: usage.total_tokens || 0,
        outputTps: (usage.completion_tokens || 0) / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Start a streamed chat completion wrapped in the performance monitor.
   * @returns {Promise<import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream>}
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
module.exports = {
TextGenWebUILLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/koboldCPP/index.js | server/utils/AiProviders/koboldCPP/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
clientAbortedHandler,
writeResponseChunk,
formatChatHistory,
} = require("../../helpers/chat/responses");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const { v4: uuidv4 } = require("uuid");
class KoboldCPPLLM {
  /**
   * Connector for a KoboldCPP server exposing an OpenAI-compatible API.
   * @param {object|null} embedder - Embedding engine; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - overrides KOBOLD_CPP_MODEL_PREF.
   * @throws {Error} when base path or model is not configured.
   */
  constructor(embedder = null, modelPreference = null) {
    const { OpenAI: OpenAIApi } = require("openai");
    if (!process.env.KOBOLD_CPP_BASE_PATH)
      throw new Error(
        "KoboldCPP must have a valid base path to use for the api."
      );

    this.className = "KoboldCPPLLM";
    this.basePath = process.env.KOBOLD_CPP_BASE_PATH;
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: null,
    });
    this.model = modelPreference ?? process.env.KOBOLD_CPP_MODEL_PREF ?? null;
    if (!this.model) throw new Error("KoboldCPP must have a valid model set.");
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.maxTokens = Number(process.env.KOBOLD_CPP_MAX_TOKENS) || 2048;
    this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  // Render retrieved context snippets into a delimited text section appended
  // to the system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Token context window from KOBOLD_CPP_MODEL_TOKEN_LIMIT (default 4096).
   * @throws {Error} when the env value is set but not numeric.
   */
  static promptWindowLimit(_modelName) {
    const limit = process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No token context limit was set.");
    return Number(limit);
  }

  // Ensure the user set a value for the token limit
  // and if undefined - assume 4096 window.
  promptWindowLimit() {
    const limit = process.env.KOBOLD_CPP_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No token context limit was set.");
    return Number(limit);
  }

  // Short circuit since we have no idea if the model is valid or not
  // in pre-flight for generic endpoints
  isValidChatCompletionModel(_modelName = "") {
    return true;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]} plain string when no attachments, else an
   * OpenAI-style multimodal content array.
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the full message array (system + history + user turn).
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns {object[]} OpenAI-format messages
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Run a blocking (non-streamed) chat completion. KoboldCPP does not report
   * usage, so token counts are estimated locally.
   * @returns {Promise<object|null>} text + metrics, or null when no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
          max_tokens: this.maxTokens,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    const promptTokens = LLMPerformanceMonitor.countTokens(messages);
    const completionTokens = LLMPerformanceMonitor.countTokens([
      { content: result.output.choices[0].message.content },
    ]);

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: promptTokens,
        completion_tokens: completionTokens,
        total_tokens: promptTokens + completionTokens,
        outputTps: completionTokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Start a streamed chat completion wrapped in the performance monitor.
   * @returns {Promise<import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream>}
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
        max_tokens: this.maxTokens,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  /**
   * Relay a measured completion stream to the client response.
   * Fixes vs. prior version: guards chunks without a choices[0] entry
   * (previously threw a TypeError), always settles the promise even when the
   * stream ends without a finish_reason (previously hung and leaked the
   * `close` listener), surfaces mid-stream errors as an abort chunk, and
   * guarantees endMeasurement/resolve run exactly once.
   * @returns {Promise<string>} full accumulated response text
   */
  handleStream(response, stream, responseProps) {
    const { uuid = uuidv4(), sources = [] } = responseProps;

    return new Promise(async (resolve) => {
      let fullText = "";
      let settled = false; // ensures we finalize exactly once
      let usage = {
        prompt_tokens: LLMPerformanceMonitor.countTokens(stream.messages || []),
        completion_tokens: 0,
      };

      // Close out token accounting and resolve with accumulated text.
      const finalize = () => {
        if (settled) return;
        settled = true;
        response.removeListener("close", handleAbort);
        usage.completion_tokens = LLMPerformanceMonitor.countTokens([
          { content: fullText },
        ]);
        stream?.endMeasurement(usage);
        resolve(fullText);
      };

      // Client disconnected mid-stream: account for the partial text and
      // resolve via the shared abort handler.
      const handleAbort = () => {
        if (settled) return;
        settled = true;
        usage.completion_tokens = LLMPerformanceMonitor.countTokens([
          { content: fullText },
        ]);
        stream?.endMeasurement(usage);
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);

      try {
        for await (const chunk of stream) {
          const message = chunk?.choices?.[0];
          const token = message?.delta?.content;
          if (token) {
            fullText += token;
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: token,
              close: false,
              error: false,
            });
          }

          // KoboldCPP finishes with "length" or "stop"
          if (
            message?.finish_reason === "length" ||
            message?.finish_reason === "stop"
          ) {
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            finalize();
          }
        }

        // Stream exhausted without an explicit finish_reason - still close the
        // client stream and settle so callers are never left hanging.
        if (!settled) {
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
          finalize();
        }
      } catch (error) {
        // Iteration failure: report as an abort chunk, then settle with
        // whatever text was received before the error.
        if (!settled) {
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "abort",
            textResponse: null,
            close: true,
            error: error.message,
          });
          finalize();
        }
      }
    });
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
module.exports = {
KoboldCPPLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/localAi/index.js | server/utils/AiProviders/localAi/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
class LocalAiLLM {
  /**
   * Connector for a LocalAI server exposing an OpenAI-compatible API.
   * @param {object|null} embedder - Embedding engine; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - overrides LOCAL_AI_MODEL_PREF.
   * @throws {Error} when LOCAL_AI_BASE_PATH is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.LOCAL_AI_BASE_PATH)
      throw new Error("No LocalAI Base Path was set.");

    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: process.env.LOCAL_AI_BASE_PATH,
      apiKey: process.env.LOCAL_AI_API_KEY ?? null,
    });
    this.model = modelPreference || process.env.LOCAL_AI_MODEL_PREF;
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

  // Render retrieved context snippets into a delimited text section appended
  // to the system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Token context window from LOCAL_AI_MODEL_TOKEN_LIMIT (default 4096).
   * @throws {Error} when the env value is set but not numeric.
   */
  static promptWindowLimit(_modelName) {
    const limit = process.env.LOCAL_AI_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No LocalAi token context limit was set.");
    return Number(limit);
  }

  // Ensure the user set a value for the token limit
  // and if undefined - assume 4096 window.
  promptWindowLimit() {
    const limit = process.env.LOCAL_AI_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No LocalAi token context limit was set.");
    return Number(limit);
  }

  // Short circuit - no pre-flight validation available for LocalAI models.
  async isValidChatCompletionModel(_ = "") {
    return true;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]} plain string when no attachments, else an
   * OpenAI-style multimodal content array.
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the full message array (system + history + user turn).
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns {object[]} OpenAI-format messages
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Run a blocking (non-streamed) chat completion. LocalAI does not reliably
   * report usage, so token counts are estimated locally.
   * @returns {Promise<object|null>} text + metrics, or null when no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `LocalAI chat: ${this.model} is not valid for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions.create({
        model: this.model,
        messages,
        temperature,
      })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    const promptTokens = LLMPerformanceMonitor.countTokens(messages);
    // Wrap the response text in a message-shaped object so countTokens gets
    // the same input shape as the prompt-side call (an array of {content}),
    // matching how every other provider invokes it.
    const completionTokens = LLMPerformanceMonitor.countTokens([
      { content: result.output.choices[0].message.content },
    ]);

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: promptTokens,
        completion_tokens: completionTokens,
        total_tokens: promptTokens + completionTokens,
        outputTps: completionTokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Start a streamed chat completion wrapped in the performance monitor.
   * @returns {Promise<import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream>}
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `LocalAi chat: ${this.model} is not valid for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
module.exports = {
LocalAiLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/huggingface/index.js | server/utils/AiProviders/huggingface/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
class HuggingFaceLLM {
  /**
   * Connector for a dedicated HuggingFace Inference Endpoint that speaks the
   * OpenAI-compatible TGI interface.
   * @param {object|null} embedder - Embedding engine; defaults to NativeEmbedder.
   * @param {string|null} _modelPreference - unused; HF endpoints host one model.
   * @throws {Error} when the endpoint URL or access token is not configured.
   */
  constructor(embedder = null, _modelPreference = null) {
    if (!process.env.HUGGING_FACE_LLM_ENDPOINT)
      throw new Error("No HuggingFace Inference Endpoint was set.");
    if (!process.env.HUGGING_FACE_LLM_API_KEY)
      throw new Error("No HuggingFace Access Token was set.");

    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: `${process.env.HUGGING_FACE_LLM_ENDPOINT}/v1`,
      apiKey: process.env.HUGGING_FACE_LLM_API_KEY,
    });

    // When using HF inference server - the model param is not required so
    // we can stub it here. HF Endpoints can only run one model at a time.
    // We set to 'tgi' so that endpoint for HF can accept message format
    this.model = "tgi";
    const contextWindow = this.promptWindowLimit();
    this.limits = {
      history: contextWindow * 0.15,
      system: contextWindow * 0.15,
      user: contextWindow * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.2;
  }

  // Render retrieved context snippets into a delimited text section appended
  // to the instruction message.
  #appendContext(contextTexts = []) {
    if (!contextTexts?.length) return "";
    let rendered = "\nContext:\n";
    contextTexts.forEach((text, i) => {
      rendered += `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
    });
    return rendered;
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Token context window from HUGGING_FACE_LLM_TOKEN_LIMIT (default 4096).
   * @throws {Error} when the env value is set but not numeric.
   */
  static promptWindowLimit(_modelName) {
    const limit = process.env.HUGGING_FACE_LLM_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No HuggingFace token context limit was set.");
    return Number(limit);
  }

  // Instance variant simply defers to the static env-backed lookup.
  promptWindowLimit() {
    return HuggingFaceLLM.promptWindowLimit();
  }

  // No pre-flight model validation is possible for HF endpoints.
  async isValidChatCompletionModel(_ = "") {
    return true;
  }

  /**
   * Build the message array for a completion. HF chat models do not honor a
   * system role, so the system prompt is emulated with a leading
   * user/assistant exchange.
   * @returns {object[]} OpenAI-format messages
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const primer = [
      {
        role: "user",
        content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
      },
      {
        role: "assistant",
        content: "Okay, I will follow those instructions",
      },
    ];
    return [...primer, ...chatHistory, { role: "user", content: userPrompt }];
  }

  /**
   * Run a blocking (non-streamed) chat completion.
   * @returns {Promise<object|null>} text + metrics, or null when no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({ model: this.model, messages, temperature })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    const { output, duration } = result;
    if (!output.hasOwnProperty("choices") || output.choices.length === 0)
      return null;

    const usage = output.usage;
    return {
      textResponse: output.choices[0].message.content,
      metrics: {
        prompt_tokens: usage?.prompt_tokens || 0,
        completion_tokens: usage?.completion_tokens || 0,
        total_tokens: usage?.total_tokens || 0,
        outputTps: (usage?.completion_tokens || 0) / duration,
        duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Start a streamed chat completion wrapped in the performance monitor.
   * @returns {Promise<import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream>}
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const completionRequest = this.openai.chat.completions.create({
      model: this.model,
      stream: true,
      messages,
      temperature,
    });
    return await LLMPerformanceMonitor.measureStream({
      func: completionRequest,
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrappers delegating embedding work to the configured embedder so
  // every LLM provider exposes a uniform embedding interface.
  async embedTextInput(textInput) {
    const embedding = await this.embedder.embedTextInput(textInput);
    return embedding;
  }
  async embedChunks(textChunks = []) {
    const embeddings = await this.embedder.embedChunks(textChunks);
    return embeddings;
  }

  // Compress the constructed prompt + history to fit the context window.
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const compressed = await messageArrayCompressor(
      this,
      this.constructPrompt(promptArgs),
      rawHistory
    );
    return compressed;
  }
}
module.exports = {
HuggingFaceLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/ppio/index.js | server/utils/AiProviders/ppio/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const cacheFolder = path.resolve(
process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "models", "ppio")
: path.resolve(__dirname, `../../../storage/models/ppio`)
);
class PPIOLLM {
  /**
   * Connector for the PPIO (PPInfra) OpenAI-compatible API. Model metadata
   * (context windows) is fetched remotely and cached on disk in `cacheFolder`
   * (module-level constant) for one week.
   * @param {object|null} embedder - Embedding engine; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - overrides PPIO_MODEL_PREF.
   * @throws {Error} when PPIO_API_KEY is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.PPIO_API_KEY) throw new Error("No PPIO API key was set.");
    this.className = "PPIOLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.basePath = "https://api.ppinfra.com/v3/openai/";
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: process.env.PPIO_API_KEY ?? null,
      defaultHeaders: {
        "HTTP-Referer": "https://anythingllm.com",
        "X-API-Source": "anythingllm",
      },
    });
    this.model =
      modelPreference ||
      process.env.PPIO_MODEL_PREF ||
      "qwen/qwen2.5-32b-instruct";
    // promptWindowLimit() reads the disk cache; falls back to 4096 when the
    // cache does not yet contain this model.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;

    // Ensure the on-disk model cache directory exists before we resolve the
    // cache file paths used by models()/#cacheIsStale().
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
    this.log(`Loaded with model: ${this.model}`);
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  // Refresh the on-disk model cache from the PPIO API when it is missing or
  // older than one week. Returns false when the cache was already fresh;
  // otherwise returns undefined after refetching.
  async #syncModels() {
    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
      return false;
    this.log("Model cache is not present or stale. Fetching from PPIO API.");
    await fetchPPIOModels();
    return;
  }

  // True when the .cached_at timestamp is missing or older than one week.
  #cacheIsStale() {
    const MAX_STALE = 6.048e8; // 1 Week in MS
    if (!fs.existsSync(this.cacheAtPath)) return true;

    const now = Number(new Date());
    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
    return now - timestampMs > MAX_STALE;
  }

  // Render retrieved context snippets into a delimited text section appended
  // to the system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  // Read the cached model map from disk; {} when the cache file is absent
  // or unparsable.
  models() {
    if (!fs.existsSync(this.cacheModelPath)) return {};
    return safeJsonParse(
      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
      {}
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  // Context window for the selected model from the disk cache; 4096 default.
  promptWindowLimit() {
    const model = this.models()[this.model];
    if (!model) return 4096; // Default to 4096 if we cannot find the model
    return model?.maxLength || 4096;
  }

  // Syncs the remote model cache first, then checks membership.
  async isValidChatCompletionModel(model = "") {
    await this.#syncModels();
    const availableModels = this.models();
    return Object.prototype.hasOwnProperty.call(availableModels, model);
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * NOTE(review): currently unused - constructPrompt below does not pass
   * attachments through; kept for interface parity with other providers.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "auto",
        },
      });
    }
    return content.flat();
  }

  // Construct the full message array (system + history + user turn).
  // Attachments are intentionally not supported for this provider.
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    // attachments = [], - not supported
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }

  /**
   * Run a blocking (non-streamed) chat completion.
   * @returns {Promise<object|null>} text + metrics, or null when no choices.
   * @throws {Error} when the model is not present in the synced model list.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `PPIO chat: ${this.model} is not valid for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !Object.prototype.hasOwnProperty.call(result.output, "choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage.prompt_tokens || 0,
        completion_tokens: result.output.usage.completion_tokens || 0,
        total_tokens: result.output.usage.total_tokens || 0,
        outputTps: result.output.usage.completion_tokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Start a streamed chat completion wrapped in the performance monitor.
   * @throws {Error} when the model is not present in the synced model list.
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `PPIO chat: ${this.model} is not valid for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrappers delegating embedding work to the configured embedder.
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  // Compress the constructed prompt + history to fit the context window.
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
/**
 * Fetch the available PPIO models and persist them to the local model cache
 * (`models.json` plus a `.cached_at` timestamp in `cacheFolder`).
 * @returns {Promise<Record<string, {id:string, name:string, organization:string, maxLength:number}>>}
 *  Map of model id -> metadata; {} on any network, HTTP, or filesystem failure.
 */
async function fetchPPIOModels() {
  return await fetch(`https://api.ppinfra.com/v3/openai/models`, {
    method: "GET",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${process.env.PPIO_API_KEY}`,
    },
  })
    .then((res) => {
      // Surface HTTP-level failures (bad key, outage) instead of attempting
      // to parse an error payload as a model list and caching the result.
      if (!res.ok) throw new Error(`PPIO models request failed: ${res.status}`);
      return res.json();
    })
    .then(({ data = [] }) => {
      const models = {};
      data.forEach((model) => {
        // Model ids look like "org/model-name"; fall back to "PPIO" org.
        const organization = model.id?.split("/")?.[0] || "PPIO";
        models[model.id] = {
          id: model.id,
          name: model.display_name || model.title || model.id,
          organization,
          maxLength: model.context_size || 4096,
        };
      });

      // Persist the model map and a freshness timestamp for #cacheIsStale().
      if (!fs.existsSync(cacheFolder))
        fs.mkdirSync(cacheFolder, { recursive: true });
      fs.writeFileSync(
        path.resolve(cacheFolder, "models.json"),
        JSON.stringify(models),
        {
          encoding: "utf-8",
        }
      );
      fs.writeFileSync(
        path.resolve(cacheFolder, ".cached_at"),
        String(Number(new Date())),
        {
          encoding: "utf-8",
        }
      );
      return models;
    })
    .catch((e) => {
      // Best-effort: any failure leaves the existing cache untouched and
      // yields an empty model map.
      console.error(e);
      return {};
    });
}
module.exports = {
PPIOLLM,
fetchPPIOModels,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/fireworksAi/index.js | server/utils/AiProviders/fireworksAi/index.js | const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const cacheFolder = path.resolve(
process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "models", "fireworks")
: path.resolve(__dirname, `../../../storage/models/fireworks`)
);
class FireworksAiLLM {
constructor(embedder = null, modelPreference = null) {
this.className = "FireworksAiLLM";
if (!process.env.FIREWORKS_AI_LLM_API_KEY)
throw new Error("No FireworksAI API key was set.");
const { OpenAI: OpenAIApi } = require("openai");
this.openai = new OpenAIApi({
baseURL: "https://api.fireworks.ai/inference/v1",
apiKey: process.env.FIREWORKS_AI_LLM_API_KEY ?? null,
});
this.model = modelPreference || process.env.FIREWORKS_AI_LLM_MODEL_PREF;
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = !embedder ? new NativeEmbedder() : embedder;
this.defaultTemp = 0.7;
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
this.cacheModelPath = path.resolve(cacheFolder, "models.json");
this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
}
log(text, ...args) {
console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
}
// This checks if the .cached_at file has a timestamp that is more than 1Week (in millis)
// from the current date. If it is, then we will refetch the API so that all the models are up
// to date.
#cacheIsStale() {
const MAX_STALE = 6.048e8; // 1 Week in MS
if (!fs.existsSync(this.cacheAtPath)) return true;
const now = Number(new Date());
const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
return now - timestampMs > MAX_STALE;
}
// This function fetches the models from the ApiPie API and caches them locally.
// We do this because the ApiPie API has a lot of models, and we need to get the proper token context window
// for each model and this is a constructor property - so we can really only get it if this cache exists.
// We used to have this as a chore, but given there is an API to get the info - this makes little sense.
// This might slow down the first request, but we need the proper token context window
// for each model and this is a constructor property - so we can really only get it if this cache exists.
async #syncModels() {
if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
return false;
this.log(
"Model cache is not present or stale. Fetching from FireworksAI API."
);
await fireworksAiModels();
return;
}
models() {
if (!fs.existsSync(this.cacheModelPath)) return {};
return safeJsonParse(
fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
{}
);
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(modelName) {
const cacheModelPath = path.resolve(cacheFolder, "models.json");
const availableModels = fs.existsSync(cacheModelPath)
? safeJsonParse(
fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
{}
)
: {};
return availableModels[modelName]?.maxLength || 4096;
}
// Ensure the user set a value for the token limit
// and if undefined - assume 4096 window.
promptWindowLimit() {
const availableModels = this.models();
return availableModels[this.model]?.maxLength || 4096;
}
async isValidChatCompletionModel(model = "") {
await this.#syncModels();
const availableModels = this.models();
return availableModels.hasOwnProperty(model);
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`FireworksAI chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions.create({
model: this.model,
messages,
temperature,
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
model: this.model,
timestamp: new Date(),
},
};
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`FireworksAI chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
func: this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages,
runPromptTokenCalculation: false,
modelTag: this.model,
});
return measuredStreamRequest;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
/**
 * Fetches the chat-capable model list from the FireworksAI API and caches it
 * on disk (models.json + .cached_at timestamp) under the local cache folder.
 * @param {string|null} providedApiKey - overrides the env-configured API key
 * @returns {Promise<Record<string, object>>} map of model id -> metadata,
 *   or {} when no models were found or the request failed.
 */
async function fireworksAiModels(providedApiKey = null) {
  const apiKey = providedApiKey || process.env.FIREWORKS_AI_LLM_API_KEY || null;
  const { OpenAI: OpenAIApi } = require("openai");
  const client = new OpenAIApi({
    baseURL: "https://api.fireworks.ai/inference/v1",
    apiKey: apiKey,
  });

  try {
    const response = await client.models.list();
    const models = response.data ?? [];
    const validModels = {};
    for (const model of models) {
      // There are many models - the ones without a context length are not chat models
      if (!model.hasOwnProperty("context_length")) continue;
      validModels[model.id] = {
        id: model.id,
        name: model.id.split("/").pop(),
        organization: model.owned_by,
        subtype: model.type,
        maxLength: model.context_length ?? 4096,
      };
    }

    if (Object.keys(validModels).length === 0) {
      console.log("fireworksAi: No models found");
      return {};
    }

    // Cache all response information
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    fs.writeFileSync(
      path.resolve(cacheFolder, "models.json"),
      JSON.stringify(validModels),
      { encoding: "utf-8" }
    );
    fs.writeFileSync(
      path.resolve(cacheFolder, ".cached_at"),
      String(Number(new Date())),
      { encoding: "utf-8" }
    );
    return validModels;
  } catch (e) {
    console.error(e);
    return {};
  }
}
// Public API: the provider class plus the cache-building helper (the helper
// is what populates the on-disk model map the class reads from).
module.exports = {
  FireworksAiLLM,
  fireworksAiModels,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/genericOpenAi/index.js | server/utils/AiProviders/genericOpenAi/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
formatChatHistory,
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
const { toValidNumber } = require("../../http");
const { getAnythingLLMUserAgent } = require("../../../endpoints/utils");
/**
 * LLM provider for any OpenAI-compatible inference endpoint. The base path,
 * API key, model, token limits, and max output tokens are all configured via
 * GENERIC_OPEN_AI_* environment variables.
 */
class GenericOpenAiLLM {
  constructor(embedder = null, modelPreference = null) {
    const { OpenAI: OpenAIApi } = require("openai");
    // A base path is mandatory - there is no sensible default endpoint.
    if (!process.env.GENERIC_OPEN_AI_BASE_PATH)
      throw new Error(
        "GenericOpenAI must have a valid base path to use for the api."
      );
    this.className = "GenericOpenAiLLM";
    this.basePath = process.env.GENERIC_OPEN_AI_BASE_PATH;
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: process.env.GENERIC_OPEN_AI_API_KEY ?? null,
      defaultHeaders: {
        "User-Agent": getAnythingLLMUserAgent(),
      },
    });
    this.model =
      modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
    // Max output tokens per completion - defaults to 1024 when unset/invalid.
    this.maxTokens = process.env.GENERIC_OPEN_AI_MAX_TOKENS
      ? toValidNumber(process.env.GENERIC_OPEN_AI_MAX_TOKENS, 1024)
      : 1024;
    if (!this.model)
      throw new Error("GenericOpenAI must have a valid model set.");
    // Budget split of the context window: 15% history, 15% system, 70% user.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  // Renders retrieved context snippets into a delimited block appended to
  // the system prompt; empty string when there is no context.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  // Streaming can be explicitly disabled by env flag for providers whose
  // streaming implementation is broken.
  streamingEnabled() {
    if (process.env.GENERIC_OPENAI_STREAMING_DISABLED === "true") return false;
    return "streamGetChatCompletion" in this;
  }

  static promptWindowLimit(_modelName) {
    const limit = process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No token context limit was set.");
    return Number(limit);
  }

  // Ensure the user set a value for the token limit
  // and if undefined - assume 4096 window.
  promptWindowLimit() {
    const limit = process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No token context limit was set.");
    return Number(limit);
  }

  // Short circuit since we have no idea if the model is valid or not
  // in pre-flight for generic endpoints
  isValidChatCompletionModel(_modelName = "") {
    return true;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   *
   * ## Developer Note
   * This function assumes the generic OpenAI provider is _actually_ OpenAI compatible.
   * For example, Ollama is "OpenAI compatible" but does not support images as a content array.
   * The contentString also is the base64 string WITH `data:image/xxx;base64,` prefix, which may not be the case for all providers.
   * If your provider does not work exactly this way, then attachments will not function or potentially break vision requests.
   * If you encounter this issue, you are welcome to open an issue asking for your specific provider to be supported.
   *
   * This function will **not** be updated for providers that **do not** support images as a content array like OpenAI does.
   * Do not open issues to update this function due to your specific provider not being compatible. Open an issue to request support for your specific provider.
   * @param {Object} props
   * @param {string} props.userPrompt - the user prompt to be sent to the model
   * @param {import("../../helpers").Attachment[]} props.attachments - the array of attachments to be sent to the model
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "high",
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the user prompt for this model.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Parses and prepends reasoning from the response and returns the full text response.
   * Reasoning (DeepSeek-style `reasoning_content`) is wrapped in <think> tags.
   * @param {Object} response
   * @returns {string}
   */
  #parseReasoningFromResponse({ message }) {
    let textResponse = message?.content;
    if (
      !!message?.reasoning_content &&
      message.reasoning_content.trim().length > 0
    )
      textResponse = `<think>${message.reasoning_content}</think>${textResponse}`;
    return textResponse;
  }

  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
          max_tokens: this.maxTokens,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
      metrics: {
        prompt_tokens: result.output?.usage?.prompt_tokens || 0,
        completion_tokens: result.output?.usage?.completion_tokens || 0,
        total_tokens: result.output?.usage?.total_tokens || 0,
        outputTps:
          (result.output?.usage?.completion_tokens || 0) / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
        max_tokens: this.maxTokens,
      }),
      messages,
      // runPromptTokenCalculation: true - there is no way to know whether the
      // connected generic provider reports usage metrics, so presumably the
      // monitor should estimate prompt tokens itself.
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });

    return measuredStreamRequest;
  }

  // TODO: This is a copy of the generic handleStream function in responses.js
  // to specifically handle the DeepSeek reasoning model `reasoning_content` field.
  // When or if ever possible, we should refactor this to be in the generic function.
  handleStream(response, stream, responseProps) {
    const { uuid = uuidv4(), sources = [] } = responseProps;
    let hasUsageMetrics = false;
    let usage = {
      completion_tokens: 0,
    };

    return new Promise(async (resolve) => {
      let fullText = "";
      let reasoningText = "";

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => {
        stream?.endMeasurement(usage);
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);

      try {
        for await (const chunk of stream) {
          const message = chunk?.choices?.[0];
          const token = message?.delta?.content;
          const reasoningToken = message?.delta?.reasoning_content;

          if (
            chunk.hasOwnProperty("usage") && // exists
            !!chunk.usage && // is not null
            Object.values(chunk.usage).length > 0 // has values
          ) {
            if (chunk.usage.hasOwnProperty("prompt_tokens")) {
              usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
            }

            if (chunk.usage.hasOwnProperty("completion_tokens")) {
              hasUsageMetrics = true; // to stop estimating counter
              usage.completion_tokens = Number(chunk.usage.completion_tokens);
            }
          }

          // Reasoning models will always return the reasoning text before the token text.
          if (reasoningToken) {
            // If the reasoning text is empty (''), we need to initialize it
            // and send the first chunk of reasoning text.
            if (reasoningText.length === 0) {
              writeResponseChunk(response, {
                uuid,
                sources: [],
                type: "textResponseChunk",
                textResponse: `<think>${reasoningToken}`,
                close: false,
                error: false,
              });
              reasoningText += `<think>${reasoningToken}`;
              continue;
            } else {
              writeResponseChunk(response, {
                uuid,
                sources: [],
                type: "textResponseChunk",
                textResponse: reasoningToken,
                close: false,
                error: false,
              });
              reasoningText += reasoningToken;
            }
          }

          // If the reasoning text is not empty, but the reasoning token is empty
          // and the token text is not empty we need to close the reasoning text and begin sending the token text.
          if (!!reasoningText && !reasoningToken && token) {
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: `</think>`,
              close: false,
              error: false,
            });
            fullText += `${reasoningText}</think>`;
            reasoningText = "";
          }

          if (token) {
            fullText += token;
            // If we never saw a usage metric, we can estimate them by number of completion chunks
            if (!hasUsageMetrics) usage.completion_tokens++;
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: token,
              close: false,
              error: false,
            });
          }

          if (
            message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
            message.finish_reason !== "" &&
            message.finish_reason !== null
          ) {
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            response.removeListener("close", handleAbort);
            stream?.endMeasurement(usage);
            resolve(fullText);
            break; // Break streaming when a valid finish_reason is first encountered
          }
        }
      } catch (e) {
        console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
        writeResponseChunk(response, {
          uuid,
          type: "abort",
          textResponse: null,
          sources: [],
          close: true,
          error: e.message,
        });
        stream?.endMeasurement(usage);
        resolve(fullText);
      }
    });
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
module.exports = {
GenericOpenAiLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/modelMap/index.js | server/utils/AiProviders/modelMap/index.js | const path = require("path");
const fs = require("fs");
const LEGACY_MODEL_MAP = require("./legacy");
/**
 * Singleton that resolves model context windows from a locally cached copy of
 * LiteLLM's model/price map, falling back to LEGACY_MODEL_MAP constants.
 * The remote map is (re)pulled on construction when the cache is missing or
 * older than the expiry window.
 */
class ContextWindowFinder {
  static instance = null;
  static modelMap = LEGACY_MODEL_MAP;

  /**
   * Mapping for AnythingLLM provider <> LiteLLM provider
   * @type {Record<string, string>}
   */
  static trackedProviders = {
    anthropic: "anthropic",
    openai: "openai",
    cohere: "cohere_chat",
    gemini: "vertex_ai-language-models",
    groq: "groq",
    xai: "xai",
    deepseek: "deepseek",
    moonshot: "moonshot",
    zai: "vercel_ai_gateway", // Vercel has correct context windows for Z.AI models
  };
  static expiryMs = 1000 * 60 * 60 * 24 * 3; // 3 days
  static remoteUrl =
    "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json";

  cacheLocation = path.resolve(
    process.env.STORAGE_DIR
      ? path.resolve(process.env.STORAGE_DIR, "models", "context-windows")
      : path.resolve(__dirname, `../../../storage/models/context-windows`)
  );
  cacheFilePath = path.resolve(this.cacheLocation, "context-windows.json");
  cacheFileExpiryPath = path.resolve(this.cacheLocation, ".cached_at");
  // Only warn once per process about a stale (but still usable) cache.
  seenStaleCacheWarning = false;

  constructor() {
    if (ContextWindowFinder.instance) return ContextWindowFinder.instance;
    ContextWindowFinder.instance = this;
    if (!fs.existsSync(this.cacheLocation))
      fs.mkdirSync(this.cacheLocation, { recursive: true });

    // If the cache is stale or not found at all, pull the model map from remote.
    // Intentionally not awaited - constructors cannot be async; the pull runs
    // in the background and callers tolerate a missing cache until it lands.
    if (this.isCacheStale || !fs.existsSync(this.cacheFilePath))
      void this.#pullRemoteModelMap();
  }

  log(text, ...args) {
    console.log(`\x1b[33m[ContextWindowFinder]\x1b[0m ${text}`, ...args);
  }

  /**
   * Checks if the cache is stale by checking if the cache file exists and if the cache file is older than the expiry time.
   * @returns {boolean}
   */
  get isCacheStale() {
    if (!fs.existsSync(this.cacheFileExpiryPath)) return true;
    // File stores a millisecond epoch as text - coerce explicitly.
    const cachedAt = Number(fs.readFileSync(this.cacheFileExpiryPath, "utf8"));
    return Date.now() - cachedAt > ContextWindowFinder.expiryMs;
  }

  /**
   * Gets the cached model map.
   *
   * Always returns the available model map - even if it is expired since re-pulling
   * the model map only occurs on container start/system start.
   * @returns {Record<string, Record<string, number>> | null} - The cached model map
   */
  get cachedModelMap() {
    if (!fs.existsSync(this.cacheFilePath)) {
      this.log(`\x1b[33m
--------------------------------
[WARNING] Model map cache is not found!
Invalid context windows will be returned leading to inaccurate model responses
or smaller context windows than expected.

You can fix this by restarting AnythingLLM so the model map is re-pulled.
--------------------------------\x1b[0m`);
      return null;
    }

    if (this.isCacheStale && !this.seenStaleCacheWarning) {
      this.log(
        "Model map cache is stale - some model context windows may be incorrect. This is OK and the model map will be re-pulled on next boot."
      );
      this.seenStaleCacheWarning = true;
    }

    return JSON.parse(
      fs.readFileSync(this.cacheFilePath, { encoding: "utf8" })
    );
  }

  /**
   * Pulls the remote model map from the remote URL, formats it and caches it.
   * The cache is only written after the map has been formatted and validated,
   * so a validation failure never leaves a malformed cache on disk.
   * @returns {Record<string, Record<string, number>>} - The formatted model map
   */
  async #pullRemoteModelMap() {
    try {
      this.log("Pulling remote model map...");
      const remoteContextWindowMap = await fetch(ContextWindowFinder.remoteUrl)
        .then((res) => {
          if (res.status !== 200)
            throw new Error(
              "Failed to fetch remote model map - non 200 status code"
            );
          return res.json();
        })
        .catch((error) => {
          this.log("Error syncing remote model map", error);
          return null;
        });
      if (!remoteContextWindowMap) return null;

      const modelMap = this.#validateModelMap(
        this.#formatModelMap(remoteContextWindowMap)
      );
      fs.writeFileSync(this.cacheFilePath, JSON.stringify(modelMap, null, 2));
      fs.writeFileSync(this.cacheFileExpiryPath, Date.now().toString());
      this.log("Remote model map synced and cached");
      return modelMap;
    } catch (error) {
      this.log("Error syncing remote model map", error);
      return null;
    }
  }

  /**
   * Sanity-checks a formatted model map: every provider must be a non-empty
   * object, and every context window a positive number (invalid entries are
   * dropped with a log line rather than failing the whole sync).
   */
  #validateModelMap(modelMap = {}) {
    for (const [provider, models] of Object.entries(modelMap)) {
      // If the models is null/falsey or has no keys, throw an error
      if (typeof models !== "object")
        throw new Error(
          `Invalid model map for ${provider} - models is not an object`
        );
      if (!models || Object.keys(models).length === 0)
        throw new Error(`Invalid model map for ${provider} - no models found!`);

      // Validate that the context window is a number
      for (const [model, contextWindow] of Object.entries(models)) {
        if (isNaN(contextWindow) || contextWindow <= 0) {
          this.log(
            `${provider}:${model} - context window is not a positive number. Got ${contextWindow}.`
          );
          delete models[model];
          continue;
        }
      }
    }
    return modelMap;
  }

  /**
   * Formats the remote model map to a format that is compatible with how we store the model map
   * for all providers who use it.
   * @param {Record<string, any>} modelMap - The remote model map
   * @returns {Record<string, Record<string, number>>} - The formatted model map
   */
  #formatModelMap(modelMap = {}) {
    const formattedModelMap = {};

    for (const [provider, liteLLMProviderTag] of Object.entries(
      ContextWindowFinder.trackedProviders
    )) {
      formattedModelMap[provider] = {};
      const matches = Object.entries(modelMap).filter(
        ([_key, config]) => config.litellm_provider === liteLLMProviderTag
      );
      for (const [key, config] of matches) {
        const contextWindow = Number(config.max_input_tokens);
        if (isNaN(contextWindow)) continue;

        // Some models have a provider/model-tag format, so we need to get the last part since we dont do paths
        // for names with the exception of some router-providers like OpenRouter or Together.
        const modelName = key.split("/").pop();
        formattedModelMap[provider][modelName] = contextWindow;
      }
    }
    return formattedModelMap;
  }

  /**
   * Gets the context window for a given provider and model.
   *
   * If the provider is not found, null is returned.
   * If the model is not found, the provider's entire model map is returned.
   *
   * if both provider and model are provided, the context window for the given model is returned.
   * @param {string|null} provider - The provider to get the context window for
   * @param {string|null} model - The model to get the context window for
   * @returns {number|null} - The context window for the given provider and model
   */
  get(provider = null, model = null) {
    if (!provider || !this.cachedModelMap || !this.cachedModelMap[provider])
      return null;
    if (!model) return this.cachedModelMap[provider];

    const modelContextWindow = this.cachedModelMap[provider][model];
    if (!modelContextWindow) {
      this.log("Invalid access to model context window - not found in cache", {
        provider,
        model,
      });
      return null;
    }
    return Number(modelContextWindow);
  }
}
module.exports = { MODEL_MAP: new ContextWindowFinder() };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
// Hard-coded fallback of provider -> model -> context window (in tokens).
// Used when the remote LiteLLM model map has not been fetched/cached yet.
const LEGACY_MODEL_MAP = {
  anthropic: {
    "claude-instant-1.2": 100000,
    "claude-2.0": 100000,
    "claude-2.1": 200000,
    "claude-3-haiku-20240307": 200000,
    "claude-3-sonnet-20240229": 200000,
    "claude-3-opus-20240229": 200000,
    "claude-3-opus-latest": 200000,
    "claude-3-5-haiku-latest": 200000,
    "claude-3-5-haiku-20241022": 200000,
    "claude-3-5-sonnet-latest": 200000,
    "claude-3-5-sonnet-20241022": 200000,
    "claude-3-5-sonnet-20240620": 200000,
    "claude-3-7-sonnet-20250219": 200000,
    "claude-3-7-sonnet-latest": 200000,
  },
  cohere: {
    "command-r": 128000,
    "command-r-plus": 128000,
    command: 4096,
    "command-light": 4096,
    "command-nightly": 8192,
    "command-light-nightly": 8192,
    "command-r-plus-08-2024": 132096,
    "command-a-03-2025": 288000,
    "c4ai-aya-vision-32b": 16384,
    "command-a-reasoning-08-2025": 288768,
    "command-r-08-2024": 132096,
    "c4ai-aya-vision-8b": 16384,
    "command-r7b-12-2024": 132000,
    "command-r7b-arabic-02-2025": 128000,
    "command-a-vision-07-2025": 128000,
    "c4ai-aya-expanse-8b": 8192,
    "c4ai-aya-expanse-32b": 128000,
    "command-a-translate-08-2025": 8992,
  },
  gemini: {
    "gemini-1.5-pro-001": 2000000,
    "gemini-1.5-pro-002": 2000000,
    "gemini-1.5-pro": 2000000,
    "gemini-1.5-flash-001": 1000000,
    "gemini-1.5-flash": 1000000,
    "gemini-1.5-flash-002": 1000000,
    "gemini-1.5-flash-8b": 1000000,
    "gemini-1.5-flash-8b-001": 1000000,
    "gemini-2.0-flash": 1048576,
    "gemini-2.0-flash-001": 1048576,
    "gemini-2.0-flash-lite-001": 1048576,
    "gemini-2.0-flash-lite": 1048576,
    "gemini-1.5-pro-latest": 2000000,
    "gemini-1.5-flash-latest": 1000000,
    "gemini-1.5-flash-8b-latest": 1000000,
    "gemini-1.5-flash-8b-exp-0827": 1000000,
    "gemini-1.5-flash-8b-exp-0924": 1000000,
    "gemini-2.5-pro-exp-03-25": 1048576,
    "gemini-2.5-pro-preview-03-25": 1048576,
    "gemini-2.0-flash-exp": 1048576,
    "gemini-2.0-flash-exp-image-generation": 1048576,
    "gemini-2.0-flash-lite-preview-02-05": 1048576,
    "gemini-2.0-flash-lite-preview": 1048576,
    "gemini-2.0-pro-exp": 1048576,
    "gemini-2.0-pro-exp-02-05": 1048576,
    "gemini-exp-1206": 1048576,
    "gemini-2.0-flash-thinking-exp-01-21": 1048576,
    "gemini-2.0-flash-thinking-exp": 1048576,
    "gemini-2.0-flash-thinking-exp-1219": 1048576,
    "learnlm-1.5-pro-experimental": 32767,
    "gemma-3-1b-it": 32768,
    "gemma-3-4b-it": 32768,
    "gemma-3-12b-it": 32768,
    "gemma-3-27b-it": 131072,
  },
  groq: {
    "gemma2-9b-it": 8192,
    "gemma-7b-it": 8192,
    "llama3-70b-8192": 8192,
    "llama3-8b-8192": 8192,
    "llama-3.1-70b-versatile": 8000,
    "llama-3.1-8b-instant": 8000,
    "mixtral-8x7b-32768": 32768,
  },
  openai: {
    "gpt-3.5-turbo": 16385,
    "gpt-3.5-turbo-1106": 16385,
    "gpt-4o": 128000,
    "gpt-4o-2024-08-06": 128000,
    "gpt-4o-2024-05-13": 128000,
    "gpt-4o-mini": 128000,
    "gpt-4o-mini-2024-07-18": 128000,
    "gpt-4-turbo": 128000,
    "gpt-4-1106-preview": 128000,
    "gpt-4-turbo-preview": 128000,
    "gpt-4": 8192,
    "gpt-4-32k": 32000,
    "gpt-4.1": 1047576,
    "gpt-4.1-2025-04-14": 1047576,
    "gpt-4.1-mini": 1047576,
    "gpt-4.1-mini-2025-04-14": 1047576,
    "gpt-4.1-nano": 1047576,
    "gpt-4.1-nano-2025-04-14": 1047576,
    "gpt-4.5-preview": 128000,
    "gpt-4.5-preview-2025-02-27": 128000,
    "o1-preview": 128000,
    "o1-preview-2024-09-12": 128000,
    "o1-mini": 128000,
    "o1-mini-2024-09-12": 128000,
    o1: 200000,
    "o1-2024-12-17": 200000,
    "o1-pro": 200000,
    "o1-pro-2025-03-19": 200000,
    "o3-mini": 200000,
    "o3-mini-2025-01-31": 200000,
  },
  deepseek: {
    "deepseek-chat": 128000,
    "deepseek-coder": 128000,
    "deepseek-reasoner": 128000,
  },
  xai: {
    "grok-beta": 131072,
  },
  giteeai: {
    "Qwen2.5-72B-Instruct": 16384,
    "Qwen2.5-14B-Instruct": 24576,
    "Qwen2-7B-Instruct": 24576,
    "Qwen2.5-32B-Instruct": 32768,
    "Qwen2-72B-Instruct": 32768,
    "Qwen2-VL-72B": 32768,
    "QwQ-32B-Preview": 32768,
    "Yi-34B-Chat": 4096,
    "glm-4-9b-chat": 32768,
    "deepseek-coder-33B-instruct": 8192,
    "codegeex4-all-9b": 32768,
    "InternVL2-8B": 32768,
    "InternVL2.5-26B": 32768,
    "InternVL2.5-78B": 32768,
    "DeepSeek-R1-Distill-Qwen-32B": 32768,
    "DeepSeek-R1-Distill-Qwen-1.5B": 32768,
    "DeepSeek-R1-Distill-Qwen-14B": 32768,
    "DeepSeek-R1-Distill-Qwen-7B": 32768,
    "DeepSeek-V3": 32768,
    "DeepSeek-R1": 32768,
  },
};
module.exports = LEGACY_MODEL_MAP;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/xai/index.js | server/utils/AiProviders/xai/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
const { MODEL_MAP } = require("../modelMap");
/**
 * LLM provider for xAI's OpenAI-compatible API (api.x.ai). Context windows
 * are resolved via the shared MODEL_MAP with a 131,072 token fallback.
 */
class XAiLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.XAI_LLM_API_KEY)
      throw new Error("No xAI API key was set.");

    this.className = "XAiLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: "https://api.x.ai/v1",
      apiKey: process.env.XAI_LLM_API_KEY,
    });
    this.model =
      modelPreference || process.env.XAI_LLM_MODEL_PREF || "grok-beta";
    // Budget split of the context window: 15% history, 15% system, 70% user.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(
      `Initialized ${this.model} with context window ${this.promptWindowLimit()}`
    );
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  // Renders retrieved context snippets into a delimited block appended to
  // the system prompt; empty string when there is no context.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  static promptWindowLimit(modelName) {
    return MODEL_MAP.get("xai", modelName) ?? 131_072;
  }

  promptWindowLimit() {
    return MODEL_MAP.get("xai", this.model) ?? 131_072;
  }

  // Short circuit - there is no pre-flight model validation for xAI here.
  isValidChatCompletionModel(_modelName = "") {
    return true;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "high",
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the user prompt for this model.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [], // This is the specific attachment for only this prompt
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Performs a non-streaming chat completion.
   * @returns {Promise<{textResponse: string, metrics: object}|null>} null when
   *   the API returns no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.isValidChatCompletionModel(this.model))
      throw new Error(
        `xAI chat: ${this.model} is not valid for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        // Guard usage access - responses may omit the usage object entirely
        // (mirrors the guarded access used by the GenericOpenAi provider).
        prompt_tokens: result.output?.usage?.prompt_tokens || 0,
        completion_tokens: result.output?.usage?.completion_tokens || 0,
        total_tokens: result.output?.usage?.total_tokens || 0,
        outputTps:
          (result.output?.usage?.completion_tokens || 0) / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.isValidChatCompletionModel(this.model))
      throw new Error(
        `xAI chat: ${this.model} is not valid for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
// Public API: the xAI provider class.
module.exports = {
  XAiLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/cometapi/index.js | server/utils/AiProviders/cometapi/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { v4: uuidv4 } = require("uuid");
const {
writeResponseChunk,
clientAbortedHandler,
formatChatHistory,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const { COMETAPI_IGNORE_PATTERNS } = require("./constants");
// On-disk cache location for the CometAPI model catalog (models.json plus a
// .cached_at timestamp). Lives under STORAGE_DIR when configured, otherwise
// falls back to the in-repo storage folder (development layout).
const cacheFolder = path.resolve(
  process.env.STORAGE_DIR
    ? path.resolve(process.env.STORAGE_DIR, "models", "cometapi")
    : path.resolve(__dirname, `../../../storage/models/cometapi`)
);
/**
 * LLM provider for CometAPI (OpenAI-compatible gateway at api.cometapi.com).
 * Caches the remote model catalog on disk so context-window limits are known
 * at construction time, and force-closes stale streams because some CometAPI
 * models never emit a `finish_reason`.
 */
class CometApiLLM {
  /**
   * Milliseconds of chunk inactivity after which a streaming response is
   * force-closed. Configurable via COMETAPI_LLM_TIMEOUT_MS (min 500ms).
   * @type {number}
   */
  defaultTimeout = 3_000;

  /**
   * @param {object|null} embedder - embedding engine; falls back to NativeEmbedder.
   * @param {string|null} modelPreference - overrides COMETAPI_LLM_MODEL_PREF when provided.
   * @throws {Error} when COMETAPI_LLM_API_KEY is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.COMETAPI_LLM_API_KEY)
      throw new Error("No CometAPI API key was set.");

    this.className = "CometApiLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.basePath = "https://api.cometapi.com/v1";
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: process.env.COMETAPI_LLM_API_KEY ?? null,
      defaultHeaders: {
        "HTTP-Referer": "https://anythingllm.com",
        "X-CometAPI-Source": "anythingllm",
      },
    });
    this.model =
      modelPreference || process.env.COMETAPI_LLM_MODEL_PREF || "gpt-5-mini";
    // Rough token-budget split of the context window for prompt construction.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.timeout = this.#parseTimeout();

    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
    this.log(`Loaded with model: ${this.model}`);
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  /**
   * CometAPI has various models that never return `finish_reasons` and thus leave the stream open
   * which causes issues in subsequent messages. This timeout value forces us to close the stream after
   * x milliseconds. This is a configurable value via the COMETAPI_LLM_TIMEOUT_MS value
   * @returns {number} The timeout value in milliseconds (default: 3_000)
   */
  #parseTimeout() {
    this.log(
      `CometAPI timeout is set to ${process.env.COMETAPI_LLM_TIMEOUT_MS ?? this.defaultTimeout}ms`
    );
    if (isNaN(Number(process.env.COMETAPI_LLM_TIMEOUT_MS)))
      return this.defaultTimeout;
    const setValue = Number(process.env.COMETAPI_LLM_TIMEOUT_MS);
    if (setValue < 500) return 500; // enforce a sane minimum
    return setValue;
  }

  // This checks if the .cached_at file has a timestamp that is more than 1Week (in millis)
  // from the current date. If it is, then we will refetch the API so that all the models are up
  // to date.
  #cacheIsStale() {
    const MAX_STALE = 6.048e8; // 1 Week in MS
    if (!fs.existsSync(this.cacheAtPath)) return true;

    const now = Number(new Date());
    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
    return now - timestampMs > MAX_STALE;
  }

  // The CometAPI model API has a lot of models, so we cache this locally in the directory
  // as if the cache directory JSON file is stale or does not exist we will fetch from API and store it.
  // This might slow down the first request, but we need the proper token context window
  // for each model and this is a constructor property - so we can really only get it if this cache exists.
  // We used to have this as a chore, but given there is an API to get the info - this makes little sense.
  async #syncModels() {
    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
      return false;

    this.log(
      "Model cache is not present or stale. Fetching from CometAPI API."
    );
    await fetchCometApiModels();
    return;
  }

  // Render retrieved context snippets into a single block appended to the system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  /** @returns {Object} the cached model catalog ({} when no cache exists yet). */
  models() {
    if (!fs.existsSync(this.cacheModelPath)) return {};
    return safeJsonParse(
      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
      {}
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  static promptWindowLimit(modelName) {
    const cacheModelPath = path.resolve(cacheFolder, "models.json");
    const availableModels = fs.existsSync(cacheModelPath)
      ? safeJsonParse(
          fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
          {}
        )
      : {};
    return availableModels[modelName]?.maxLength || 4096;
  }

  promptWindowLimit() {
    const availableModels = this.models();
    return availableModels[this.model]?.maxLength || 4096;
  }

  /** Syncs the model cache (if stale) then checks the model exists upstream. */
  async isValidChatCompletionModel(model = "") {
    await this.#syncModels();
    const availableModels = this.models();
    return availableModels.hasOwnProperty(model);
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "auto",
        },
      });
    }
    return content.flat();
  }

  /** Builds the full OpenAI-format message array: system + history + user turn. */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Non-streaming chat completion.
   * @returns {Promise<{textResponse: string, metrics: Object}|null>} null when
   * the API returned no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `CometAPI chat: ${this.model} is not valid for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    // Robustness: some gateways omit `usage` entirely — default to zeros
    // instead of throwing on property access.
    const usage = result.output.usage || {};
    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: usage.prompt_tokens || 0,
        completion_tokens: usage.completion_tokens || 0,
        total_tokens: usage.total_tokens || 0,
        outputTps: (usage.completion_tokens || 0) / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /** Streaming chat completion wrapped in the performance monitor. */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `CometAPI chat: ${this.model} is not valid for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  /**
   * Handles the default stream response for a chat.
   * @param {import("express").Response} response
   * @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
   * @param {Object} responseProps
   * @returns {Promise<string>}
   */
  handleStream(response, stream, responseProps) {
    const timeoutThresholdMs = this.timeout;
    const { uuid = uuidv4(), sources = [] } = responseProps;

    return new Promise(async (resolve) => {
      let fullText = "";
      let lastChunkTime = null; // null when first token is still not received.

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => {
        // BUGFIX: stop the stale-stream watchdog so it cannot fire after the
        // client has already disconnected.
        clearInterval(timeoutCheck);
        stream?.endMeasurement({
          completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
        });
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);

      // NOTICE: Not all CometAPI models will return a stop reason
      // which keeps the connection open and so the model never finalizes the stream
      // like the traditional OpenAI response schema does. So in the case the response stream
      // never reaches a formal close state we maintain an interval timer that if we go >=timeoutThresholdMs with
      // no new chunks then we kill the stream and assume it to be complete. CometAPI is quite fast
      // so this threshold should permit most responses, but we can adjust `timeoutThresholdMs` if
      // we find it is too aggressive.
      const timeoutCheck = setInterval(() => {
        if (lastChunkTime === null) return;

        const now = Number(new Date());
        const diffMs = now - lastChunkTime;
        if (diffMs >= timeoutThresholdMs) {
          this.log(
            `CometAPI stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
          );
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
          clearInterval(timeoutCheck);
          response.removeListener("close", handleAbort);
          stream?.endMeasurement({
            completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
          });
          resolve(fullText);
        }
      }, 500);

      try {
        for await (const chunk of stream) {
          const message = chunk?.choices?.[0];
          const token = message?.delta?.content;
          lastChunkTime = Number(new Date());

          if (token) {
            fullText += token;
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: token,
              close: false,
              error: false,
            });
          }

          if (message.finish_reason !== null) {
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            // BUGFIX: previously missing — without clearing, the watchdog kept
            // running after a normal finish, later wrote to the closed response
            // and double-reported stream metrics via endMeasurement.
            clearInterval(timeoutCheck);
            response.removeListener("close", handleAbort);
            stream?.endMeasurement({
              completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
            });
            resolve(fullText);
          }
        }
      } catch (e) {
        writeResponseChunk(response, {
          uuid,
          sources,
          type: "abort",
          textResponse: null,
          close: true,
          error: e.message,
        });
        // BUGFIX: also clear the watchdog on stream errors.
        clearInterval(timeoutCheck);
        response.removeListener("close", handleAbort);
        stream?.endMeasurement({
          completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
        });
        resolve(fullText);
      }
    });
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
/**
 * Fetches available models from CometAPI and filters out non-chat models
 * (per the patterns documented in cometapi.md), then writes the catalog and
 * a freshness timestamp to the on-disk cache.
 * @returns {Promise<Object>} map of model id -> {id, name, organization, maxLength};
 * {} on any fetch/parse/write failure.
 */
async function fetchCometApiModels() {
  try {
    const res = await fetch(`https://api.cometapi.com/v1/models`, {
      method: "GET",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${process.env.COMETAPI_LLM_API_KEY}`,
      },
    });
    const { data = [] } = await res.json();

    const models = {};
    for (const model of data) {
      // Skip non-chat models using patterns from cometapi.md
      const modelId = model.id.toLowerCase();
      const isIgnored = COMETAPI_IGNORE_PATTERNS.some((pattern) =>
        modelId.includes(pattern.toLowerCase())
      );
      if (isIgnored) continue;

      models[model.id] = {
        id: model.id,
        name: model.id, // CometAPI has limited model info according to cometapi.md
        organization:
          model.id.split("/")[0] || model.id.split("-")[0] || "CometAPI",
        maxLength: model.context_length || 4096, // Conservative default
      };
    }

    // Persist the catalog and a freshness stamp for the stale-cache check.
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    fs.writeFileSync(
      path.resolve(cacheFolder, "models.json"),
      JSON.stringify(models),
      { encoding: "utf-8" }
    );
    fs.writeFileSync(
      path.resolve(cacheFolder, ".cached_at"),
      String(Number(new Date())),
      { encoding: "utf-8" }
    );
    return models;
  } catch (e) {
    console.error("Error fetching CometAPI models:", e);
    return {};
  }
}
module.exports = { CometApiLLM, fetchCometApiModels };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/cometapi/constants.js | server/utils/AiProviders/cometapi/constants.js | // TODO: When CometAPI's model list is upgraded, this operation needs to be removed
// Model filtering patterns from cometapi.md that are not supported by AnythingLLM
module.exports.COMETAPI_IGNORE_PATTERNS = [
// Image generation models
"dall-e",
"dalle",
"midjourney",
"mj_",
"stable-diffusion",
"sd-",
"flux-",
"playground-v",
"ideogram",
"recraft-",
"black-forest-labs",
"/recraft-v3",
"recraftv3",
"stability-ai/",
"sdxl",
// Audio generation models
"suno_",
"tts",
"whisper",
// Video generation models
"runway",
"luma_",
"luma-",
"veo",
"kling_",
"minimax_video",
"hunyuan-t1",
// Utility models
"embedding",
"search-gpts",
"files_retrieve",
"moderation",
// Deepl
"deepl",
];
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/openRouter/index.js | server/utils/AiProviders/openRouter/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { v4: uuidv4 } = require("uuid");
const {
writeResponseChunk,
clientAbortedHandler,
formatChatHistory,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
// On-disk cache location for the OpenRouter model catalog (models.json plus a
// .cached_at timestamp). Lives under STORAGE_DIR when configured, otherwise
// falls back to the in-repo storage folder (development layout).
const cacheFolder = path.resolve(
  process.env.STORAGE_DIR
    ? path.resolve(process.env.STORAGE_DIR, "models", "openrouter")
    : path.resolve(__dirname, `../../../storage/models/openrouter`)
);
/**
 * LLM provider for OpenRouter (OpenAI-compatible gateway at openrouter.ai).
 * Caches the remote model catalog on disk for context-window lookups, supports
 * reasoning models (`<think>` wrapping) and Perplexity inline citations, and
 * force-closes streams from models that never emit a `finish_reason`.
 */
class OpenRouterLLM {
  /**
   * Some openrouter models never send a finish_reason and thus leave the stream open in the UI.
   * However, because OR is a middleware it can also wait an inordinately long time between chunks so we need
   * to ensure that we dont accidentally close the stream too early. If the time between chunks is greater than this timeout
   * we will close the stream and assume it to be complete. This is common for free models or slow providers they can
   * possibly delegate to during invocation.
   * @type {number}
   */
  defaultTimeout = 3_000;

  /**
   * @param {object|null} embedder - embedding engine; falls back to NativeEmbedder.
   * @param {string|null} modelPreference - overrides OPENROUTER_MODEL_PREF when provided.
   * @throws {Error} when OPENROUTER_API_KEY is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.OPENROUTER_API_KEY)
      throw new Error("No OpenRouter API key was set.");

    this.className = "OpenRouterLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.basePath = "https://openrouter.ai/api/v1";
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: process.env.OPENROUTER_API_KEY ?? null,
      defaultHeaders: {
        "HTTP-Referer": "https://anythingllm.com",
        "X-Title": "AnythingLLM",
      },
    });
    this.model =
      modelPreference || process.env.OPENROUTER_MODEL_PREF || "openrouter/auto";
    // Rough token-budget split of the context window for prompt construction.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.timeout = this.#parseTimeout();

    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
    this.log("Initialized with model:", this.model);
  }

  /**
   * Returns true if the model is a Perplexity model.
   * OpenRouter has support for a lot of models and we have some special handling for Perplexity models
   * that support in-line citations.
   * @returns {boolean}
   */
  get isPerplexityModel() {
    return this.model.startsWith("perplexity/");
  }

  /**
   * Generic formatting of a token for the following use cases:
   * - Perplexity models that return inline citations in the token text
   * @param {{token: string, citations: string[]}} options - The token text and citations.
   * @returns {string} - The formatted token text.
   */
  enrichToken({ token, citations = [] }) {
    if (!Array.isArray(citations) || citations.length === 0) return token;
    return token.replace(/\[(\d+)\]/g, (match, index) => {
      const citationIndex = parseInt(index) - 1;
      return citations[citationIndex]
        ? `[[${index}](${citations[citationIndex]})]`
        : match;
    });
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  /**
   * OpenRouter has various models that never return `finish_reasons` and thus leave the stream open
   * which causes issues in subsequent messages. This timeout value forces us to close the stream after
   * x milliseconds. This is a configurable value via the OPENROUTER_TIMEOUT_MS value
   * @returns {number} The timeout value in milliseconds (default: 3_000)
   */
  #parseTimeout() {
    this.log(
      `OpenRouter timeout is set to ${process.env.OPENROUTER_TIMEOUT_MS ?? this.defaultTimeout}ms`
    );
    if (isNaN(Number(process.env.OPENROUTER_TIMEOUT_MS)))
      return this.defaultTimeout;
    const setValue = Number(process.env.OPENROUTER_TIMEOUT_MS);
    if (setValue < 500) return 500; // 500ms is the minimum timeout
    return setValue;
  }

  // This checks if the .cached_at file has a timestamp that is more than 1Week (in millis)
  // from the current date. If it is, then we will refetch the API so that all the models are up
  // to date.
  #cacheIsStale() {
    const MAX_STALE = 6.048e8; // 1 Week in MS
    if (!fs.existsSync(this.cacheAtPath)) return true;

    const now = Number(new Date());
    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
    return now - timestampMs > MAX_STALE;
  }

  // The OpenRouter model API has a lot of models, so we cache this locally in the directory
  // as if the cache directory JSON file is stale or does not exist we will fetch from API and store it.
  // This might slow down the first request, but we need the proper token context window
  // for each model and this is a constructor property - so we can really only get it if this cache exists.
  // We used to have this as a chore, but given there is an API to get the info - this makes little sense.
  async #syncModels() {
    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
      return false;

    this.log(
      "Model cache is not present or stale. Fetching from OpenRouter API."
    );
    await fetchOpenRouterModels();
    return;
  }

  // Render retrieved context snippets into a single block appended to the system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  /** @returns {Object} the cached model catalog ({} when no cache exists yet). */
  models() {
    if (!fs.existsSync(this.cacheModelPath)) return {};
    return safeJsonParse(
      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
      {}
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  static promptWindowLimit(modelName) {
    const cacheModelPath = path.resolve(cacheFolder, "models.json");
    const availableModels = fs.existsSync(cacheModelPath)
      ? safeJsonParse(
          fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
          {}
        )
      : {};
    return availableModels[modelName]?.maxLength || 4096;
  }

  promptWindowLimit() {
    const availableModels = this.models();
    return availableModels[this.model]?.maxLength || 4096;
  }

  /** Syncs the model cache (if stale) then checks the model exists upstream. */
  async isValidChatCompletionModel(model = "") {
    await this.#syncModels();
    const availableModels = this.models();
    return availableModels.hasOwnProperty(model);
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "auto",
        },
      });
    }
    return content.flat();
  }

  /**
   * Parses and prepends reasoning from the response and returns the full text response.
   * @param {Object} response
   * @returns {string}
   */
  #parseReasoningFromResponse({ message }) {
    let textResponse = message?.content;
    if (!!message?.reasoning && message.reasoning.trim().length > 0)
      textResponse = `<think>${message.reasoning}</think>${textResponse}`;
    return textResponse;
  }

  /** Builds the full OpenAI-format message array: system + history + user turn. */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Non-streaming chat completion.
   * @returns {Promise<{textResponse: string, metrics: Object}>}
   * @throws {Error} on invalid model or malformed response body.
   */
  async getChatCompletion(messages = null, { temperature = 0.7, user = null }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `OpenRouter chat: ${this.model} is not valid for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
          // This is an OpenRouter specific option that allows us to get the reasoning text
          // before the token text.
          include_reasoning: true,
          user: user?.id ? `user_${user.id}` : "",
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result?.output?.hasOwnProperty("choices") ||
      result?.output?.choices?.length === 0
    )
      throw new Error(
        `Invalid response body returned from OpenRouter: ${result.output?.error?.message || "Unknown error"} ${result.output?.error?.code || "Unknown code"}`
      );

    // Robustness: some upstream providers omit `usage` — default to zeros
    // instead of throwing on property access.
    const usage = result.output.usage || {};
    return {
      textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
      metrics: {
        prompt_tokens: usage.prompt_tokens || 0,
        completion_tokens: usage.completion_tokens || 0,
        total_tokens: usage.total_tokens || 0,
        outputTps: (usage.completion_tokens || 0) / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /** Streaming chat completion wrapped in the performance monitor. */
  async streamGetChatCompletion(
    messages = null,
    { temperature = 0.7, user = null }
  ) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `OpenRouter chat: ${this.model} is not valid for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
        // This is an OpenRouter specific option that allows us to get the reasoning text
        // before the token text.
        include_reasoning: true,
        user: user?.id ? `user_${user.id}` : "",
      }),
      messages,
      // We have to manually count the tokens
      // OpenRouter has a ton of providers and they all can return slightly differently
      // some return chunk.usage on STOP, some do it after stop, its inconsistent.
      // So it is possible reported metrics are inaccurate since we cannot reliably
      // catch the metrics before resolving the stream - so we just pretend this functionality
      // is not available.
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });

    return measuredStreamRequest;
  }

  /**
   * Handles the default stream response for a chat.
   * @param {import("express").Response} response
   * @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
   * @param {Object} responseProps
   * @returns {Promise<string>}
   */
  handleStream(response, stream, responseProps) {
    const timeoutThresholdMs = this.timeout;
    const { uuid = uuidv4(), sources = [] } = responseProps;

    return new Promise(async (resolve) => {
      let fullText = "";
      let reasoningText = "";
      let lastChunkTime = null; // null when first token is still not received.
      let pplxCitations = []; // Array of inline citations for Perplexity models (if applicable)
      let isPerplexity = this.isPerplexityModel;

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => {
        // BUGFIX: previously missing — without clearing, the watchdog kept
        // running after a client abort and would later write a chunk to the
        // closed response and call endMeasurement a second time.
        clearInterval(timeoutCheck);
        stream?.endMeasurement({
          completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
        });
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);

      // NOTICE: Not all OpenRouter models will return a stop reason
      // which keeps the connection open and so the model never finalizes the stream
      // like the traditional OpenAI response schema does. So in the case the response stream
      // never reaches a formal close state we maintain an interval timer that if we go >=timeoutThresholdMs with
      // no new chunks then we kill the stream and assume it to be complete. OpenRouter is quite fast
      // so this threshold should permit most responses, but we can adjust `timeoutThresholdMs` if
      // we find it is too aggressive.
      const timeoutCheck = setInterval(() => {
        if (lastChunkTime === null) return;

        const now = Number(new Date());
        const diffMs = now - lastChunkTime;
        if (diffMs >= timeoutThresholdMs) {
          console.log(
            `OpenRouter stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
          );
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
          clearInterval(timeoutCheck);
          response.removeListener("close", handleAbort);
          stream?.endMeasurement({
            completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
          });
          resolve(fullText);
        }
      }, 500);

      try {
        for await (const chunk of stream) {
          const message = chunk?.choices?.[0];
          const token = message?.delta?.content;
          const reasoningToken = message?.delta?.reasoning;
          lastChunkTime = Number(new Date());

          // Some models will return citations (e.g. Perplexity) - we should preserve them for inline citations if applicable.
          if (
            isPerplexity &&
            Array.isArray(chunk?.citations) &&
            chunk?.citations?.length !== 0
          )
            pplxCitations.push(...chunk.citations);

          // Reasoning models will always return the reasoning text before the token text.
          // can be null or ''
          if (reasoningToken) {
            const formattedReasoningToken = this.enrichToken({
              token: reasoningToken,
              citations: pplxCitations,
            });

            // If the reasoning text is empty (''), we need to initialize it
            // and send the first chunk of reasoning text.
            if (reasoningText.length === 0) {
              writeResponseChunk(response, {
                uuid,
                sources: [],
                type: "textResponseChunk",
                textResponse: `<think>${formattedReasoningToken}`,
                close: false,
                error: false,
              });
              reasoningText += `<think>${formattedReasoningToken}`;
              continue;
            } else {
              // If the reasoning text is not empty, we need to append the reasoning text
              // to the existing reasoning text.
              writeResponseChunk(response, {
                uuid,
                sources: [],
                type: "textResponseChunk",
                textResponse: formattedReasoningToken,
                close: false,
                error: false,
              });
              reasoningText += formattedReasoningToken;
            }
          }

          // If the reasoning text is not empty, but the reasoning token is empty
          // and the token text is not empty we need to close the reasoning text and begin sending the token text.
          if (!!reasoningText && !reasoningToken && token) {
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: `</think>`,
              close: false,
              error: false,
            });
            fullText += `${reasoningText}</think>`;
            reasoningText = "";
          }

          if (token) {
            const formattedToken = this.enrichToken({
              token,
              citations: pplxCitations,
            });
            fullText += formattedToken;
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: formattedToken,
              close: false,
              error: false,
            });
          }

          if (message.finish_reason !== null) {
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            response.removeListener("close", handleAbort);
            clearInterval(timeoutCheck);
            stream?.endMeasurement({
              completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
            });
            resolve(fullText);
          }
        }
      } catch (e) {
        writeResponseChunk(response, {
          uuid,
          sources,
          type: "abort",
          textResponse: null,
          close: true,
          error: e.message,
        });
        response.removeListener("close", handleAbort);
        clearInterval(timeoutCheck);
        stream?.endMeasurement({
          completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
        });
        resolve(fullText);
      }
    });
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
/**
 * Fetches the full OpenRouter model catalog and writes it (plus a freshness
 * timestamp) to the on-disk cache so context-window limits can be read at
 * construction time.
 * @returns {Promise<Object>} map of model id -> {id, name, organization, maxLength};
 * {} on any fetch/parse/write failure.
 */
async function fetchOpenRouterModels() {
  try {
    const res = await fetch(`https://openrouter.ai/api/v1/models`, {
      method: "GET",
      headers: {
        "Content-Type": "application/json",
      },
    });
    const { data = [] } = await res.json();

    const models = {};
    for (const model of data) {
      const provider = model.id.split("/")[0];
      models[model.id] = {
        id: model.id,
        name: model.name,
        organization: provider.charAt(0).toUpperCase() + provider.slice(1),
        maxLength: model.context_length,
      };
    }

    // Persist the catalog and a freshness stamp for the stale-cache check.
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    fs.writeFileSync(
      path.resolve(cacheFolder, "models.json"),
      JSON.stringify(models),
      { encoding: "utf-8" }
    );
    fs.writeFileSync(
      path.resolve(cacheFolder, ".cached_at"),
      String(Number(new Date())),
      { encoding: "utf-8" }
    );
    return models;
  } catch (e) {
    console.error(e);
    return {};
  }
}
module.exports = { OpenRouterLLM, fetchOpenRouterModels };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/foundry/index.js | server/utils/AiProviders/foundry/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
const { OpenAI: OpenAIApi } = require("openai");
class FoundryLLM {
/** @see FoundryLLM.cacheContextWindows */
static modelContextWindows = {};
  /**
   * Builds a Foundry Local client against FOUNDRY_BASE_PATH.
   * NOTE(review): the context-window caching below is async and not awaited,
   * so `this.limits` is undefined until that promise resolves — confirm all
   * callers tolerate this brief initialization race.
   * @param {object|null} embedder - embedding engine; falls back to NativeEmbedder.
   * @param {string|null} modelPreference - overrides FOUNDRY_MODEL_PREF when provided.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.FOUNDRY_BASE_PATH)
      throw new Error("No Foundry Base Path was set.");
    this.className = "FoundryLLM";
    this.model = modelPreference || process.env.FOUNDRY_MODEL_PREF;
    this.openai = new OpenAIApi({
      baseURL: parseFoundryBasePath(process.env.FOUNDRY_BASE_PATH),
      apiKey: null,
    });
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    // Fire-and-forget: limits become available once context windows are cached.
    FoundryLLM.cacheContextWindows(true).then(() => {
      this.limits = {
        history: this.promptWindowLimit() * 0.15,
        system: this.promptWindowLimit() * 0.15,
        user: this.promptWindowLimit() * 0.7,
      };
      this.#log(
        `Loaded with model: ${this.model} with context window: ${this.promptWindowLimit()}`
      );
    });
  }
  /** Static console logger for class-level operations (e.g. cache warm-up) that run without an instance. */
  static #slog(text, ...args) {
    console.log(`\x1b[36m[FoundryLLM]\x1b[0m ${text}`, ...args);
  }
  /** Instance console logger, prefixed with the class name for grep-ability. */
  #log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
  /** Whether this provider supports streamed chat responses (it does — detected by method presence). */
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
/**
* Cache the context windows for the Foundry models.
* This is done once and then cached for the lifetime of the server. This is absolutely necessary to ensure that the context windows are correct.
* Foundry Local has a weird behavior that when max_completion_tokens is unset it will only allow the output to be 1024 tokens.
*
* If you pass in too large of a max_completion_tokens, it will throw an error.
* If you pass in too little of a max_completion_tokens, you will get stubbed outputs before you reach a real "stop" token.
* So we need to cache the context windows and use them for the lifetime of the server.
* @param {boolean} force
* @returns
*/
static async cacheContextWindows(force = false) {
try {
// Skip if we already have cached context windows and we're not forcing a refresh
if (Object.keys(FoundryLLM.modelContextWindows).length > 0 && !force)
return;
const openai = new OpenAIApi({
baseURL: parseFoundryBasePath(process.env.FOUNDRY_BASE_PATH),
apiKey: null,
});
(await openai.models.list().then((result) => result.data)).map(
(model) => {
const contextWindow =
Number(model.maxInputTokens) + Number(model.maxOutputTokens);
FoundryLLM.modelContextWindows[model.id] = contextWindow;
}
);
FoundryLLM.#slog(`Context windows cached for all models!`);
} catch (e) {
FoundryLLM.#slog(`Error caching context windows: ${e.message}`);
return;
}
}
/**
* Unload a model from the Foundry engine forcefully
* If the model is invalid, we just ignore the error. This is a util
* simply to have the foundry engine drop the resources for the model.
*
* @param {string} modelName
* @returns {Promise<boolean>}
*/
static async unloadModelFromEngine(modelName) {
const basePath = parseFoundryBasePath(process.env.FOUNDRY_BASE_PATH);
const baseUrl = new URL(basePath);
baseUrl.pathname = `/openai/unload/${modelName}`;
baseUrl.searchParams.set("force", "true");
return await fetch(baseUrl.toString())
.then((res) => res.json())
.catch(() => null);
}
static promptWindowLimit(modelName) {
let userDefinedLimit = null;
const systemDefinedLimit =
Number(this.modelContextWindows[modelName]) || 4096;
if (
process.env.FOUNDRY_MODEL_TOKEN_LIMIT &&
!isNaN(Number(process.env.FOUNDRY_MODEL_TOKEN_LIMIT)) &&
Number(process.env.FOUNDRY_MODEL_TOKEN_LIMIT) > 0
)
userDefinedLimit = Number(process.env.FOUNDRY_MODEL_TOKEN_LIMIT);
// The user defined limit is always higher priority than the context window limit, but it cannot be higher than the context window limit
// so we return the minimum of the two, if there is no user defined limit, we return the system defined limit as-is.
if (userDefinedLimit !== null)
return Math.min(userDefinedLimit, systemDefinedLimit);
return systemDefinedLimit;
}
promptWindowLimit() {
return this.constructor.promptWindowLimit(this.model);
}
async isValidChatCompletionModel(_ = "") {
return true;
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) {
return userPrompt;
}
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
detail: "auto",
},
});
}
return content.flat();
}
/**
* Construct the user prompt for this model.
* @param {{attachments: import("../../helpers").Attachment[]}} param0
* @returns
*/
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`Foundry chat: ${this.model} is not valid or defined model for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
max_completion_tokens: this.promptWindowLimit(),
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
model: this.model,
timestamp: new Date(),
},
};
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`Foundry chat: ${this.model} is not valid or defined model for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
func: this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
max_completion_tokens: this.promptWindowLimit(),
}),
messages,
runPromptTokenCalculation: true,
modelTag: this.model,
});
return measuredStreamRequest;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
/**
* Parse the base path for the Foundry container API. Since the base path must end in /v1 and cannot have a trailing slash,
* and the user can possibly set it to anything and likely incorrectly due to pasting behaviors, we need to ensure it is in the correct format.
* @param {string} basePath
* @returns {string}
*/
/**
 * Normalize a user-supplied Foundry endpoint into the canonical `<origin>/v1`
 * form. Users frequently paste URLs with extra paths, trailing slashes, or
 * query strings; only the origin is kept and `/v1` is appended. Anything that
 * cannot be parsed as a URL is returned untouched.
 * @param {string} providedBasePath - Raw endpoint string from settings.
 * @returns {string} Normalized base path, or the input verbatim when invalid.
 */
function parseFoundryBasePath(providedBasePath = "") {
  try {
    return `${new URL(providedBasePath).origin}/v1`;
  } catch {
    return providedBasePath;
  }
}
// Export the provider class plus the base-path normalizer so the latter can
// be reused and tested independently of the class.
module.exports = {
  FoundryLLM,
  parseFoundryBasePath,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/liteLLM/index.js | server/utils/AiProviders/liteLLM/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
class LiteLLM {
constructor(embedder = null, modelPreference = null) {
const { OpenAI: OpenAIApi } = require("openai");
if (!process.env.LITE_LLM_BASE_PATH)
throw new Error(
"LiteLLM must have a valid base path to use for the api."
);
this.className = "LiteLLM";
this.basePath = process.env.LITE_LLM_BASE_PATH;
this.openai = new OpenAIApi({
baseURL: this.basePath,
apiKey: process.env.LITE_LLM_API_KEY ?? null,
});
this.model = modelPreference ?? process.env.LITE_LLM_MODEL_PREF ?? null;
this.maxTokens = process.env.LITE_LLM_MODEL_TOKEN_LIMIT ?? 1024;
if (!this.model) throw new Error("LiteLLM must have a valid model set.");
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
this.log(`Inference API: ${this.basePath} Model: ${this.model}`);
}
log(text, ...args) {
console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(_modelName) {
const limit = process.env.LITE_LLM_MODEL_TOKEN_LIMIT || 4096;
if (!limit || isNaN(Number(limit)))
throw new Error("No token context limit was set.");
return Number(limit);
}
// Ensure the user set a value for the token limit
// and if undefined - assume 4096 window.
promptWindowLimit() {
const limit = process.env.LITE_LLM_MODEL_TOKEN_LIMIT || 4096;
if (!limit || isNaN(Number(limit)))
throw new Error("No token context limit was set.");
return Number(limit);
}
// Short circuit since we have no idea if the model is valid or not
// in pre-flight for generic endpoints
isValidChatCompletionModel(_modelName = "") {
return true;
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) {
return userPrompt;
}
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
},
});
}
return content.flat();
}
/**
* Construct the user prompt for this model.
* @param {{attachments: import("../../helpers").Attachment[]}} param0
* @returns
*/
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
max_tokens: parseInt(this.maxTokens), // LiteLLM requires int
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage?.prompt_tokens || 0,
completion_tokens: result.output.usage?.completion_tokens || 0,
total_tokens: result.output.usage?.total_tokens || 0,
outputTps:
(result.output.usage?.completion_tokens || 0) / result.duration,
duration: result.duration,
model: this.model,
timestamp: new Date(),
},
};
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
func: this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
max_tokens: parseInt(this.maxTokens), // LiteLLM requires int
}),
messages,
// runPromptTokenCalculation: true - We manually count the tokens because they may or may not be provided in the stream
runPromptTokenCalculation: true,
modelTag: this.model,
});
return measuredStreamRequest;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
// Public export of the LiteLLM provider connector.
module.exports = {
  LiteLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/groq/index.js | server/utils/AiProviders/groq/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const { MODEL_MAP } = require("../modelMap");
/**
 * LLM connector for the Groq cloud API (OpenAI-compatible endpoint).
 * Context windows come from the shared MODEL_MAP; vision support is heavily
 * special-cased — see #conditionalPromptStruct for the rationale.
 */
class GroqLLM {
  /**
   * @param {object|null} embedder - Embedding engine; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - Model id override; falls back to GROQ_MODEL_PREF.
   * @throws {Error} When GROQ_API_KEY is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    const { OpenAI: OpenAIApi } = require("openai");
    if (!process.env.GROQ_API_KEY) throw new Error("No Groq API key was set.");

    this.openai = new OpenAIApi({
      baseURL: "https://api.groq.com/openai/v1",
      apiKey: process.env.GROQ_API_KEY,
    });
    this.model =
      modelPreference || process.env.GROQ_MODEL_PREF || "llama-3.1-8b-instant";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }

  /**
   * Render retrieved context snippets into a delimited text block appended
   * to the system prompt.
   * @param {string[]} contextTexts
   * @returns {string} Empty string when there is no context.
   */
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  /** Console logger prefixed with the provider tag. */
  #log(text, ...args) {
    console.log(`\x1b[32m[GroqAi]\x1b[0m ${text}`, ...args);
  }

  /** @returns {boolean} True — streaming is implemented for this provider. */
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Context window for a given Groq model from the shared MODEL_MAP.
   * @param {string} modelName
   * @returns {number} Token limit (8192 when unknown).
   */
  static promptWindowLimit(modelName) {
    return MODEL_MAP.get("groq", modelName) ?? 8192;
  }

  /** Context window for the configured model (8192 when unknown). */
  promptWindowLimit() {
    return MODEL_MAP.get("groq", this.model) ?? 8192;
  }

  /** Any non-empty model name is accepted — no pre-flight validation. */
  async isValidChatCompletionModel(modelName = "") {
    return !!modelName; // name just needs to exist
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) return userPrompt;

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }

  /**
   * Last Updated: October 21, 2024
   * According to https://console.groq.com/docs/vision
   * the vision models supported all make a mess of prompting depending on the model.
   * Currently the llama3.2 models are only in preview and subject to change and the llava model is deprecated - so we will not support attachments for that at all.
   *
   * Since we can only explicitly support the current models, this is a temporary solution.
   * If the attachments are empty or the model is not a vision model, we will return the default prompt structure which will work for all models.
   * If the attachments are present and the model is a vision model - we only return the user prompt with attachments - see comment at end of function for more.
   *
   * Historical attachments are also omitted from prompt chat history for the reasons above. (TDC: Dec 30, 2024)
   */
  #conditionalPromptStruct({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [], // This is the specific attachment for only this prompt
  }) {
    const VISION_MODELS = [
      "llama-3.2-90b-vision-preview",
      "llama-3.2-11b-vision-preview",
    ];
    const DEFAULT_PROMPT_STRUCT = [
      {
        role: "system",
        content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
      },
      ...chatHistory,
      { role: "user", content: userPrompt },
    ];

    // If there are no attachments or model is not a vision model, return the default prompt structure
    // as there is nothing to attach or do and no model limitations to consider
    if (!attachments.length) return DEFAULT_PROMPT_STRUCT;
    if (!VISION_MODELS.includes(this.model)) {
      this.#log(
        `${this.model} is not an explicitly supported vision model! Will omit attachments.`
      );
      return DEFAULT_PROMPT_STRUCT;
    }

    return [
      // Why is the system prompt and history commented out?
      // The current vision models for Groq perform VERY poorly with ANY history or text prior to the image.
      // In order to not get LLM refusals for every single message, we will not include the "system prompt" or even the chat history.
      // This is a temporary solution until Groq fixes their vision models to be more coherent and also handle context prior to the image.
      // Note for the future:
      // Groq vision models also do not support system prompts - which is why you see the user/assistant emulation used instead of "system".
      // This means any vision call is assessed independently of the chat context prior to the image.
      /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
      // {
      //   role: "user",
      //   content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
      // },
      // {
      //   role: "assistant",
      //   content: "OK",
      // },
      // ...chatHistory,
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Construct the user prompt for this model.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns {object[]} OpenAI-style messages array.
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [], // This is the specific attachment for only this prompt
  }) {
    // NOTICE: SEE GroqLLM.#conditionalPromptStruct for more information on how attachments are handled with Groq.
    return this.#conditionalPromptStruct({
      systemPrompt,
      contextTexts,
      chatHistory,
      userPrompt,
      attachments,
    });
  }

  /**
   * Blocking (non-streamed) chat completion.
   * NOTE(review): metrics read `usage.completion_time`/`usage.total_time`,
   * which are Groq-specific timing fields — no `?.` guard here, so a missing
   * usage block would throw; confirm Groq always returns it.
   * @param {object[]|null} messages
   * @param {{temperature?: number}} options
   * @returns {Promise<object|null>} Text response + metrics, or null when no choices returned.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `GroqAI:chatCompletion: ${this.model} is not valid for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage.prompt_tokens || 0,
        completion_tokens: result.output.usage.completion_tokens || 0,
        total_tokens: result.output.usage.total_tokens || 0,
        outputTps:
          result.output.usage.completion_tokens /
          result.output.usage.completion_time,
        duration: result.output.usage.total_time,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Streamed chat completion, wrapped in the performance monitor.
   * Token counting is deferred to the stream (runPromptTokenCalculation: false).
   * @param {object[]|null} messages
   * @param {{temperature?: number}} options
   * @returns {Promise<object>} Measured stream request.
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `GroqAI:streamChatCompletion: ${this.model} is not valid for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  /** Delegate stream consumption to the shared default handler. */
  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  /** Compress the constructed prompt to fit the model's context window. */
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
// Public export of the Groq provider connector.
module.exports = {
  GroqLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/azureOpenAi/index.js | server/utils/AiProviders/azureOpenAi/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
formatChatHistory,
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
/**
 * LLM connector for Azure OpenAI deployments via the OpenAI-compatible
 * `/openai/v1` endpoint. Deployment names are user-defined, so no model
 * validation is possible; reasoning-model behavior must be opted into via
 * AZURE_OPENAI_MODEL_TYPE="reasoning".
 */
class AzureOpenAiLLM {
  /**
   * @param {object|null} embedder - Embedding engine; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - Deployment name override; falls back to OPEN_MODEL_PREF.
   * @throws {Error} When the Azure endpoint or API key is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    const { OpenAI } = require("openai");
    if (!process.env.AZURE_OPENAI_ENDPOINT)
      throw new Error("No Azure API endpoint was set.");
    if (!process.env.AZURE_OPENAI_KEY)
      throw new Error("No Azure API key was set.");

    this.openai = new OpenAI({
      apiKey: process.env.AZURE_OPENAI_KEY,
      baseURL: AzureOpenAiLLM.formatBaseUrl(process.env.AZURE_OPENAI_ENDPOINT),
    });
    this.model = modelPreference ?? process.env.OPEN_MODEL_PREF;
    /*
    Note: Azure OpenAI deployments do not expose model metadata that would allow us to
    programmatically detect whether the deployment uses a reasoning model (o1, o1-mini, o3-mini, etc.).
    As a result, we rely on the user to explicitly set AZURE_OPENAI_MODEL_TYPE="reasoning"
    when using reasoning models, as incorrect configuration might result in chat errors.
    */
    this.isOTypeModel =
      process.env.AZURE_OPENAI_MODEL_TYPE === "reasoning" || false;
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    // FIX: the previous log interpolated `this.apiVersion`, which is never
    // assigned anywhere in this class and always rendered "undefined".
    this.#log(
      `Initialized. Model "${this.model}" @ ${this.promptWindowLimit()} tokens.\nModel Type: ${this.isOTypeModel ? "reasoning" : "default"}`
    );
  }

  /**
   * Formats the Azure OpenAI endpoint URL to the correct format.
   * Forces https, pins the path to /openai/v1, and strips query/hash.
   * @param {string} azureOpenAiEndpoint - The Azure OpenAI endpoint URL.
   * @returns {string} The formatted URL.
   * @throws {Error} When the endpoint is not a parseable URL.
   */
  static formatBaseUrl(azureOpenAiEndpoint) {
    try {
      const url = new URL(azureOpenAiEndpoint);
      url.pathname = "/openai/v1";
      url.protocol = "https";
      url.search = "";
      url.hash = "";
      return url.href;
    } catch (error) {
      throw new Error(
        `"${azureOpenAiEndpoint}" is not a valid URL. Check your settings for the Azure OpenAI provider and set a valid endpoint URL.`
      );
    }
  }

  /** Console logger prefixed with the provider tag. */
  #log(text, ...args) {
    console.log(`\x1b[32m[AzureOpenAi]\x1b[0m ${text}`, ...args);
  }

  /**
   * Render retrieved context snippets into a delimited text block appended
   * to the system prompt.
   * @param {string[]} contextTexts
   * @returns {string} Empty string when there is no context.
   */
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  /** @returns {boolean} True — streaming is implemented for this provider. */
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Token window from AZURE_OPENAI_TOKEN_LIMIT (deployment name is opaque).
   * @returns {number} Configured limit, or 4096 when unset.
   */
  static promptWindowLimit(_modelName) {
    return !!process.env.AZURE_OPENAI_TOKEN_LIMIT
      ? Number(process.env.AZURE_OPENAI_TOKEN_LIMIT)
      : 4096;
  }

  // Sure the user selected a proper value for the token limit
  // could be any of these https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-4-models
  // and if undefined - assume it is the lowest end.
  promptWindowLimit() {
    return !!process.env.AZURE_OPENAI_TOKEN_LIMIT
      ? Number(process.env.AZURE_OPENAI_TOKEN_LIMIT)
      : 4096;
  }

  isValidChatCompletionModel(_modelName = "") {
    // The Azure user names their "models" as deployments and they can be any name
    // so we rely on the user to put in the correct deployment as only they would
    // know it.
    return true;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the messages array for this deployment. Reasoning ("o"-type)
   * models do not accept a system role, so the system prompt is sent as a
   * user message for them.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns {object[]} OpenAI-style messages array.
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [], // This is the specific attachment for only this prompt
  }) {
    const prompt = {
      role: this.isOTypeModel ? "user" : "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Blocking (non-streamed) chat completion. Temperature is omitted for
   * reasoning models, which reject it.
   * @param {object[]} messages
   * @param {{temperature?: number}} options
   * @returns {Promise<object|null>} Text response + metrics, or null when no choices returned.
   * @throws {Error} When no deployment name is configured.
   */
  async getChatCompletion(messages = [], { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        "No OPEN_MODEL_PREF ENV defined. This must the name of a deployment on your Azure account for an LLM chat model like GPT-3.5."
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions.create({
        messages,
        model: this.model,
        ...(this.isOTypeModel ? {} : { temperature }),
      })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        // Guard `usage` with optional chaining for parity with the other
        // providers in this folder.
        prompt_tokens: result.output.usage?.prompt_tokens || 0,
        completion_tokens: result.output.usage?.completion_tokens || 0,
        total_tokens: result.output.usage?.total_tokens || 0,
        outputTps:
          (result.output.usage?.completion_tokens || 0) / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Streamed chat completion, wrapped in the performance monitor.
   * @param {object[]} messages
   * @param {{temperature?: number}} options
   * @returns {Promise<object>} Measured stream request.
   * @throws {Error} When no deployment name is configured.
   */
  async streamGetChatCompletion(messages = [], { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        "No OPEN_MODEL_PREF ENV defined. This must the name of a deployment on your Azure account for an LLM chat model like GPT-3.5."
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: await this.openai.chat.completions.create({
        messages,
        model: this.model,
        ...(this.isOTypeModel ? {} : { temperature }),
        n: 1,
        stream: true,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  /** Delegate stream consumption to the shared default handler. */
  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  /** Compress the constructed prompt to fit the model's context window. */
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
// Public export of the Azure OpenAI provider connector.
module.exports = {
  AzureOpenAiLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/zai/index.js | server/utils/AiProviders/zai/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const { MODEL_MAP } = require("../modelMap");
/**
 * LLM connector for the Z.AI platform (GLM models) via its OpenAI-compatible
 * chat completions endpoint. Context windows come from the shared MODEL_MAP.
 */
class ZAiLLM {
  /**
   * @param {object|null} embedder - Embedding engine; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - Model id override; falls back to ZAI_MODEL_PREF.
   * @throws {Error} When ZAI_API_KEY is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.ZAI_API_KEY) throw new Error("No Z.AI API key was set.");
    this.className = "ZAiLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: "https://api.z.ai/api/paas/v4",
      apiKey: process.env.ZAI_API_KEY,
    });
    this.model = modelPreference || process.env.ZAI_MODEL_PREF || "glm-4.5";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(
      `Initialized ${this.model} with context window ${this.promptWindowLimit()}`
    );
  }

  /** Console logger prefixed with the provider class name. */
  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  /**
   * Render retrieved context snippets into a delimited text block appended
   * to the system prompt.
   * @param {string[]} contextTexts
   * @returns {string} Empty string when there is no context.
   */
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  /** @returns {boolean} True — streaming is implemented for this provider. */
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Context window for a given Z.AI model from the shared MODEL_MAP.
   * @param {string} modelName
   * @returns {number} Token limit (131072 when unknown).
   */
  static promptWindowLimit(modelName) {
    return MODEL_MAP.get("zai", modelName) ?? 131072;
  }

  /** Context window for the configured model (131072 when unknown). */
  promptWindowLimit() {
    return MODEL_MAP.get("zai", this.model) ?? 131072;
  }

  /** Any non-empty model name is accepted — no pre-flight validation. */
  async isValidChatCompletionModel(modelName = "") {
    return !!modelName; // name just needs to exist
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) return userPrompt;

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the user prompt for this model.
   * Note: history is spread in as-is (no formatChatHistory pass), unlike
   * some sibling providers in this folder.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns {object[]} OpenAI-style messages array.
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...chatHistory,
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Blocking (non-streamed) chat completion.
   * NOTE(review): outputTps divides `usage?.completion_tokens` without a
   * `|| 0` fallback, so a missing usage block yields NaN — confirm intended.
   * @param {object[]|null} messages
   * @param {{temperature?: number}} options
   * @returns {Promise<object|null>} Text response + metrics, or null when no choices returned.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage?.prompt_tokens || 0,
        completion_tokens: result.output.usage?.completion_tokens || 0,
        total_tokens: result.output.usage?.total_tokens || 0,
        outputTps: result.output.usage?.completion_tokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Streamed chat completion, wrapped in the performance monitor.
   * @param {object[]|null} messages
   * @param {{temperature?: number}} options
   * @returns {Promise<object>} Measured stream request.
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  /** Delegate stream consumption to the shared default handler. */
  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  /** Compress the constructed prompt to fit the model's context window. */
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
// Public export of the Z.AI provider connector.
module.exports = {
  ZAiLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/lmStudio/index.js | server/utils/AiProviders/lmStudio/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const { OpenAI: OpenAIApi } = require("openai");
// hybrid of openAi LLM chat completion for LMStudio
class LMStudioLLM {
  /** @see LMStudioLLM.cacheContextWindows */
  static modelContextWindows = {};

  /**
   * @param {object|null} embedder - Optional embedder instance; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - Optional model id override.
   * @throws {Error} If LMSTUDIO_BASE_PATH is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.LMSTUDIO_BASE_PATH)
      throw new Error("No LMStudio API Base Path was set.");

    this.lmstudio = new OpenAIApi({
      baseURL: parseLMStudioBasePath(process.env.LMSTUDIO_BASE_PATH), // here is the URL to your LMStudio instance
      apiKey: null,
    });

    // Prior to LMStudio 0.2.17 the `model` param was not required and you could pass anything
    // into that field and it would work. On 0.2.17 LMStudio introduced multi-model chat
    // which now has a bug that reports the server model id as "Loaded from Chat UI"
    // and any other value will crash inferencing. So until this is patched we will
    // try to fetch the `/models` and have the user set it, or just fallback to "Loaded from Chat UI"
    // which will not impact users with <v0.2.17 and should work as well once the bug is fixed.
    this.model =
      modelPreference ||
      process.env.LMSTUDIO_MODEL_PREF ||
      "Loaded from Chat UI";
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;

    // Lazy load the limits to avoid blocking the main thread on cacheContextWindows
    this.limits = null;
    // Deliberately not awaited - limits are resolved later via assertModelContextLimits().
    LMStudioLLM.cacheContextWindows(true);
    this.#log(`initialized with model: ${this.model}`);
  }

  #log(text, ...args) {
    console.log(`\x1b[32m[LMStudio]\x1b[0m ${text}`, ...args);
  }

  static #slog(text, ...args) {
    console.log(`\x1b[32m[LMStudio]\x1b[0m ${text}`, ...args);
  }

  /**
   * Ensure the context-window cache is populated, then derive the prompt
   * budget split (history/system/user) from this model's context window.
   * No-op after the first successful call.
   */
  async assertModelContextLimits() {
    if (this.limits !== null) return;
    await LMStudioLLM.cacheContextWindows();
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
  }

  /**
   * Cache the context windows for the LMStudio models.
   * This is done once and then cached for the lifetime of the server. This is absolutely necessary to ensure that the context windows are correct.
   *
   * This is a convenience to ensure that the context windows are correct and that the user
   * does not have to manually set the context window for each model.
   * @param {boolean} force - Force the cache to be refreshed.
   * @returns {Promise<void>} - A promise that resolves when the cache is refreshed.
   */
  static async cacheContextWindows(force = false) {
    try {
      // Skip if we already have cached context windows and we're not forcing a refresh
      if (Object.keys(LMStudioLLM.modelContextWindows).length > 0 && !force)
        return;

      const endpoint = new URL(
        parseLMStudioBasePath(process.env.LMSTUDIO_BASE_PATH)
      );
      endpoint.pathname = "/api/v0/models";
      const res = await fetch(endpoint.toString());
      if (!res.ok)
        throw new Error(`LMStudio:cacheContextWindows - ${res.statusText}`);
      const { data: models } = await res.json();
      models.forEach((model) => {
        if (model.type === "embeddings") return;
        LMStudioLLM.modelContextWindows[model.id] = model.max_context_length;
      });

      // Only log success when the fetch + parse actually completed. Previously
      // a swallowed fetch error still fell through to this success log.
      LMStudioLLM.#slog(`Context windows cached for all models!`);
    } catch (e) {
      LMStudioLLM.#slog(`Error caching context windows`, e);
      return;
    }
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Resolve the context window for a given model id.
   * @param {string} modelName
   * @returns {number} token limit (always numeric)
   */
  static promptWindowLimit(modelName) {
    if (Object.keys(LMStudioLLM.modelContextWindows).length === 0) {
      this.#slog(
        "No context windows cached - Context window may be inaccurately reported."
      );
      // Coerce the env override so callers always receive a number, not a string.
      return Number(process.env.LMSTUDIO_MODEL_TOKEN_LIMIT) || 4096;
    }

    let userDefinedLimit = null;
    const systemDefinedLimit =
      Number(this.modelContextWindows[modelName]) || 4096;

    if (
      process.env.LMSTUDIO_MODEL_TOKEN_LIMIT &&
      !isNaN(Number(process.env.LMSTUDIO_MODEL_TOKEN_LIMIT)) &&
      Number(process.env.LMSTUDIO_MODEL_TOKEN_LIMIT) > 0
    )
      userDefinedLimit = Number(process.env.LMSTUDIO_MODEL_TOKEN_LIMIT);

    // The user defined limit is always higher priority than the context window limit, but it cannot be higher than the context window limit
    // so we return the minimum of the two, if there is no user defined limit, we return the system defined limit as-is.
    if (userDefinedLimit !== null)
      return Math.min(userDefinedLimit, systemDefinedLimit);
    return systemDefinedLimit;
  }

  promptWindowLimit() {
    return this.constructor.promptWindowLimit(this.model);
  }

  async isValidChatCompletionModel(_ = "") {
    // LMStudio may be anything. The user must do it correctly.
    // See comment about this.model declaration in constructor
    return true;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "auto",
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the user prompt for this model.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Blocking (non-streamed) chat completion.
   * @param {object[]|null} messages - OpenAI-format message array.
   * @returns {Promise<{textResponse: string, metrics: object}|null>}
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        `LMStudio chat: ${this.model} is not valid or defined model for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.lmstudio.chat.completions.create({
        model: this.model,
        messages,
        temperature,
      })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage?.prompt_tokens || 0,
        completion_tokens: result.output.usage?.completion_tokens || 0,
        total_tokens: result.output.usage?.total_tokens || 0,
        // Guard against NaN when the server omits usage entirely.
        outputTps:
          (result.output.usage?.completion_tokens || 0) / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Streamed chat completion wrapped in a performance monitor.
   * @param {object[]|null} messages - OpenAI-format message array.
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        `LMStudio chat: ${this.model} is not valid or defined model for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.lmstudio.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    await this.assertModelContextLimits();
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
/**
 * Normalize a user-provided LMStudio base path into the `<origin>/v1` form the
 * OpenAI-compatible API expects (no trailing slash, no extra path segments).
 * Inputs that cannot be parsed as a URL are returned untouched so the caller
 * surfaces the original value.
 * @param {string} providedBasePath
 * @returns {string}
 */
function parseLMStudioBasePath(providedBasePath = "") {
  let parsedURL;
  try {
    parsedURL = new URL(providedBasePath);
  } catch (e) {
    // Not parseable as a URL - hand back exactly what we were given.
    return providedBasePath;
  }
  return `${parsedURL.origin}/v1`;
}
// Public surface: the LMStudio provider class and the base-path normalizer
// (exported separately so settings endpoints can validate user input).
module.exports = {
  LMStudioLLM,
  parseLMStudioBasePath,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/ollama/index.js | server/utils/AiProviders/ollama/index.js | const {
writeResponseChunk,
clientAbortedHandler,
formatChatHistory,
} = require("../../helpers/chat/responses");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const { Ollama } = require("ollama");
const { v4: uuidv4 } = require("uuid");
// Docs: https://github.com/jmorganca/ollama/blob/main/docs/api.md
class OllamaAILLM {
  /** @see OllamaAILLM.cacheContextWindows */
  static modelContextWindows = {};

  /**
   * @param {object|null} embedder - Optional embedder instance; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - Optional model id override.
   * @throws {Error} If OLLAMA_BASE_PATH is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.OLLAMA_BASE_PATH)
      throw new Error("No Ollama Base Path was set.");

    this.className = "OllamaAILLM";
    this.authToken = process.env.OLLAMA_AUTH_TOKEN;
    this.basePath = process.env.OLLAMA_BASE_PATH;
    this.model = modelPreference || process.env.OLLAMA_MODEL_PREF;
    this.performanceMode = process.env.OLLAMA_PERFORMANCE_MODE || "base";
    this.keepAlive = process.env.OLLAMA_KEEP_ALIVE_TIMEOUT
      ? Number(process.env.OLLAMA_KEEP_ALIVE_TIMEOUT)
      : 300; // Default 5-minute timeout for Ollama model loading.

    const headers = this.authToken
      ? { Authorization: `Bearer ${this.authToken}` }
      : {};
    this.client = new Ollama({
      host: this.basePath,
      headers: headers,
      fetch: this.#applyFetch(),
    });

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;

    // Lazy load the limits to avoid blocking the main thread on cacheContextWindows
    this.limits = null;
    // Deliberately not awaited - limits are resolved later via assertModelContextLimits().
    OllamaAILLM.cacheContextWindows(true);
    this.#log(
      `initialized with\nmodel: ${this.model}\nperf: ${this.performanceMode}`
    );
  }

  #log(text, ...args) {
    console.log(`\x1b[32m[Ollama]\x1b[0m ${text}`, ...args);
  }

  static #slog(text, ...args) {
    console.log(`\x1b[32m[Ollama]\x1b[0m ${text}`, ...args);
  }

  /**
   * Ensure the context-window cache is populated, then derive the prompt
   * budget split (history/system/user) from this model's context window.
   * No-op after the first successful call.
   */
  async assertModelContextLimits() {
    if (this.limits !== null) return;
    await OllamaAILLM.cacheContextWindows();
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
  }

  /**
   * Cache the context windows for the Ollama models.
   * This is done once and then cached for the lifetime of the server. This is absolutely necessary to ensure that the context windows are correct.
   *
   * This is a convenience to ensure that the context windows are correct and that the user
   * does not have to manually set the context window for each model.
   * @param {boolean} force - Force the cache to be refreshed.
   * @returns {Promise<void>} - A promise that resolves when the cache is refreshed.
   */
  static async cacheContextWindows(force = false) {
    try {
      // Skip if we already have cached context windows and we're not forcing a refresh
      if (Object.keys(OllamaAILLM.modelContextWindows).length > 0 && !force)
        return;

      const authToken = process.env.OLLAMA_AUTH_TOKEN;
      const basePath = process.env.OLLAMA_BASE_PATH;
      const client = new Ollama({
        host: basePath,
        headers: authToken ? { Authorization: `Bearer ${authToken}` } : {},
      });
      const { models } = await client.list().catch(() => ({ models: [] }));
      if (!models.length) return;

      const infoPromises = models.map((model) =>
        client
          .show({ model: model.name })
          .then((info) => ({ name: model.name, ...info }))
          // One unreachable/bad model must not abort caching for the rest.
          .catch(() => null)
      );
      const infos = (await Promise.all(infoPromises)).filter(Boolean);

      infos.forEach((showInfo) => {
        // Optional chaining: some server versions/models may omit `capabilities`
        // or `model_info` - previously that threw and aborted the whole cache.
        if (showInfo.capabilities?.includes("embedding")) return;
        const contextWindowKey = Object.keys(showInfo.model_info ?? {}).find(
          (key) => key.endsWith(".context_length")
        );
        if (!contextWindowKey)
          return (OllamaAILLM.modelContextWindows[showInfo.name] = 4096);
        OllamaAILLM.modelContextWindows[showInfo.name] =
          showInfo.model_info[contextWindowKey];
      });

      OllamaAILLM.#slog(`Context windows cached for all models!`);
    } catch (e) {
      OllamaAILLM.#slog(`Error caching context windows`, e);
      return;
    }
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  /**
   * Apply a custom fetch function to the Ollama client.
   * This is useful when we want to bypass the default 5m timeout for global fetch
   * for machines which run responses very slowly.
   * @returns {Function} The custom fetch function.
   */
  #applyFetch() {
    try {
      if (!("OLLAMA_RESPONSE_TIMEOUT" in process.env)) return fetch;
      const { Agent } = require("undici");
      const moment = require("moment");
      let timeout = process.env.OLLAMA_RESPONSE_TIMEOUT;
      if (!timeout || isNaN(Number(timeout)) || Number(timeout) <= 5 * 60_000) {
        this.#log(
          "Timeout option was not set, is not a number, or is less than 5 minutes in ms - falling back to default",
          { timeout }
        );
        return fetch;
      } else timeout = Number(timeout);

      const noTimeoutFetch = (input, init = {}) => {
        return fetch(input, {
          ...init,
          dispatcher: new Agent({ headersTimeout: timeout }),
        });
      };

      const humanDiff = moment.duration(timeout).humanize();
      this.#log(`Applying custom fetch w/timeout of ${humanDiff}.`);
      return noTimeoutFetch;
    } catch (error) {
      this.#log("Error applying custom fetch - using default fetch", error);
      return fetch;
    }
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Resolve the context window for a given model name.
   * @param {string} modelName
   * @returns {number} token limit (always numeric)
   */
  static promptWindowLimit(modelName) {
    if (Object.keys(OllamaAILLM.modelContextWindows).length === 0) {
      this.#slog(
        "No context windows cached - Context window may be inaccurately reported."
      );
      // Coerce the env override so callers always receive a number, not a string.
      return Number(process.env.OLLAMA_MODEL_TOKEN_LIMIT) || 4096;
    }

    let userDefinedLimit = null;
    const systemDefinedLimit =
      Number(this.modelContextWindows[modelName]) || 4096;

    if (
      process.env.OLLAMA_MODEL_TOKEN_LIMIT &&
      !isNaN(Number(process.env.OLLAMA_MODEL_TOKEN_LIMIT)) &&
      Number(process.env.OLLAMA_MODEL_TOKEN_LIMIT) > 0
    )
      userDefinedLimit = Number(process.env.OLLAMA_MODEL_TOKEN_LIMIT);

    // The user defined limit is always higher priority than the context window limit, but it cannot be higher than the context window limit
    // so we return the minimum of the two, if there is no user defined limit, we return the system defined limit as-is.
    if (userDefinedLimit !== null)
      return Math.min(userDefinedLimit, systemDefinedLimit);
    return systemDefinedLimit;
  }

  promptWindowLimit() {
    return this.constructor.promptWindowLimit(this.model);
  }

  async isValidChatCompletionModel(_ = "") {
    return true;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {{content: string, images: string[]}}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) return { content: userPrompt };
    const images = attachments.map(
      (attachment) => attachment.contentString.split("base64,").slice(-1)[0]
    );
    return { content: userPrompt, images };
  }

  /**
   * Handles errors from the Ollama API to make them more user friendly.
   * @param {Error} e
   */
  #errorHandler(e) {
    switch (e.message) {
      case "fetch failed":
        throw new Error(
          "Your Ollama instance could not be reached or is not responding. Please make sure it is running the API server and your connection information is correct in AnythingLLM."
        );
      default:
        return e;
    }
  }

  /**
   * Construct the user prompt for this model.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent, "spread"),
      {
        role: "user",
        ...this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Blocking (non-streamed) chat completion.
   * Wraps <think> tags around reasoning content when the server returns it.
   * @param {object[]|null} messages - Ollama-format message array.
   * @returns {Promise<{textResponse: string, metrics: object}>}
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.client
        .chat({
          model: this.model,
          stream: false,
          messages,
          keep_alive: this.keepAlive,
          options: {
            temperature,
            use_mlock: true,
            // There are currently only two performance settings so if its not "base" - its max context.
            ...(this.performanceMode === "base"
              ? {} // TODO: if in base mode, maybe we just use half the context window when below <10K?
              : { num_ctx: this.promptWindowLimit() }),
          },
        })
        .then((res) => {
          let content = res.message.content;
          if (res.message.thinking)
            content = `<think>${res.message.thinking}</think>${content}`;
          return {
            content,
            usage: {
              prompt_tokens: res.prompt_eval_count,
              completion_tokens: res.eval_count,
              total_tokens: res.prompt_eval_count + res.eval_count,
              duration: res.eval_duration / 1e9,
            },
          };
        })
        .catch((e) => {
          throw new Error(
            `Ollama::getChatCompletion failed to communicate with Ollama. ${this.#errorHandler(e).message}`
          );
        })
    );

    if (!result.output.content || !result.output.content.length)
      throw new Error(`Ollama::getChatCompletion text response was empty.`);

    return {
      textResponse: result.output.content,
      metrics: {
        prompt_tokens: result.output.usage.prompt_tokens,
        completion_tokens: result.output.usage.completion_tokens,
        total_tokens: result.output.usage.total_tokens,
        outputTps:
          result.output.usage.completion_tokens / result.output.usage.duration,
        duration: result.output.usage.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Streamed chat completion wrapped in a performance monitor.
   * @param {object[]|null} messages - Ollama-format message array.
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.client.chat({
        model: this.model,
        stream: true,
        messages,
        keep_alive: this.keepAlive,
        options: {
          temperature,
          use_mlock: true,
          // There are currently only two performance settings so if its not "base" - its max context.
          ...(this.performanceMode === "base"
            ? {}
            : { num_ctx: this.promptWindowLimit() }),
        },
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    }).catch((e) => {
      throw this.#errorHandler(e);
    });
    return measuredStreamRequest;
  }

  /**
   * Handles streaming responses from Ollama.
   * @param {import("express").Response} response
   * @param {import("../../helpers/chat/LLMPerformanceMonitor").MonitoredStream} stream
   * @param {import("express").Request} request
   * @returns {Promise<string>}
   */
  handleStream(response, stream, responseProps) {
    const { uuid = uuidv4(), sources = [] } = responseProps;

    return new Promise(async (resolve) => {
      let fullText = "";
      let reasoningText = "";
      let usage = {
        prompt_tokens: 0,
        completion_tokens: 0,
      };

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => {
        stream?.endMeasurement(usage);
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);

      try {
        for await (const chunk of stream) {
          if (chunk === undefined)
            throw new Error(
              "Stream returned undefined chunk. Aborting reply - check model provider logs."
            );

          if (chunk.done) {
            usage.prompt_tokens = chunk.prompt_eval_count;
            usage.completion_tokens = chunk.eval_count;
            usage.duration = chunk.eval_duration / 1e9;
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            response.removeListener("close", handleAbort);
            stream?.endMeasurement(usage);
            resolve(fullText);
            break;
          }

          if (chunk.hasOwnProperty("message")) {
            // As of Ollama v0.9.0+, thinking content comes in a separate property
            // in the response object. If it exists, we need to handle it separately by wrapping it in <think> tags.
            const content = chunk.message.content;
            const reasoningToken = chunk.message.thinking;

            if (reasoningToken) {
              if (reasoningText.length === 0) {
                const startTag = "<think>";
                writeResponseChunk(response, {
                  uuid,
                  sources,
                  type: "textResponseChunk",
                  textResponse: startTag + reasoningToken,
                  close: false,
                  error: false,
                });
                reasoningText += startTag + reasoningToken;
              } else {
                writeResponseChunk(response, {
                  uuid,
                  sources,
                  type: "textResponseChunk",
                  textResponse: reasoningToken,
                  close: false,
                  error: false,
                });
                reasoningText += reasoningToken;
              }
            } else if (content.length > 0) {
              // If we have reasoning text, we need to close the reasoning tag and then append the content.
              if (reasoningText.length > 0) {
                const endTag = "</think>";
                writeResponseChunk(response, {
                  uuid,
                  sources,
                  type: "textResponseChunk",
                  textResponse: endTag,
                  close: false,
                  error: false,
                });
                fullText += reasoningText + endTag;
                reasoningText = ""; // Reset reasoning buffer
              }

              fullText += content; // Append regular text
              writeResponseChunk(response, {
                uuid,
                sources,
                type: "textResponseChunk",
                textResponse: content,
                close: false,
                error: false,
              });
            }
          }
        }
      } catch (error) {
        writeResponseChunk(response, {
          uuid,
          sources: [],
          type: "textResponseChunk",
          textResponse: "",
          close: true,
          error: `Ollama:streaming - could not stream chat. ${
            error?.cause ?? error.message
          }`,
        });
        response.removeListener("close", handleAbort);
        stream?.endMeasurement(usage);
        resolve(fullText);
      }
    });
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    await this.assertModelContextLimits();
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
// Public surface of this module: the Ollama LLM provider class.
module.exports = {
  OllamaAILLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/openAi/index.js | server/utils/AiProviders/openAi/index.js | const { v4: uuidv4 } = require("uuid");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
formatChatHistory,
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
const { MODEL_MAP } = require("../modelMap");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
class OpenAiLLM {
  /**
   * @param {object|null} embedder - Optional embedder instance; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - Optional model id override.
   * @throws {Error} If OPEN_AI_KEY is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.OPEN_AI_KEY) throw new Error("No OpenAI API key was set.");
    this.className = "OpenAiLLM";
    const { OpenAI: OpenAIApi } = require("openai");

    this.openai = new OpenAIApi({
      apiKey: process.env.OPEN_AI_KEY,
    });
    this.model = modelPreference || process.env.OPEN_MODEL_PREF || "gpt-4o";
    // Context windows are known statically via MODEL_MAP, so the budget split
    // can be computed synchronously here (unlike LMStudio/Ollama providers).
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(
      `Initialized ${this.model} with context window ${this.promptWindowLimit()}`
    );
  }

  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  static promptWindowLimit(modelName) {
    return MODEL_MAP.get("openai", modelName) ?? 4_096;
  }

  promptWindowLimit() {
    return MODEL_MAP.get("openai", this.model) ?? 4_096;
  }

  // Short circuit if name has 'gpt' since we now fetch models from OpenAI API
  // via the user API key, so the model must be relevant and real.
  // and if somehow it is not, chat will fail but that is caught.
  // we don't want to hit the OpenAI api every chat because it will get spammed
  // and introduce latency for no reason.
  async isValidChatCompletionModel(modelName = "") {
    const isPreset =
      modelName.toLowerCase().includes("gpt") ||
      modelName.toLowerCase().startsWith("o");
    if (isPreset) return true;

    const model = await this.openai.models
      .retrieve(modelName)
      .then((modelObj) => modelObj)
      .catch(() => null);
    return !!model;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "input_text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "input_image",
        image_url: attachment.contentString,
      });
    }
    return content.flat();
  }

  /**
   * Construct the user prompt for this model.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [], // This is the specific attachment for only this prompt
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  /**
   * Determine the appropriate temperature for the model.
   * @param {string} modelName
   * @param {number} temperature
   * @returns {number}
   */
  #temperature(modelName, temperature) {
    // For models that don't support temperature
    // OpenAI accepts temperature 1
    const NO_TEMP_MODELS = ["o", "gpt-5"];
    if (NO_TEMP_MODELS.some((prefix) => modelName.startsWith(prefix))) {
      return 1;
    }
    return temperature;
  }

  /**
   * Blocking (non-streamed) completion via the Responses API.
   * @param {object[]|null} messages - Responses-API input array.
   * @returns {Promise<{textResponse: string, metrics: object}|null>}
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `OpenAI chat: ${this.model} is not valid for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.responses
        .create({
          model: this.model,
          input: messages,
          store: false,
          temperature: this.#temperature(this.model, temperature),
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (!result.output.hasOwnProperty("output_text")) return null;
    const usage = result.output.usage || {};

    return {
      textResponse: result.output.output_text,
      metrics: {
        prompt_tokens: usage.input_tokens || 0,
        completion_tokens: usage.output_tokens || 0,
        total_tokens: usage.total_tokens || 0,
        outputTps: usage.output_tokens
          ? usage.output_tokens / result.duration
          : 0,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }

  /**
   * Streamed completion via the Responses API wrapped in a performance monitor.
   * @param {object[]|null} messages - Responses-API input array.
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `OpenAI chat: ${this.model} is not valid for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.responses.create({
        model: this.model,
        stream: true,
        input: messages,
        store: false,
        temperature: this.#temperature(this.model, temperature),
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }

  /**
   * Forwards Responses-API stream events to the client as text chunks.
   * Resolves with the full accumulated text.
   */
  handleStream(response, stream, responseProps) {
    const { uuid = uuidv4(), sources = [] } = responseProps;
    let hasUsageMetrics = false;
    let usage = {
      completion_tokens: 0,
    };

    return new Promise(async (resolve) => {
      let fullText = "";

      const handleAbort = () => {
        stream?.endMeasurement(usage);
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);

      try {
        for await (const chunk of stream) {
          if (chunk.type === "response.output_text.delta") {
            const token = chunk.delta;
            if (token) {
              fullText += token;
              // Count tokens ourselves until/unless the API reports real usage.
              if (!hasUsageMetrics) usage.completion_tokens++;
              writeResponseChunk(response, {
                uuid,
                sources: [],
                type: "textResponseChunk",
                textResponse: token,
                close: false,
                error: false,
              });
            }
          } else if (chunk.type === "response.completed") {
            const { response: res } = chunk;
            if (res.hasOwnProperty("usage") && !!res.usage) {
              hasUsageMetrics = true;
              usage = {
                ...usage,
                prompt_tokens: res.usage?.input_tokens || 0,
                completion_tokens: res.usage?.output_tokens || 0,
                total_tokens: res.usage?.total_tokens || 0,
              };
            }

            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            response.removeListener("close", handleAbort);
            stream?.endMeasurement(usage);
            resolve(fullText);
            break;
          }
        }
      } catch (e) {
        console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
        writeResponseChunk(response, {
          uuid,
          type: "abort",
          textResponse: null,
          sources: [],
          close: true,
          error: e.message,
        });
        // Detach the abort handler so a later client disconnect cannot
        // re-fire endMeasurement after we have already settled. (Matches the
        // Ollama stream handler's cleanup.)
        response.removeListener("close", handleAbort);
        stream?.endMeasurement(usage);
        resolve(fullText);
      }
    });
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
// Public surface of this module: the OpenAI LLM provider class.
module.exports = {
  OpenAiLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/bedrock/index.js | server/utils/AiProviders/bedrock/index.js | const {
ConverseCommand,
ConverseStreamCommand,
} = require("@aws-sdk/client-bedrock-runtime");
const {
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const { v4: uuidv4 } = require("uuid");
const {
DEFAULT_MAX_OUTPUT_TOKENS,
DEFAULT_CONTEXT_WINDOW_TOKENS,
getImageFormatFromMime,
base64ToUint8Array,
createBedrockCredentials,
createBedrockRuntimeClient,
getBedrockAuthMethod,
} = require("./utils");
class AWSBedrockLLM {
/**
* List of Bedrock models observed to not support system prompts when using the Converse API.
* @type {string[]}
*/
noSystemPromptModels = [
"amazon.titan-text-express-v1",
"amazon.titan-text-lite-v1",
"cohere.command-text-v14",
"cohere.command-light-text-v14",
"us.deepseek.r1-v1:0",
// Add other models here if identified
];
/**
* Initializes the AWS Bedrock LLM connector.
* @param {object | null} [embedder=null] - An optional embedder instance. Defaults to NativeEmbedder.
* @param {string | null} [modelPreference=null] - Optional model ID override. Defaults to environment variable.
* @throws {Error} If required environment variables are missing or invalid.
*/
  constructor(embedder = null, modelPreference = null) {
    // NOTE: this.authMethod is a getter that re-derives the auth strategy from
    // env vars on each access, so it is safe to consult before any assignment.
    const requiredEnvVars = [
      ...(!["iam_role", "apiKey"].includes(this.authMethod)
        ? [
            // required for iam and sessionToken
            "AWS_BEDROCK_LLM_ACCESS_KEY_ID",
            "AWS_BEDROCK_LLM_ACCESS_KEY",
          ]
        : []),
      ...(this.authMethod === "sessionToken"
        ? [
            // required for sessionToken
            "AWS_BEDROCK_LLM_SESSION_TOKEN",
          ]
        : []),
      ...(this.authMethod === "apiKey"
        ? [
            // required for bedrock api key
            "AWS_BEDROCK_LLM_API_KEY",
          ]
        : []),
      "AWS_BEDROCK_LLM_REGION",
      "AWS_BEDROCK_LLM_MODEL_PREFERENCE",
    ];

    // Validate required environment variables
    for (const envVar of requiredEnvVars) {
      if (!process.env[envVar])
        throw new Error(`Required environment variable ${envVar} is not set.`);
    }

    this.model =
      modelPreference || process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE;
    const contextWindowLimit = this.promptWindowLimit();
    // 15/15/70 split of the total context window between history, system
    // prompt, and user prompt budgets.
    this.limits = {
      history: Math.floor(contextWindowLimit * 0.15),
      system: Math.floor(contextWindowLimit * 0.15),
      user: Math.floor(contextWindowLimit * 0.7),
    };

    this.bedrockClient = createBedrockRuntimeClient(
      this.authMethod,
      this.credentials
    );
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.#log(
      `Initialized with model: ${this.model}. Auth: ${this.authMethod}. Context Window: ${contextWindowLimit}.`
    );
  }
  /**
   * Gets the credentials for the AWS Bedrock LLM based on the authentication method provided.
   * Re-computed on each access (not memoized); delegates to createBedrockCredentials.
   * @returns {object} The credentials object.
   */
  get credentials() {
    return createBedrockCredentials(this.authMethod);
  }

  /**
   * Gets the configured AWS authentication method ('iam' or 'sessionToken').
   * Defaults to 'iam' if the environment variable is invalid.
   * Re-computed on each access (not memoized); delegates to getBedrockAuthMethod.
   * @returns {"iam" | "iam_role" | "sessionToken"} The authentication method.
   */
  get authMethod() {
    return getBedrockAuthMethod();
  }
/**
* Appends context texts to a string with standard formatting.
* @param {string[]} contextTexts - An array of context text snippets.
* @returns {string} Formatted context string or empty string if no context provided.
* @private
*/
#appendContext(contextTexts = []) {
if (!contextTexts?.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`)
.join("")
);
}
  /**
   * Internal logging helper with the green [AWSBedrock] provider prefix.
   * @param {string} text - The log message.
   * @param {...any} args - Additional arguments passed through to console.log.
   * @private
   */
  #log(text, ...args) {
    console.log(`\x1b[32m[AWSBedrock]\x1b[0m ${text}`, ...args);
  }
  /**
   * Internal logging helper with provider prefix for static methods
   * (static-context counterpart of #log, same formatting).
   * @param {string} text - The log message.
   * @param {...any} args - Additional arguments passed through to console.log.
   * @private
   */
  static #slog(text, ...args) {
    console.log(`\x1b[32m[AWSBedrock]\x1b[0m ${text}`, ...args);
  }
  /**
   * Indicates if the provider supports streaming responses.
   * @returns {boolean} True - streamGetChatCompletion is implemented on this class.
   */
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
/**
* @static
* Gets the total prompt window limit (total context window: input + output) from the environment variable.
* This value is used for calculating input limits, NOT for setting the max output tokens in API calls.
* @returns {number} The total context window token limit. Defaults to 8191.
*/
static promptWindowLimit() {
const limit =
process.env.AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT ??
DEFAULT_CONTEXT_WINDOW_TOKENS;
const numericLimit = Number(limit);
if (isNaN(numericLimit) || numericLimit <= 0) {
this.#slog(
`[AWSBedrock ERROR] Invalid AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT found: "${limitSourceValue}". Must be a positive number - returning default ${DEFAULT_CONTEXT_WINDOW_TOKENS}.`
);
return DEFAULT_CONTEXT_WINDOW_TOKENS;
}
return numericLimit;
}
  /**
   * Gets the total prompt window limit (total context window) for the current
   * model instance. Delegates to the static implementation.
   * @returns {number} The token limit.
   */
  promptWindowLimit() {
    return AWSBedrockLLM.promptWindowLimit();
  }
/**
* Gets the maximum number of tokens the model should generate in its response.
* Reads from the AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS environment variable or uses a default.
* This is distinct from the total context window limit.
* @returns {number} The maximum output tokens limit for API calls.
*/
getMaxOutputTokens() {
const outputLimitSource = process.env.AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS;
if (isNaN(Number(outputLimitSource))) {
this.#log(
`[AWSBedrock ERROR] Invalid AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS found: "${outputLimitSource}". Must be a positive number - returning default ${DEFAULT_MAX_OUTPUT_TOKENS}.`
);
return DEFAULT_MAX_OUTPUT_TOKENS;
}
const numericOutputLimit = Number(outputLimitSource);
if (numericOutputLimit <= 0) {
this.#log(
`[AWSBedrock ERROR] Invalid AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS found: "${outputLimitSource}". Must be a greater than 0 - returning default ${DEFAULT_MAX_OUTPUT_TOKENS}.`
);
return DEFAULT_MAX_OUTPUT_TOKENS;
}
return numericOutputLimit;
}
  /**
   * Stubbed method for compatibility with the LLM interface - Bedrock model
   * ids are not validated client-side, so this always resolves true.
   * @param {string} _modelName - Ignored.
   * @returns {Promise<boolean>} Always true.
   */
  async isValidChatCompletionModel(_modelName = "") {
    return true;
  }
/**
* Validates attachments array and returns a new array with valid attachments.
* @param {Array<{contentString: string, mime: string}>} attachments - Array of attachments.
* @returns {Array<{image: {format: string, source: {bytes: Uint8Array}}>} Array of valid attachments.
* @private
*/
#validateAttachments(attachments = []) {
if (!Array.isArray(attachments) || !attachments?.length) return [];
const validAttachments = [];
for (const attachment of attachments) {
if (
!attachment ||
typeof attachment.mime !== "string" ||
typeof attachment.contentString !== "string"
) {
this.#log("Skipping invalid attachment object.", attachment);
continue;
}
// Strip data URI prefix (e.g., "data:image/png;base64,")
const base64Data = attachment.contentString.replace(
/^data:image\/\w+;base64,/,
""
);
const format = getImageFormatFromMime(attachment.mime);
const attachmentInfo = {
valid: format !== null,
format,
imageBytes: base64ToUint8Array(base64Data),
};
if (!attachmentInfo.valid) {
this.#log(
`Skipping attachment with unsupported/invalid MIME type: ${attachment.mime}`
);
continue;
}
validAttachments.push({
image: {
format: format,
source: { bytes: attachmentInfo.imageBytes },
},
});
}
return validAttachments;
}
/**
* Generates the Bedrock Converse API content array for a message,
* processing text and formatting valid image attachments.
* @param {object} params
* @param {string} params.userPrompt - The text part of the message.
* @param {Array<{contentString: string, mime: string}>} params.attachments - Array of attachments for the message.
* @returns {Array<object>} Array of content blocks (e.g., [{text: "..."}, {image: {...}}]).
* @private
*/
#generateContent({ userPrompt = "", attachments = [] }) {
const content = [];
// Add text block if prompt is not empty
if (userPrompt?.trim()?.length) content.push({ text: userPrompt });
// Validate attachments and add valid attachments to content
const validAttachments = this.#validateAttachments(attachments);
if (validAttachments?.length) content.push(...validAttachments);
// Ensure content array is never empty (Bedrock requires at least one block)
if (content.length === 0) content.push({ text: "" });
return content;
}
/**
* Constructs the complete message array in the format expected by the Bedrock Converse API.
* @param {object} params
* @param {string} params.systemPrompt - The system prompt text.
* @param {string[]} params.contextTexts - Array of context text snippets.
* @param {Array<{role: 'user' | 'assistant', content: string, attachments?: Array<{contentString: string, mime: string}>}>} params.chatHistory - Previous messages.
* @param {string} params.userPrompt - The latest user prompt text.
* @param {Array<{contentString: string, mime: string}>} params.attachments - Attachments for the latest user prompt.
* @returns {Array<object>} The formatted message array for the API call.
*/
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const systemMessageContent = `${systemPrompt}${this.#appendContext(contextTexts)}`;
let messages = [];
// Handle system prompt (either real or simulated)
if (this.noSystemPromptModels.includes(this.model)) {
if (systemMessageContent.trim().length > 0) {
this.#log(
`Model ${this.model} doesn't support system prompts; simulating.`
);
messages.push(
{
role: "user",
content: this.#generateContent({
userPrompt: systemMessageContent,
}),
},
{ role: "assistant", content: [{ text: "Okay." }] }
);
}
} else if (systemMessageContent.trim().length > 0) {
messages.push({
role: "system",
content: this.#generateContent({ userPrompt: systemMessageContent }),
});
}
// Add chat history
messages = messages.concat(
chatHistory.map((msg) => ({
role: msg.role,
content: this.#generateContent({
userPrompt: msg.content,
attachments: Array.isArray(msg.attachments) ? msg.attachments : [],
}),
}))
);
// Add final user prompt
messages.push({
role: "user",
content: this.#generateContent({
userPrompt: userPrompt,
attachments: Array.isArray(attachments) ? attachments : [],
}),
});
return messages;
}
/**
* Parses reasoning steps from the response and prepends them in <think> tags.
* @param {object} message - The message object from the Bedrock response.
* @returns {string} The text response, potentially with reasoning prepended.
* @private
*/
#parseReasoningFromResponse({ content = [] }) {
if (!content?.length) return "";
// Find the text block and grab the text
const textBlock = content.find((block) => block.text !== undefined);
let textResponse = textBlock?.text || "";
// Find the reasoning block and grab the reasoning text
const reasoningBlock = content.find(
(block) => block.reasoningContent?.reasoningText?.text
);
if (reasoningBlock) {
const reasoningText =
reasoningBlock.reasoningContent.reasoningText.text.trim();
if (reasoningText?.length)
textResponse = `<think>${reasoningText}</think>${textResponse}`;
}
return textResponse;
}
  /**
   * Sends a request for chat completion (non-streaming).
   * @param {Array<object> | null} messages - Formatted message array from constructPrompt.
   * @param {object} options - Request options.
   * @param {number} options.temperature - Sampling temperature; falls back to defaultTemp.
   * @returns {Promise<object | null>} Response object with textResponse and metrics, or null
   * when the API response is missing the expected output.message structure.
   * @throws {Error} If the API call fails or validation errors occur.
   */
  async getChatCompletion(messages = null, { temperature }) {
    if (!messages?.length)
      throw new Error(
        "AWSBedrock::getChatCompletion requires a non-empty messages array."
      );
    // constructPrompt may place a synthetic "system" message first; the
    // Converse API takes system content as a separate `system` field rather
    // than as a message, so split it off here.
    const hasSystem = messages[0]?.role === "system";
    const systemBlock = hasSystem ? messages[0].content : undefined;
    const history = hasSystem ? messages.slice(1) : messages;
    const maxTokensToSend = this.getMaxOutputTokens();
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.bedrockClient
        .send(
          new ConverseCommand({
            modelId: this.model,
            messages: history,
            inferenceConfig: {
              maxTokens: maxTokensToSend,
              temperature: temperature ?? this.defaultTemp,
            },
            system: systemBlock,
          })
        )
        .catch((e) => {
          this.#log(
            `Bedrock Converse API Error (getChatCompletion): ${e.message}`,
            e
          );
          // Re-throw a friendlier error when the model rejected our
          // maxTokens value so users know which env var to adjust.
          if (
            e.name === "ValidationException" &&
            e.message.includes("maximum tokens")
          ) {
            throw new Error(
              `AWSBedrock::getChatCompletion failed. Model ${this.model} rejected maxTokens value of ${maxTokensToSend}. Check model documentation for its maximum output token limit and set AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS if needed. Original error: ${e.message}`
            );
          }
          throw new Error(`AWSBedrock::getChatCompletion failed. ${e.message}`);
        })
    );
    // result.output is the raw ConverseCommand response; the generated
    // message sits at response.output.message.
    const response = result.output;
    if (!response?.output?.message) {
      this.#log(
        "Bedrock response missing expected output.message structure.",
        response
      );
      return null;
    }
    // Derive output tokens-per-second from the API-reported latency.
    const latencyMs = response?.metrics?.latencyMs;
    const outputTokens = response?.usage?.outputTokens;
    const outputTps =
      latencyMs > 0 && outputTokens ? outputTokens / (latencyMs / 1000) : 0;
    return {
      textResponse: this.#parseReasoningFromResponse(response.output.message),
      metrics: {
        prompt_tokens: response?.usage?.inputTokens ?? 0,
        completion_tokens: outputTokens ?? 0,
        total_tokens: response?.usage?.totalTokens ?? 0,
        outputTps: outputTps,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }
  /**
   * Sends a request for streaming chat completion.
   * @param {Array<object> | null} messages - Formatted message array from constructPrompt.
   * @param {object} options - Request options.
   * @param {number} [options.temperature] - Sampling temperature; falls back to defaultTemp.
   * @returns {Promise<import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream>} The monitored stream object.
   * @throws {Error} If the API call setup fails or validation errors occur.
   */
  async streamGetChatCompletion(messages = null, { temperature }) {
    if (!Array.isArray(messages) || messages.length === 0) {
      throw new Error(
        "AWSBedrock::streamGetChatCompletion requires a non-empty messages array."
      );
    }
    // Same system-message split as getChatCompletion: Converse takes system
    // content as a dedicated `system` field, not as a chat message.
    const hasSystem = messages[0]?.role === "system";
    const systemBlock = hasSystem ? messages[0].content : undefined;
    const history = hasSystem ? messages.slice(1) : messages;
    const maxTokensToSend = this.getMaxOutputTokens();
    try {
      // Attempt to initiate the stream
      const stream = await this.bedrockClient.send(
        new ConverseStreamCommand({
          modelId: this.model,
          messages: history,
          inferenceConfig: {
            maxTokens: maxTokensToSend,
            temperature: temperature ?? this.defaultTemp,
          },
          system: systemBlock,
        })
      );
      // If successful, wrap the stream with performance monitoring
      const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
        func: stream,
        messages,
        runPromptTokenCalculation: false,
        modelTag: this.model,
      });
      return measuredStreamRequest;
    } catch (e) {
      // Catch errors during the initial .send() call (e.g., validation errors)
      this.#log(
        `Bedrock Converse API Error (streamGetChatCompletion setup): ${e.message}`,
        e
      );
      // Friendlier error when the model rejected our maxTokens value.
      if (
        e.name === "ValidationException" &&
        e.message.includes("maximum tokens")
      ) {
        throw new Error(
          `AWSBedrock::streamGetChatCompletion failed during setup. Model ${this.model} rejected maxTokens value of ${maxTokensToSend}. Check model documentation for its maximum output token limit and set AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS if needed. Original error: ${e.message}`
        );
      }
      throw new Error(
        `AWSBedrock::streamGetChatCompletion failed during setup. ${e.message}`
      );
    }
  }
  /**
   * Handles the stream response from the AWS Bedrock API ConverseStreamCommand.
   * Parses chunks, wraps any reasoning deltas in <think>...</think> tags, and
   * estimates completion token usage until real metrics arrive in the
   * metadata/messageStop events.
   * @param {object} response - The HTTP response object to write chunks to.
   * @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream - The monitored stream object from streamGetChatCompletion.
   * @param {object} responseProps - Additional properties for the response chunks.
   * @param {string} responseProps.uuid - Unique ID for the response.
   * @param {Array} responseProps.sources - Source documents used (if any).
   * @returns {Promise<string>} A promise that resolves with the complete text response when the stream ends.
   */
  handleStream(response, stream, responseProps) {
    const { uuid = uuidv4(), sources = [] } = responseProps;
    let hasUsageMetrics = false;
    let usage = { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };
    // NOTE: async promise executor is deliberate here - errors are handled by
    // the internal try/catch/finally, which always resolves.
    return new Promise(async (resolve) => {
      let fullText = "";
      let reasoningText = ""; // Buffer of reasoning tokens not yet flushed into fullText.
      // Abort handler for client closing connection
      const handleAbort = () => {
        this.#log(`Client closed connection for stream ${uuid}. Aborting.`);
        stream?.endMeasurement(usage); // Finalize metrics
        clientAbortedHandler(resolve, fullText); // Resolve with partial text
      };
      response.on("close", handleAbort);
      try {
        // Process stream chunks
        for await (const chunk of stream.stream) {
          if (!chunk) {
            this.#log("Stream returned null/undefined chunk.");
            continue;
          }
          // Each Converse stream event is an object with a single key naming
          // the event type (metadata, contentBlockDelta, messageStop, ...).
          const action = Object.keys(chunk)[0];
          switch (action) {
            case "metadata": // Contains usage metrics at the end
              if (chunk.metadata?.usage) {
                hasUsageMetrics = true;
                usage = {
                  // Overwrite with final metrics
                  prompt_tokens: chunk.metadata.usage.inputTokens ?? 0,
                  completion_tokens: chunk.metadata.usage.outputTokens ?? 0,
                  total_tokens: chunk.metadata.usage.totalTokens ?? 0,
                };
              }
              break;
            case "contentBlockDelta": {
              // Contains text or reasoning deltas
              const delta = chunk.contentBlockDelta?.delta;
              if (!delta) break;
              const token = delta.text;
              const reasoningToken = delta.reasoningContent?.text;
              if (reasoningToken) {
                // Handle reasoning text
                if (reasoningText.length === 0) {
                  // Start of reasoning block - open the <think> tag
                  const startTag = "<think>";
                  writeResponseChunk(response, {
                    uuid,
                    sources,
                    type: "textResponseChunk",
                    textResponse: startTag + reasoningToken,
                    close: false,
                    error: false,
                  });
                  reasoningText += startTag + reasoningToken;
                } else {
                  // Continuation of reasoning block
                  writeResponseChunk(response, {
                    uuid,
                    sources,
                    type: "textResponseChunk",
                    textResponse: reasoningToken,
                    close: false,
                    error: false,
                  });
                  reasoningText += reasoningToken;
                }
              } else if (token) {
                // Handle regular text
                if (reasoningText.length > 0) {
                  // If reasoning was just output, close the tag
                  const endTag = "</think>";
                  writeResponseChunk(response, {
                    uuid,
                    sources,
                    type: "textResponseChunk",
                    textResponse: endTag,
                    close: false,
                    error: false,
                  });
                  fullText += reasoningText + endTag; // Add completed reasoning to final text
                  reasoningText = ""; // Reset reasoning buffer
                }
                fullText += token; // Append regular text
                if (!hasUsageMetrics) usage.completion_tokens++; // Estimate usage if no metrics yet
                writeResponseChunk(response, {
                  uuid,
                  sources,
                  type: "textResponseChunk",
                  textResponse: token,
                  close: false,
                  error: false,
                });
              }
              break;
            }
            case "messageStop": // End of message event
              if (chunk.messageStop?.usage) {
                // Check for final metrics here too
                hasUsageMetrics = true;
                usage = {
                  // Overwrite with final metrics if available
                  prompt_tokens:
                    chunk.messageStop.usage.inputTokens ?? usage.prompt_tokens,
                  completion_tokens:
                    chunk.messageStop.usage.outputTokens ??
                    usage.completion_tokens,
                  total_tokens:
                    chunk.messageStop.usage.totalTokens ?? usage.total_tokens,
                };
              }
              // Ensure reasoning tag is closed if message stops mid-reasoning
              if (reasoningText.length > 0) {
                const endTag = "</think>";
                writeResponseChunk(response, {
                  uuid,
                  sources,
                  type: "textResponseChunk",
                  textResponse: endTag,
                  close: false,
                  error: false,
                });
                fullText += reasoningText + endTag;
                reasoningText = "";
              }
              break;
            // Ignore other event types for now
            case "messageStart":
            case "contentBlockStart":
            case "contentBlockStop":
              break;
            default:
              this.#log(`Unhandled stream action: ${action}`, chunk);
          }
        } // End for await loop
        // Final cleanup for reasoning tag in case stream ended abruptly
        if (reasoningText.length > 0 && !fullText.endsWith("</think>")) {
          const endTag = "</think>";
          if (!response.writableEnded) {
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: endTag,
              close: false,
              error: false,
            });
          }
          fullText += reasoningText + endTag;
        }
        // Send final closing chunk to signal end of stream
        if (!response.writableEnded) {
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
        }
      } catch (error) {
        // Handle errors during stream processing
        this.#log(
          `\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${error.message}`,
          error
        );
        if (response && !response.writableEnded) {
          writeResponseChunk(response, {
            uuid,
            type: "abort",
            textResponse: null,
            sources,
            close: true,
            error: `AWSBedrock:streaming - error. ${
              error?.message ?? "Unknown error"
            }`,
          });
        }
      } finally {
        // Always detach the abort listener, finalize metrics, and resolve.
        response.removeListener("close", handleAbort);
        stream?.endMeasurement(usage);
        resolve(fullText); // Resolve with the accumulated text
      }
    });
  }
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  /**
   * Embeds a single text input via the configured embedder.
   * @param {string} textInput - The text to embed.
   * @returns {Promise<any>} Whatever the configured embedder returns for one input.
   */
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  /**
   * Embeds multiple text chunks via the configured embedder.
   * @param {string[]} textChunks - The chunks to embed.
   * @returns {Promise<any>} Whatever the configured embedder returns for the chunks.
   */
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
  /**
   * Builds the prompt from promptArgs and compresses it (with raw history)
   * via the shared message array compressor helper so it fits the window.
   * @param {object} promptArgs - Arguments forwarded to constructPrompt.
   * @param {Array} rawHistory - Raw chat history records.
   * @returns {Promise<any>} The compressed message array from the helper.
   */
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
// Export the Bedrock connector for the LLM provider registry.
module.exports = {
  AWSBedrockLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/bedrock/utils.js | server/utils/AiProviders/bedrock/utils.js | const { BedrockRuntimeClient } = require("@aws-sdk/client-bedrock-runtime");
const { fromStatic } = require("@aws-sdk/token-providers");
const { ChatBedrockConverse } = require("@langchain/aws");
/** @typedef {'jpeg' | 'png' | 'gif' | 'webp'} */
const SUPPORTED_BEDROCK_IMAGE_FORMATS = ["jpeg", "png", "gif", "webp"];
/** @type {number} */
const DEFAULT_MAX_OUTPUT_TOKENS = 4096;
/** @type {number} */
const DEFAULT_CONTEXT_WINDOW_TOKENS = 8191;
/** @type {'iam' | 'iam_role' | 'sessionToken' | 'apiKey'} */
const SUPPORTED_CONNECTION_METHODS = [
"iam",
"iam_role",
"sessionToken",
"apiKey",
];
/**
 * Resolves the AWS Bedrock authentication method from the environment.
 * Falls back to "iam" when the variable is unset or holds an unsupported value.
 * @returns {"iam" | "iam_role" | "sessionToken" | "apiKey"} The authentication method.
 */
function getBedrockAuthMethod() {
  const configured = process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD || "iam";
  if (SUPPORTED_CONNECTION_METHODS.includes(configured)) return configured;
  return "iam";
}
/**
 * Creates the AWS Bedrock credentials object based on the authentication method.
 * @param {"iam" | "iam_role" | "sessionToken" | "apiKey"} authMethod - The authentication method.
 * @returns {object | undefined} The credentials object, or undefined when the
 * AWS SDK should resolve credentials itself.
 */
function createBedrockCredentials(authMethod) {
  // Explicit long-lived access keys.
  if (authMethod === "iam") {
    return {
      accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
      secretAccessKey: process.env.AWS_BEDROCK_LLM_ACCESS_KEY,
    };
  }
  // Temporary credentials that additionally carry a session token.
  if (authMethod === "sessionToken") {
    return {
      accessKeyId: process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
      secretAccessKey: process.env.AWS_BEDROCK_LLM_ACCESS_KEY,
      sessionToken: process.env.AWS_BEDROCK_LLM_SESSION_TOKEN,
    };
  }
  // Bedrock API key auth uses a static bearer-token provider.
  if (authMethod === "apiKey") {
    return fromStatic({
      token: { token: process.env.AWS_BEDROCK_LLM_API_KEY },
    });
  }
  // "iam_role" (and anything unknown): return undefined so the AWS SDK
  // resolves credentials from the host process/instance profile on its own.
  return undefined;
}
/**
 * Creates the AWS Bedrock runtime client based on the authentication method.
 * API-key auth passes a bearer-token provider and prefers the httpBearerAuth
 * scheme; every other method passes a standard credentials object.
 * @param {"iam" | "iam_role" | "sessionToken" | "apiKey"} authMethod - The authentication method.
 * @param {object | undefined} credentials - The credentials object.
 * @returns {BedrockRuntimeClient} The runtime client.
 */
function createBedrockRuntimeClient(authMethod, credentials) {
  const region = process.env.AWS_BEDROCK_LLM_REGION;
  const clientOpts =
    authMethod === "apiKey"
      ? { region, token: credentials, authSchemePreference: ["httpBearerAuth"] }
      : { region, credentials };
  return new BedrockRuntimeClient(clientOpts);
}
/**
 * Creates the AWS Bedrock chat client based on the authentication method.
 * Used explicitly by the agent provider for the AWS Bedrock provider.
 * Missing arguments are resolved from the environment.
 * @param {object} config - Extra ChatBedrockConverse configuration (spread last, so it wins).
 * @param {"iam" | "iam_role" | "sessionToken" | "apiKey"} [authMethod] - The authentication method.
 * @param {object | undefined} [credentials] - The credentials object.
 * @param {string | null} [model] - The model to use.
 * @returns {ChatBedrockConverse} The chat client.
 */
function createBedrockChatClient(config = {}, authMethod, credentials, model) {
  const resolvedAuth = authMethod || getBedrockAuthMethod();
  const resolvedCredentials =
    credentials || createBedrockCredentials(resolvedAuth);
  const resolvedModel =
    model || (process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE ?? null);
  const client = createBedrockRuntimeClient(resolvedAuth, resolvedCredentials);
  return new ChatBedrockConverse({
    region: process.env.AWS_BEDROCK_LLM_REGION,
    client,
    model: resolvedModel,
    ...config,
  });
}
/**
 * Parses a MIME type string (e.g., "image/jpeg") to extract and validate the
 * image format supported by Bedrock Converse. Handles 'image/jpg' as 'jpeg'.
 * @param {string | null | undefined} mimeType - The MIME type string.
 * @returns {string | null} The validated image format (e.g., "jpeg") or null if invalid/unsupported.
 */
function getImageFormatFromMime(mimeType = "") {
  if (!mimeType) return null;
  const [type, subtype] = mimeType.toLowerCase().split("/");
  if (type !== "image" || !subtype) return null;
  // "jpg" is a common alias Bedrock does not accept - remap to "jpeg".
  const format = subtype === "jpg" ? "jpeg" : subtype;
  return SUPPORTED_BEDROCK_IMAGE_FORMATS.includes(format) ? format : null;
}
/**
 * Decodes a pure base64 string (without data URI prefix) into a Uint8Array
 * using atob, matching the technique previously used by Langchain's
 * implementation.
 * @param {string} base64String - The pure base64 encoded data.
 * @returns {Uint8Array | null} The resulting byte array or null on decoding error.
 */
function base64ToUint8Array(base64String) {
  try {
    const binaryString = atob(base64String);
    // atob yields a "binary string" whose char codes are all <= 0xFF,
    // so mapping charCodeAt per character is byte-exact.
    return Uint8Array.from(binaryString, (ch) => ch.charCodeAt(0));
  } catch (e) {
    console.error(
      `[AWSBedrock] Error decoding base64 string with atob: ${e.message}`
    );
    return null;
  }
}
// Public surface consumed by the Bedrock LLM connector and agent provider.
module.exports = {
  SUPPORTED_CONNECTION_METHODS,
  SUPPORTED_BEDROCK_IMAGE_FORMATS,
  DEFAULT_MAX_OUTPUT_TOKENS,
  DEFAULT_CONTEXT_WINDOW_TOKENS,
  getImageFormatFromMime,
  base64ToUint8Array,
  getBedrockAuthMethod,
  createBedrockCredentials,
  createBedrockRuntimeClient,
  createBedrockChatClient,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/mistral/index.js | server/utils/AiProviders/mistral/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
/**
 * LLM connector for the Mistral platform via its OpenAI-compatible
 * chat completions API (https://api.mistral.ai/v1).
 */
class MistralLLM {
  /**
   * @param {object|null} embedder - Embedder engine; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - Optional model id override.
   * @throws {Error} When MISTRAL_API_KEY is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.MISTRAL_API_KEY)
      throw new Error("No Mistral API key was set.");
    this.className = "MistralLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: "https://api.mistral.ai/v1",
      apiKey: process.env.MISTRAL_API_KEY ?? null,
    });
    this.model =
      modelPreference || process.env.MISTRAL_MODEL_PREF || "mistral-tiny";
    // Token budget split across prompt segments (15% history, 15% system, 70% user).
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.0;
    this.log("Initialized with model:", this.model);
  }
  /**
   * Logging helper with the cyan [MistralLLM] prefix.
   * @param {string} text - The log message.
   * @param {...any} args - Additional arguments passed through to console.log.
   */
  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }
  /**
   * Formats retrieved context snippets into the standard "Context:" section
   * appended to the system prompt.
   * @param {string[]} contextTexts - Context snippets, possibly empty.
   * @returns {string} Formatted context section, or "" when none.
   * @private
   */
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
  /**
   * Indicates if the provider supports streaming responses.
   * @returns {boolean} True - streamGetChatCompletion is implemented.
   */
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
  /**
   * Fixed context window used for all Mistral models here.
   * @returns {number} 32000 tokens.
   */
  static promptWindowLimit() {
    return 32000;
  }
  /** @returns {number} 32000 tokens (same as the static limit). */
  promptWindowLimit() {
    return 32000;
  }
  /**
   * Stubbed for interface compatibility - model ids are not validated
   * client-side, so this always resolves true.
   * @param {string} modelName - Ignored.
   * @returns {Promise<boolean>} Always true.
   */
  async isValidChatCompletionModel(modelName = "") {
    return true;
  }
  /**
   * Generates appropriate content array for a message + attachments.
   * Returns the plain string when there are no attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) return userPrompt;
    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: attachment.contentString,
      });
    }
    return content.flat();
  }
  /**
   * Construct the user prompt for this model: system message (with context),
   * formatted chat history, then the latest user message.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns {Array<object>} OpenAI-style message array.
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [], // This is the specific attachment for only this prompt
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }
  /**
   * Sends a non-streaming chat completion request.
   * @param {Array<object>|null} messages - Message array from constructPrompt.
   * @param {{temperature?: number}} options - Sampling options.
   * @returns {Promise<object|null>} textResponse + metrics, or null when the
   * API returned no choices.
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Mistral chat: ${this.model} is not valid for chat completion!`
      );
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );
    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;
    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage.prompt_tokens || 0,
        completion_tokens: result.output.usage.completion_tokens || 0,
        total_tokens: result.output.usage.total_tokens || 0,
        outputTps: result.output.usage.completion_tokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }
  /**
   * Sends a streaming chat completion request wrapped with performance
   * monitoring.
   * @param {Array<object>|null} messages - Message array from constructPrompt.
   * @param {{temperature?: number}} options - Sampling options.
   * @returns {Promise<object>} The monitored stream request.
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Mistral chat: ${this.model} is not valid for chat completion!`
      );
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }
  /**
   * Streams the response to the client using the shared OpenAI-compatible
   * stream handler.
   */
  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
  /**
   * Builds the prompt and compresses it (with raw history) via the shared
   * message array compressor so it fits the context window.
   */
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
// Export the Mistral connector for the LLM provider registry.
module.exports = {
  MistralLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/apipie/index.js | server/utils/AiProviders/apipie/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { v4: uuidv4 } = require("uuid");
const {
writeResponseChunk,
clientAbortedHandler,
formatChatHistory,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
// Local cache directory for ApiPie model metadata (models.json + .cached_at).
// Lives under STORAGE_DIR when set, otherwise the repo's storage directory.
const cacheFolder = path.resolve(
  process.env.STORAGE_DIR
    ? path.resolve(process.env.STORAGE_DIR, "models", "apipie")
    : path.resolve(__dirname, `../../../storage/models/apipie`)
);
class ApiPieLLM {
  /**
   * Creates the ApiPie connector (OpenAI-compatible API at apipie.ai).
   * @param {object|null} embedder - Embedder engine; defaults to NativeEmbedder.
   * @param {string|null} modelPreference - Optional model id override.
   * @throws {Error} When APIPIE_LLM_API_KEY is not set.
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.APIPIE_LLM_API_KEY)
      throw new Error("No ApiPie LLM API key was set.");
    this.className = "ApiPieLLM";
    const { OpenAI: OpenAIApi } = require("openai");
    this.basePath = "https://apipie.ai/v1";
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
    });
    this.model =
      modelPreference ||
      process.env.APIPIE_LLM_MODEL_PREF ||
      "openrouter/mistral-7b-instruct";
    // Token budget split across prompt segments (15% history, 15% system, 70% user).
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    // Ensure the on-disk model-info cache directory exists before use.
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
  }
  /**
   * Logging helper with the cyan [ApiPieLLM] prefix.
   * @param {string} text - The log message.
   * @param {...any} args - Additional arguments passed through to console.log.
   */
  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }
// This checks if the .cached_at file has a timestamp that is more than 1Week (in millis)
// from the current date. If it is, then we will refetch the API so that all the models are up
// to date.
#cacheIsStale() {
const MAX_STALE = 6.048e8; // 1 Week in MS
if (!fs.existsSync(this.cacheAtPath)) return true;
const now = Number(new Date());
const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
return now - timestampMs > MAX_STALE;
}
// This function fetches the models from the ApiPie API and caches them locally.
// We do this because the ApiPie API has a lot of models, and we need to get the proper token context window
// for each model and this is a constructor property - so we can really only get it if this cache exists.
// We used to have this as a chore, but given there is an API to get the info - this makes little sense.
// This might slow down the first request, but we need the proper token context window
// for each model and this is a constructor property - so we can really only get it if this cache exists.
async #syncModels() {
if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
return false;
this.log("Model cache is not present or stale. Fetching from ApiPie API.");
await fetchApiPieModels();
return;
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
models() {
if (!fs.existsSync(this.cacheModelPath)) return {};
return safeJsonParse(
fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
{}
);
}
chatModels() {
const allModels = this.models();
return Object.entries(allModels).reduce(
(chatModels, [modelId, modelInfo]) => {
// Filter for chat models
if (
modelInfo.subtype &&
(modelInfo.subtype.includes("chat") ||
modelInfo.subtype.includes("chatx"))
) {
chatModels[modelId] = modelInfo;
}
return chatModels;
},
{}
);
}
// Streaming support flag - true whenever streamGetChatCompletion is implemented.
streamingEnabled() {
  return "streamGetChatCompletion" in this;
}
static promptWindowLimit(modelName) {
const cacheModelPath = path.resolve(cacheFolder, "models.json");
const availableModels = fs.existsSync(cacheModelPath)
? safeJsonParse(
fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
{}
)
: {};
return availableModels[modelName]?.maxLength || 4096;
}
promptWindowLimit() {
const availableModels = this.chatModels();
return availableModels[this.model]?.maxLength || 4096;
}
async isValidChatCompletionModel(model = "") {
await this.#syncModels();
const availableModels = this.chatModels();
return availableModels.hasOwnProperty(model);
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) {
return userPrompt;
}
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
detail: "auto",
},
});
}
return content.flat();
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
/**
 * Performs a single (non-streaming) chat completion against ApiPie.
 * @param {object[]|null} messages - OpenAI-style message array
 * @param {{temperature?: number}} options
 * @returns {Promise<{textResponse: string, metrics: object}|null>} null when the
 *   response body contains no choices
 * @throws {Error} when the selected model is not a known chat model, or the API call fails
 */
async getChatCompletion(messages = null, { temperature = 0.7 }) {
  if (!(await this.isValidChatCompletionModel(this.model)))
    throw new Error(
      `ApiPie chat: ${this.model} is not valid for chat completion!`
    );
  // Wrap the request so duration/token throughput metrics are captured.
  const result = await LLMPerformanceMonitor.measureAsyncFunction(
    this.openai.chat.completions
      .create({
        model: this.model,
        messages,
        temperature,
      })
      .catch((e) => {
        throw new Error(e.message);
      })
  );
  if (
    !result.output.hasOwnProperty("choices") ||
    result.output.choices.length === 0
  )
    return null;
  return {
    textResponse: result.output.choices[0].message.content,
    metrics: {
      prompt_tokens: result.output.usage?.prompt_tokens || 0,
      completion_tokens: result.output.usage?.completion_tokens || 0,
      total_tokens: result.output.usage?.total_tokens || 0,
      outputTps:
        (result.output.usage?.completion_tokens || 0) / result.duration,
      duration: result.duration,
      model: this.model,
      timestamp: new Date(),
    },
  };
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`ApiPie chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
func: this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages,
runPromptTokenCalculation: true,
modelTag: this.model,
});
return measuredStreamRequest;
}
/**
 * Pumps a streamed ApiPie completion out to the HTTP response chunk-by-chunk.
 * Resolves with the full accumulated text once the stream finishes, aborts,
 * or errors.
 * @param {object} response - Express response (SSE-style chunk writer target)
 * @param {object} stream - monitored stream from streamGetChatCompletion()
 * @param {object} responseProps - { uuid, sources } forwarded to the client
 * @returns {Promise<string>} the full generated text
 */
handleStream(response, stream, responseProps) {
  const { uuid = uuidv4(), sources = [] } = responseProps;
  return new Promise(async (resolve) => {
    let fullText = "";
    // Establish listener to early-abort a streaming response
    // in case things go sideways or the user does not like the response.
    // We preserve the generated text but continue as if chat was completed
    // to preserve previously generated content.
    const handleAbort = () => {
      stream?.endMeasurement({
        completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
      });
      clientAbortedHandler(resolve, fullText);
    };
    response.on("close", handleAbort);
    try {
      for await (const chunk of stream) {
        const message = chunk?.choices?.[0];
        const token = message?.delta?.content;
        if (token) {
          fullText += token;
          writeResponseChunk(response, {
            uuid,
            sources: [],
            type: "textResponseChunk",
            textResponse: token,
            close: false,
            error: false,
          });
        }
        // End-of-stream: either no choice object at all, or a non-null finish_reason.
        // NOTE(review): unlike sibling providers there is no `break` after resolve -
        // presumably the stream yields no further chunks after finish_reason; confirm.
        if (message === undefined || message.finish_reason !== null) {
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
          response.removeListener("close", handleAbort);
          stream?.endMeasurement({
            completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
          });
          resolve(fullText);
        }
      }
    } catch (e) {
      // Surface the stream failure to the client, then resolve with whatever
      // text was generated before the error.
      writeResponseChunk(response, {
        uuid,
        sources,
        type: "abort",
        textResponse: null,
        close: true,
        error: e.message,
      });
      response.removeListener("close", handleAbort);
      stream?.endMeasurement({
        completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
      });
      resolve(fullText);
    }
  });
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
/**
 * Fetches the full model list from the ApiPie API and persists it (plus a
 * timestamp marker used for staleness checks) to the local cache folder.
 * @param {string|null} providedApiKey - overrides APIPIE_LLM_API_KEY when set
 * @returns {Promise<Object>} model map keyed by "provider/model"; {} on failure
 */
async function fetchApiPieModels(providedApiKey = null) {
  const apiKey = providedApiKey || process.env.APIPIE_LLM_API_KEY || null;
  const headers = { "Content-Type": "application/json" };
  if (apiKey) headers.Authorization = `Bearer ${apiKey}`;
  try {
    const response = await fetch(`https://apipie.ai/v1/models`, {
      method: "GET",
      headers,
    });
    const { data = [] } = await response.json();
    const models = {};
    for (const model of data) {
      const fullId = `${model.provider}/${model.model}`;
      models[fullId] = {
        id: fullId,
        name: fullId,
        organization: model.provider,
        subtype: model.subtype,
        maxLength: model.max_tokens,
      };
    }
    // Cache all response information
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    fs.writeFileSync(
      path.resolve(cacheFolder, "models.json"),
      JSON.stringify(models),
      { encoding: "utf-8" }
    );
    fs.writeFileSync(
      path.resolve(cacheFolder, ".cached_at"),
      String(Number(new Date())),
      { encoding: "utf-8" }
    );
    return models;
  } catch (e) {
    console.error(e);
    return {};
  }
}
// Public surface: the LLM provider class and the model-cache refresh helper.
module.exports = {
  ApiPieLLM,
  fetchApiPieModels,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/anthropic/index.js | server/utils/AiProviders/anthropic/index.js | const { v4 } = require("uuid");
const {
writeResponseChunk,
clientAbortedHandler,
formatChatHistory,
} = require("../../helpers/chat/responses");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { MODEL_MAP } = require("../modelMap");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
/**
 * AnthropicLLM
 * Chat-completion provider backed by the official `@anthropic-ai/sdk`.
 * Implements the common provider interface (constructPrompt, getChatCompletion,
 * streamGetChatCompletion, handleStream, embedder passthroughs, compressMessages)
 * plus opt-in prompt caching via ANTHROPIC_CACHE_CONTROL.
 */
class AnthropicLLM {
  /**
   * @param {object|null} embedder - embedding engine; defaults to NativeEmbedder
   * @param {string|null} modelPreference - overrides ANTHROPIC_MODEL_PREF
   * @throws {Error} when ANTHROPIC_API_KEY is not set
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.ANTHROPIC_API_KEY)
      throw new Error("No Anthropic API key was set.");
    this.className = "AnthropicLLM";
    // Docs: https://www.npmjs.com/package/@anthropic-ai/sdk
    const AnthropicAI = require("@anthropic-ai/sdk");
    const anthropic = new AnthropicAI({
      apiKey: process.env.ANTHROPIC_API_KEY,
    });
    this.anthropic = anthropic;
    this.model =
      modelPreference ||
      process.env.ANTHROPIC_MODEL_PREF ||
      "claude-3-5-sonnet-20241022";
    // Budget split of the context window between history/system/user content.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.log(
      `Initialized with ${this.model}. Cache ${this.cacheControl ? `enabled (${this.cacheControl.ttl})` : "disabled"}`
    );
  }
  // Namespaced console logger for this provider.
  log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }
  // Streaming support flag - true whenever streamGetChatCompletion is implemented.
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
  // Token context window for a given model (100k fallback when unmapped).
  static promptWindowLimit(modelName) {
    return MODEL_MAP.get("anthropic", modelName) ?? 100_000;
  }
  promptWindowLimit() {
    return MODEL_MAP.get("anthropic", this.model) ?? 100_000;
  }
  // Anthropic model ids are not validated locally - any id is accepted and the
  // API itself will reject unknown models.
  isValidChatCompletionModel(_modelName = "") {
    return true;
  }
  /**
   * Parses the cache control ENV variable
   *
   * If caching is enabled, we can pass less than 1024 tokens and Anthropic will just
   * ignore it unless it is above the model's minimum. Since this feature is opt-in
   * we can safely assume that if caching is enabled that we should just pass the content as is.
   * https://docs.claude.com/en/docs/build-with-claude/prompt-caching#cache-limitations
   *
   * @param {string} value - The ENV value (5m or 1h)
   * @returns {null|{type: "ephemeral", ttl: "5m" | "1h"}} Cache control configuration
   */
  get cacheControl() {
    // Store result in instance variable to avoid recalculating
    if (this._cacheControl) return this._cacheControl;
    if (!process.env.ANTHROPIC_CACHE_CONTROL) this._cacheControl = null;
    else {
      const normalized =
        process.env.ANTHROPIC_CACHE_CONTROL.toLowerCase().trim();
      if (["5m", "1h"].includes(normalized))
        this._cacheControl = { type: "ephemeral", ttl: normalized };
      else this._cacheControl = null;
    }
    return this._cacheControl;
  }
  /**
   * Builds system parameter with cache control if applicable
   * @param {string} systemContent - The system prompt content
   * @returns {string|array} System parameter for API call
   */
  #buildSystemPrompt(systemContent) {
    if (!systemContent || !this.cacheControl) return systemContent;
    return [
      {
        type: "text",
        text: systemContent,
        cache_control: this.cacheControl,
      },
    ];
  }
  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }
    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      // Anthropic expects base64 image payloads without the data-URI prefix.
      content.push({
        type: "image",
        source: {
          type: "base64",
          media_type: attachment.mime,
          data: attachment.contentString.split("base64,")[1],
        },
      });
    }
    return content.flat();
  }
  /**
   * Assembles the full message array: system prompt (+ context), chat history,
   * then the current user turn with any attachments.
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [], // This is the specific attachment for only this prompt
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }
  /**
   * Performs a single (non-streaming) chat completion.
   * @param {object[]|null} messages - provider message array (system message first)
   * @param {{temperature?: number}} options
   * @returns {Promise<{textResponse: string|Error, metrics: object}>}
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    try {
      const systemContent = messages[0].content;
      const result = await LLMPerformanceMonitor.measureAsyncFunction(
        this.anthropic.messages.create({
          model: this.model,
          max_tokens: 4096,
          system: this.#buildSystemPrompt(systemContent),
          messages: messages.slice(1), // Pop off the system message
          temperature: Number(temperature ?? this.defaultTemp),
        })
      );
      const promptTokens = result.output.usage.input_tokens;
      const completionTokens = result.output.usage.output_tokens;
      return {
        textResponse: result.output.content[0].text,
        metrics: {
          prompt_tokens: promptTokens,
          completion_tokens: completionTokens,
          total_tokens: promptTokens + completionTokens,
          outputTps: completionTokens / result.duration,
          duration: result.duration,
          model: this.model,
          timestamp: new Date(),
        },
      };
    } catch (error) {
      console.log(error);
      // NOTE(review): returns the Error object itself as textResponse (not
      // error.message) - downstream presumably stringifies it; confirm.
      return { textResponse: error, metrics: {} };
    }
  }
  /**
   * Opens a streaming chat completion; the returned monitored stream is
   * consumed by handleStream().
   */
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const systemContent = messages[0].content;
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.anthropic.messages.stream({
        model: this.model,
        max_tokens: 4096,
        system: this.#buildSystemPrompt(systemContent),
        messages: messages.slice(1), // Pop off the system message
        temperature: Number(temperature ?? this.defaultTemp),
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }
  /**
   * Handles the stream response from the Anthropic API.
   * @param {Object} response - the response object
   * @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream - the stream response from the Anthropic API w/tracking
   * @param {Object} responseProps - the response properties
   * @returns {Promise<string>}
   */
  handleStream(response, stream, responseProps) {
    return new Promise((resolve) => {
      let fullText = "";
      const { uuid = v4(), sources = [] } = responseProps;
      // Token usage is populated from message_start / message_delta events.
      let usage = {
        prompt_tokens: 0,
        completion_tokens: 0,
      };
      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => {
        stream?.endMeasurement(usage);
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);
      stream.on("error", (event) => {
        const parseErrorMsg = (event) => {
          const error = event?.error?.error;
          if (!!error)
            return `Anthropic Error:${error?.type || "unknown"} ${
              error?.message || "unknown error."
            }`;
          return event.message;
        };
        writeResponseChunk(response, {
          uuid,
          sources: [],
          type: "abort",
          textResponse: null,
          close: true,
          error: parseErrorMsg(event),
        });
        response.removeListener("close", handleAbort);
        stream?.endMeasurement(usage);
        resolve(fullText);
      });
      stream.on("streamEvent", (message) => {
        const data = message;
        if (data.type === "message_start")
          usage.prompt_tokens = data?.message?.usage?.input_tokens;
        if (data.type === "message_delta")
          usage.completion_tokens = data?.usage?.output_tokens;
        // Forward each text delta to the client as it arrives.
        if (
          data.type === "content_block_delta" &&
          data.delta.type === "text_delta"
        ) {
          const text = data.delta.text;
          fullText += text;
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: text,
            close: false,
            error: false,
          });
        }
        // End of stream: close out the response and finalize metrics.
        if (
          message.type === "message_stop" ||
          (data.stop_reason && data.stop_reason === "end_turn")
        ) {
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
          response.removeListener("close", handleAbort);
          stream?.endMeasurement(usage);
          resolve(fullText);
        }
      });
    });
  }
  // Builds the "Context" suffix appended to the system prompt; each snippet is
  // wrapped in [CONTEXT i]...[END CONTEXT i] markers.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
  // Compresses the prompt (as a string) to fit the model's context window.
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageStringCompressor } = require("../../helpers/chat");
    const compressedPrompt = await messageStringCompressor(
      this,
      promptArgs,
      rawHistory
    );
    return compressedPrompt;
  }
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
}
// Public surface: the Anthropic LLM provider class.
module.exports = {
  AnthropicLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/giteeai/index.js | server/utils/AiProviders/giteeai/index.js | const fs = require("fs");
const path = require("path");
const { v4: uuidv4 } = require("uuid");
const { safeJsonParse, toValidNumber } = require("../../http");
const LEGACY_MODEL_MAP = require("../modelMap/legacy");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
// On-disk cache location for the GiteeAI model list (models.json + .cached_at).
// Lives under STORAGE_DIR when configured, otherwise the bundled storage folder.
const cacheFolder = path.resolve(
  process.env.STORAGE_DIR
    ? path.resolve(process.env.STORAGE_DIR, "models", "giteeai")
    : path.resolve(__dirname, `../../../storage/models/giteeai`)
);
/**
 * GiteeAILLM
 * Chat-completion provider for Gitee AI's OpenAI-compatible endpoint
 * (https://ai.gitee.com/v1). Supports reasoning models by re-emitting
 * `reasoning_content` deltas wrapped in <think>...</think> tags.
 */
class GiteeAILLM {
  /**
   * @param {object|null} embedder - embedding engine; defaults to NativeEmbedder
   * @param {string|null} modelPreference - overrides GITEE_AI_MODEL_PREF
   * @throws {Error} when GITEE_AI_API_KEY is not set
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.GITEE_AI_API_KEY)
      throw new Error("No Gitee AI API key was set.");
    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      apiKey: process.env.GITEE_AI_API_KEY,
      baseURL: "https://ai.gitee.com/v1",
    });
    this.model = modelPreference || process.env.GITEE_AI_MODEL_PREF || "";
    // Budget split of the context window between history/system/user content.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
    this.log("Initialized with model:", this.model);
  }
  // Namespaced console logger for this provider.
  log(text, ...args) {
    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
  }
  // This checks if the .cached_at file has a timestamp that is more than 1Week (in millis)
  // from the current date. If it is, then we will refetch the API so that all the models are up
  // to date.
  #cacheIsStale() {
    const MAX_STALE = 6.048e8; // 1 Week in MS
    if (!fs.existsSync(this.cacheAtPath)) return true;
    const now = Number(new Date());
    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
    return now - timestampMs > MAX_STALE;
  }
  // This function fetches the models from the GiteeAI API and caches them locally.
  async #syncModels() {
    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
      return false;
    this.log("Model cache is not present or stale. Fetching from GiteeAI API.");
    await giteeAiModels();
    return;
  }
  // Reads the cached model map from disk ({} when absent or unparseable).
  models() {
    if (!fs.existsSync(this.cacheModelPath)) return {};
    return safeJsonParse(
      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
      {}
    );
  }
  // Builds the "Context" suffix appended to the system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
  // Streaming support flag - true whenever streamGetChatCompletion is implemented.
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
  // Token window: ENV override > legacy model map > 8192 fallback.
  static promptWindowLimit(model) {
    return (
      toValidNumber(process.env.GITEE_AI_MODEL_TOKEN_LIMIT) ||
      LEGACY_MODEL_MAP.giteeai[model] ||
      8192
    );
  }
  promptWindowLimit() {
    return (
      toValidNumber(process.env.GITEE_AI_MODEL_TOKEN_LIMIT) ||
      LEGACY_MODEL_MAP.giteeai[this.model] ||
      8192
    );
  }
  // Model ids are not validated locally - the API rejects unknown models.
  async isValidChatCompletionModel(modelName = "") {
    return true;
  }
  // Assembles system prompt (+ context), history, and the current user turn.
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }
  /**
   * Parses and prepends reasoning from the response and returns the full text response.
   * @param {Object} response
   * @returns {string}
   */
  #parseReasoningFromResponse({ message }) {
    let textResponse = message?.content;
    if (
      !!message?.reasoning_content &&
      message.reasoning_content.trim().length > 0
    )
      textResponse = `<think>${message.reasoning_content}</think>${textResponse}`;
    return textResponse;
  }
  /**
   * Performs a single (non-streaming) chat completion.
   * @param {object[]|null} messages
   * @param {{temperature?: number}} options
   * @returns {Promise<{textResponse: string, metrics: object}>}
   * @throws {Error} on API failure or a response body with no choices
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );
    if (
      !result?.output?.hasOwnProperty("choices") ||
      result?.output?.choices?.length === 0
    )
      throw new Error(
        `Invalid response body returned from GiteeAI: ${JSON.stringify(result.output)}`
      );
    return {
      textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
      metrics: {
        prompt_tokens: result.output.usage.prompt_tokens || 0,
        completion_tokens: result.output.usage.completion_tokens || 0,
        total_tokens: result.output.usage.total_tokens || 0,
        outputTps: result.output.usage.completion_tokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }
  // Opens a streaming chat completion wrapped in the performance monitor.
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: false,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }
  // TODO: This is a copy of the generic handleStream function in responses.js
  // to specifically handle the GiteeAI reasoning model `reasoning_content` field.
  // When or if ever possible, we should refactor this to be in the generic function.
  handleStream(response, stream, responseProps) {
    const { uuid = uuidv4(), sources = [] } = responseProps;
    let hasUsageMetrics = false;
    let usage = {
      completion_tokens: 0,
    };
    return new Promise(async (resolve) => {
      let fullText = "";
      let reasoningText = "";
      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => {
        stream?.endMeasurement(usage);
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);
      try {
        for await (const chunk of stream) {
          const message = chunk?.choices?.[0];
          const token = message?.delta?.content;
          const reasoningToken = message?.delta?.reasoning_content;
          // Prefer real usage metrics from the API over our own estimate.
          if (
            chunk.hasOwnProperty("usage") && // exists
            !!chunk.usage && // is not null
            Object.values(chunk.usage).length > 0 // has values
          ) {
            if (chunk.usage.hasOwnProperty("prompt_tokens")) {
              usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
            }
            if (chunk.usage.hasOwnProperty("completion_tokens")) {
              hasUsageMetrics = true; // to stop estimating counter
              usage.completion_tokens = Number(chunk.usage.completion_tokens);
            }
          }
          // Reasoning models will always return the reasoning text before the token text.
          if (reasoningToken) {
            // If the reasoning text is empty (''), we need to initialize it
            // and send the first chunk of reasoning text.
            if (reasoningText.length === 0) {
              writeResponseChunk(response, {
                uuid,
                sources: [],
                type: "textResponseChunk",
                textResponse: `<think>${reasoningToken}`,
                close: false,
                error: false,
              });
              reasoningText += `<think>${reasoningToken}`;
              continue;
            } else {
              writeResponseChunk(response, {
                uuid,
                sources: [],
                type: "textResponseChunk",
                textResponse: reasoningToken,
                close: false,
                error: false,
              });
              reasoningText += reasoningToken;
            }
          }
          // If the reasoning text is not empty, but the reasoning token is empty
          // and the token text is not empty we need to close the reasoning text and begin sending the token text.
          if (!!reasoningText && !reasoningToken && token) {
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: `</think>`,
              close: false,
              error: false,
            });
            fullText += `${reasoningText}</think>`;
            reasoningText = "";
          }
          if (token) {
            fullText += token;
            // If we never saw a usage metric, we can estimate them by number of completion chunks
            if (!hasUsageMetrics) usage.completion_tokens++;
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: token,
              close: false,
              error: false,
            });
          }
          // LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
          // Either way, the key `finish_reason` must be present to determine ending chunk.
          if (
            message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
            message.finish_reason !== "" &&
            message.finish_reason !== null
          ) {
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            response.removeListener("close", handleAbort);
            stream?.endMeasurement(usage);
            resolve(fullText);
            break; // Break streaming when a valid finish_reason is first encountered
          }
        }
      } catch (e) {
        console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
        writeResponseChunk(response, {
          uuid,
          type: "abort",
          textResponse: null,
          sources: [],
          close: true,
          error: e.message,
        });
        stream?.endMeasurement(usage);
        resolve(fullText); // Return what we currently have - if anything.
      }
    });
  }
  // Simple wrappers delegating embedding work to the configured embedder.
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
  // Builds the prompt then compresses it to fit the model's context window.
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
/**
 * Fetches the text2text model list from the GiteeAI API and persists it (plus
 * a timestamp marker used for staleness checks) to the local cache folder.
 * @returns {Promise<Object>} model map keyed by model id; {} on failure
 */
async function giteeAiModels() {
  const url = new URL("https://ai.gitee.com/v1/models");
  url.searchParams.set("type", "text2text");
  try {
    const response = await fetch(url.toString(), {
      method: "GET",
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${process.env.GITEE_AI_API_KEY}`,
      },
    });
    const { data = [] } = await response.json();
    const validModels = {};
    for (const model of data) {
      validModels[model.id] = {
        id: model.id,
        name: model.id,
        organization: model.owned_by,
      };
    }
    // Cache all response information
    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    fs.writeFileSync(
      path.resolve(cacheFolder, "models.json"),
      JSON.stringify(validModels),
      { encoding: "utf-8" }
    );
    fs.writeFileSync(
      path.resolve(cacheFolder, ".cached_at"),
      String(Number(new Date())),
      { encoding: "utf-8" }
    );
    return validModels;
  } catch (e) {
    console.error(e);
    return {};
  }
}
// Public surface: the GiteeAI provider class and the model-cache refresh helper.
module.exports = {
  GiteeAILLM,
  giteeAiModels,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/perplexity/index.js | server/utils/AiProviders/perplexity/index.js | const { v4: uuidv4 } = require("uuid");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
// Returns the static catalog of Perplexity models bundled with the app
// (falls back to {} if the module exports nothing).
function perplexityModels() {
  const models = require("./models.js").MODELS;
  return models || {};
}
/**
 * PerplexityLLM
 * Chat-completion provider for Perplexity's OpenAI-compatible endpoint
 * (https://api.perplexity.ai). Streams responses and rewrites inline [n]
 * citation markers into markdown links using the citations the API returns.
 */
class PerplexityLLM {
  /**
   * @param {object|null} embedder - embedding engine; defaults to NativeEmbedder
   * @param {string|null} modelPreference - overrides PERPLEXITY_MODEL_PREF
   * @throws {Error} when PERPLEXITY_API_KEY is not set
   */
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.PERPLEXITY_API_KEY)
      throw new Error("No Perplexity API key was set.");
    const { OpenAI: OpenAIApi } = require("openai");
    this.openai = new OpenAIApi({
      baseURL: "https://api.perplexity.ai",
      apiKey: process.env.PERPLEXITY_API_KEY ?? null,
    });
    this.model =
      modelPreference ||
      process.env.PERPLEXITY_MODEL_PREF ||
      "llama-3-sonar-large-32k-online"; // Give at least a unique model to the provider as last fallback.
    // Budget split of the context window between history/system/user content.
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };
    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
  }
  // Builds the "Context" suffix appended to the system prompt.
  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
  // Full static model catalog (id -> info) from the bundled models file.
  allModelInformation() {
    return perplexityModels();
  }
  // Streaming support flag - true whenever streamGetChatCompletion is implemented.
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }
  // Token context window for a given model (4096 fallback when unmapped).
  static promptWindowLimit(modelName) {
    const availableModels = perplexityModels();
    return availableModels[modelName]?.maxLength || 4096;
  }
  promptWindowLimit() {
    const availableModels = this.allModelInformation();
    return availableModels[this.model]?.maxLength || 4096;
  }
  // A model is valid only if it exists in the bundled catalog.
  async isValidChatCompletionModel(model = "") {
    const availableModels = this.allModelInformation();
    return availableModels.hasOwnProperty(model);
  }
  // Assembles system prompt (+ context), history, and the current user turn.
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }
  /**
   * Performs a single (non-streaming) chat completion.
   * @param {object[]|null} messages
   * @param {{temperature?: number}} options
   * @returns {Promise<{textResponse: string, metrics: object}|null>} null when
   *   the response body contains no choices
   * @throws {Error} when the model is not in the catalog or the API call fails
   */
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Perplexity chat: ${this.model} is not valid for chat completion!`
      );
    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );
    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;
    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage?.prompt_tokens || 0,
        completion_tokens: result.output.usage?.completion_tokens || 0,
        total_tokens: result.output.usage?.total_tokens || 0,
        outputTps: result.output.usage?.completion_tokens / result.duration,
        duration: result.duration,
        model: this.model,
        timestamp: new Date(),
      },
    };
  }
  // Opens a streaming chat completion wrapped in the performance monitor.
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Perplexity chat: ${this.model} is not valid for chat completion!`
      );
    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
      func: this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages,
      runPromptTokenCalculation: true,
      modelTag: this.model,
    });
    return measuredStreamRequest;
  }
  /**
   * Enrich a token with citations if available for in-line citations.
   * @param {string} token - The token to enrich.
   * @param {Array} citations - The citations to enrich the token with.
   * @returns {string} The enriched token.
   */
  enrichToken(token, citations) {
    if (!Array.isArray(citations) || citations.length === 0) return token;
    // [n] markers are 1-indexed; map them onto the 0-indexed citations array.
    return token.replace(/\[(\d+)\]/g, (match, index) => {
      const citationIndex = parseInt(index) - 1;
      return citations[citationIndex]
        ? `[[${index}](${citations[citationIndex]})]`
        : match;
    });
  }
  /**
   * Pumps a streamed Perplexity completion out to the HTTP response. A 500ms
   * watchdog closes the response if no chunk arrives for >800ms, because the
   * Perplexity stream does not always self-close.
   * @param {object} response - Express response (chunk writer target)
   * @param {object} stream - monitored stream from streamGetChatCompletion()
   * @param {object} responseProps - { uuid, sources } forwarded to the client
   * @returns {Promise<string>} the full generated text
   */
  handleStream(response, stream, responseProps) {
    const timeoutThresholdMs = 800;
    const { uuid = uuidv4(), sources = [] } = responseProps;
    let hasUsageMetrics = false;
    let pplxCitations = []; // Array of links
    let usage = {
      completion_tokens: 0,
    };
    return new Promise(async (resolve) => {
      let fullText = "";
      let lastChunkTime = null;
      // Early-abort handler: keep whatever text was generated so far.
      const handleAbort = () => {
        stream?.endMeasurement(usage);
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);
      const timeoutCheck = setInterval(() => {
        if (lastChunkTime === null) return;
        const now = Number(new Date());
        const diffMs = now - lastChunkTime;
        if (diffMs >= timeoutThresholdMs) {
          console.log(
            `Perplexity stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
          );
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
          clearInterval(timeoutCheck);
          response.removeListener("close", handleAbort);
          stream?.endMeasurement(usage);
          resolve(fullText);
        }
      }, 500);
      // Now handle the chunks from the streamed response and append to fullText.
      try {
        for await (const chunk of stream) {
          lastChunkTime = Number(new Date());
          const message = chunk?.choices?.[0];
          const token = message?.delta?.content;
          if (Array.isArray(chunk.citations) && chunk.citations.length !== 0) {
            pplxCitations = chunk.citations;
          }
          // If we see usage metrics in the chunk, we can use them directly
          // instead of estimating them, but we only want to assign values if
          // the response object is the exact same key:value pair we expect.
          if (
            chunk.hasOwnProperty("usage") && // exists
            !!chunk.usage && // is not null
            Object.values(chunk.usage).length > 0 // has values
          ) {
            if (chunk.usage.hasOwnProperty("prompt_tokens")) {
              usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
            }
            if (chunk.usage.hasOwnProperty("completion_tokens")) {
              hasUsageMetrics = true; // to stop estimating counter
              usage.completion_tokens = Number(chunk.usage.completion_tokens);
            }
          }
          if (token) {
            let enrichedToken = this.enrichToken(token, pplxCitations);
            fullText += enrichedToken;
            if (!hasUsageMetrics) usage.completion_tokens++;
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: enrichedToken,
              close: false,
              error: false,
            });
          }
          if (message?.finish_reason) {
            console.log("closing"); // NOTE(review): looks like leftover debug output - confirm and remove.
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            response.removeListener("close", handleAbort);
            stream?.endMeasurement(usage);
            clearInterval(timeoutCheck);
            resolve(fullText);
            break; // Break streaming when a valid finish_reason is first encountered
          }
        }
      } catch (e) {
        console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
        writeResponseChunk(response, {
          uuid,
          type: "abort",
          textResponse: null,
          sources: [],
          close: true,
          error: e.message,
        });
        stream?.endMeasurement(usage);
        clearInterval(timeoutCheck);
        resolve(fullText); // Return what we currently have - if anything.
      }
    });
  }
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }
  // Builds the prompt then compresses it to fit the model's context window.
  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
module.exports = {
PerplexityLLM,
perplexityModels,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/perplexity/models.js | server/utils/AiProviders/perplexity/models.js | const MODELS = {
  // Static catalog of Perplexity chat models, keyed by model id.
  // `maxLength` is the context window in tokens. Values are hard-coded —
  // confirm against Perplexity's current model documentation when editing.
  "sonar-reasoning-pro": {
    id: "sonar-reasoning-pro",
    name: "sonar-reasoning-pro",
    maxLength: 127072,
  },
  "sonar-reasoning": {
    id: "sonar-reasoning",
    name: "sonar-reasoning",
    maxLength: 127072,
  },
  "sonar-pro": {
    id: "sonar-pro",
    name: "sonar-pro",
    maxLength: 200000,
  },
  sonar: {
    id: "sonar",
    name: "sonar",
    maxLength: 127072,
  },
};

// Exported for the Perplexity provider module.
module.exports.MODELS = MODELS;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/deepseek/index.js | server/utils/AiProviders/deepseek/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const { v4: uuidv4 } = require("uuid");
const { MODEL_MAP } = require("../modelMap");
const {
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
class DeepSeekLLM {
/**
 * @param {object|null} embedder - embedding engine instance; falls back to NativeEmbedder.
 * @param {string|null} modelPreference - overrides DEEPSEEK_MODEL_PREF when provided.
 * @throws {Error} when DEEPSEEK_API_KEY is not set.
 */
constructor(embedder = null, modelPreference = null) {
  // Hard requirement: the provider cannot be constructed without an API key.
  if (!process.env.DEEPSEEK_API_KEY)
    throw new Error("No DeepSeek API key was set.");
  this.className = "DeepSeekLLM";

  // DeepSeek exposes an OpenAI-compatible API, so the OpenAI SDK is reused
  // with a custom baseURL.
  const { OpenAI: OpenAIApi } = require("openai");
  this.openai = new OpenAIApi({
    apiKey: process.env.DEEPSEEK_API_KEY,
    baseURL: "https://api.deepseek.com/v1",
  });
  this.model =
    modelPreference || process.env.DEEPSEEK_MODEL_PREF || "deepseek-chat";

  // Context-window budget split: 15% history, 15% system, 70% user prompt.
  this.limits = {
    history: this.promptWindowLimit() * 0.15,
    system: this.promptWindowLimit() * 0.15,
    user: this.promptWindowLimit() * 0.7,
  };

  this.embedder = embedder ?? new NativeEmbedder();
  this.defaultTemp = 0.7;
  this.log(
    `Initialized ${this.model} with context window ${this.promptWindowLimit()}`
  );
}
log(text, ...args) {
console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(modelName) {
return MODEL_MAP.get("deepseek", modelName) ?? 8192;
}
promptWindowLimit() {
return MODEL_MAP.get("deepseek", this.model) ?? 8192;
}
async isValidChatCompletionModel(modelName = "") {
const models = await this.openai.models.list().catch(() => ({ data: [] }));
return models.data.some((model) => model.id === modelName);
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
/**
* Parses and prepends reasoning from the response and returns the full text response.
* @param {Object} response
* @returns {string}
*/
#parseReasoningFromResponse({ message }) {
let textResponse = message?.content;
if (
!!message?.reasoning_content &&
message.reasoning_content.trim().length > 0
)
textResponse = `<think>${message.reasoning_content}</think>${textResponse}`;
return textResponse;
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`DeepSeek chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result?.output?.hasOwnProperty("choices") ||
result?.output?.choices?.length === 0
)
throw new Error(
`Invalid response body returned from DeepSeek: ${JSON.stringify(result.output)}`
);
return {
textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
model: this.model,
timestamp: new Date(),
},
};
}
/**
 * Streamed chat completion against the DeepSeek API.
 * @param {Array|null} messages - OpenAI-style message array.
 * @param {{temperature?: number}} options
 * @returns {Promise<Object>} measured stream consumed by handleStream.
 * @throws {Error} when the configured model is not listed by the API.
 */
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
  if (!(await this.isValidChatCompletionModel(this.model)))
    throw new Error(
      `DeepSeek chat: ${this.model} is not valid for chat completion!`
    );
  const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
    func: this.openai.chat.completions.create({
      model: this.model,
      stream: true,
      messages,
      temperature,
    }),
    messages,
    // Usage is read from the provider's stream chunks in handleStream,
    // so the local prompt-token estimate is skipped here.
    runPromptTokenCalculation: false,
    modelTag: this.model,
  });
  return measuredStreamRequest;
}
// TODO: This is a copy of the generic handleStream function in responses.js
// to specifically handle the DeepSeek reasoning model `reasoning_content` field.
// When or if ever possible, we should refactor this to be in the generic function.
handleStream(response, stream, responseProps) {
const { uuid = uuidv4(), sources = [] } = responseProps;
let hasUsageMetrics = false;
let usage = {
completion_tokens: 0,
};
return new Promise(async (resolve) => {
let fullText = "";
let reasoningText = "";
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => {
stream?.endMeasurement(usage);
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
const reasoningToken = message?.delta?.reasoning_content;
if (
chunk.hasOwnProperty("usage") && // exists
!!chunk.usage && // is not null
Object.values(chunk.usage).length > 0 // has values
) {
if (chunk.usage.hasOwnProperty("prompt_tokens")) {
usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
}
if (chunk.usage.hasOwnProperty("completion_tokens")) {
hasUsageMetrics = true; // to stop estimating counter
usage.completion_tokens = Number(chunk.usage.completion_tokens);
}
}
// Reasoning models will always return the reasoning text before the token text.
if (reasoningToken) {
// If the reasoning text is empty (''), we need to initialize it
// and send the first chunk of reasoning text.
if (reasoningText.length === 0) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `<think>${reasoningToken}`,
close: false,
error: false,
});
reasoningText += `<think>${reasoningToken}`;
continue;
} else {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: reasoningToken,
close: false,
error: false,
});
reasoningText += reasoningToken;
}
}
// If the reasoning text is not empty, but the reasoning token is empty
// and the token text is not empty we need to close the reasoning text and begin sending the token text.
if (!!reasoningText && !reasoningToken && token) {
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: `</think>`,
close: false,
error: false,
});
fullText += `${reasoningText}</think>`;
reasoningText = "";
}
if (token) {
fullText += token;
// If we never saw a usage metric, we can estimate them by number of completion chunks
if (!hasUsageMetrics) usage.completion_tokens++;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
// LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
// Either way, the key `finish_reason` must be present to determine ending chunk.
if (
message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
message.finish_reason !== "" &&
message.finish_reason !== null
) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement(usage);
resolve(fullText);
break; // Break streaming when a valid finish_reason is first encountered
}
}
} catch (e) {
console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
writeResponseChunk(response, {
uuid,
type: "abort",
textResponse: null,
sources: [],
close: true,
error: e.message,
});
stream?.endMeasurement(usage);
resolve(fullText); // Return what we currently have - if anything.
}
});
}
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
module.exports = {
DeepSeekLLM,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/nvidiaNim/index.js | server/utils/AiProviders/nvidiaNim/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
handleDefaultStreamResponseV2,
formatChatHistory,
} = require("../../helpers/chat/responses");
class NvidiaNimLLM {
constructor(embedder = null, modelPreference = null) {
if (!process.env.NVIDIA_NIM_LLM_BASE_PATH)
throw new Error("No NVIDIA NIM API Base Path was set.");
this.className = "NvidiaNimLLM";
const { OpenAI: OpenAIApi } = require("openai");
this.nvidiaNim = new OpenAIApi({
baseURL: parseNvidiaNimBasePath(process.env.NVIDIA_NIM_LLM_BASE_PATH),
apiKey: null,
});
this.model = modelPreference || process.env.NVIDIA_NIM_LLM_MODEL_PREF;
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
this.#log(
`Loaded with model: ${this.model} with context window: ${this.promptWindowLimit()}`
);
}
#log(text, ...args) {
console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
/**
* Set the model token limit `NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT` for the given model ID
* @param {string} modelId
* @param {string} basePath
* @returns {Promise<void>}
*/
static async setModelTokenLimit(modelId, basePath = null) {
if (!modelId) return;
const { OpenAI: OpenAIApi } = require("openai");
const openai = new OpenAIApi({
baseURL: parseNvidiaNimBasePath(
basePath || process.env.NVIDIA_NIM_LLM_BASE_PATH
),
apiKey: null,
});
const model = await openai.models
.list()
.then((results) => results.data)
.catch(() => {
return [];
});
if (!model.length) return;
const modelInfo = model.find((model) => model.id === modelId);
if (!modelInfo) return;
process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT = Number(
modelInfo.max_model_len || 4096
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(_modelName) {
const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
if (!limit || isNaN(Number(limit)))
throw new Error("No NVIDIA NIM token context limit was set.");
return Number(limit);
}
// Ensure the user set a value for the token limit
// and if undefined - assume 4096 window.
promptWindowLimit() {
const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
if (!limit || isNaN(Number(limit)))
throw new Error("No NVIDIA NIM token context limit was set.");
return Number(limit);
}
async isValidChatCompletionModel(_ = "") {
return true;
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) {
return userPrompt;
}
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
detail: "auto",
},
});
}
return content.flat();
}
/**
* Construct the user prompt for this model.
* @param {{attachments: import("../../helpers").Attachment[]}} param0
* @returns
*/
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`NVIDIA NIM chat: ${this.model} is not valid or defined model for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.nvidiaNim.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
model: this.model,
timestamp: new Date(),
},
};
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!this.model)
throw new Error(
`NVIDIA NIM chat: ${this.model} is not valid or defined model for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
func: this.nvidiaNim.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages,
runPromptTokenCalculation: true,
modelTag: this.model,
});
return measuredStreamRequest;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
/**
 * Parse the base path for the Nvidia NIM container API. Since the base path must end in /v1 and cannot have a trailing slash,
 * and the user can possibly set it to anything and likely incorrectly due to pasting behaviors, we need to ensure it is in the correct format.
 * @param {string} providedBasePath - user-supplied base URL (any shape)
 * @returns {string} `<origin>/v1` for parsable URLs, the input untouched otherwise
 */
function parseNvidiaNimBasePath(providedBasePath = "") {
  try {
    const { origin } = new URL(providedBasePath);
    return `${origin}/v1`;
  } catch (e) {
    // Not a parsable URL - hand the input back unchanged.
    return providedBasePath;
  }
}
module.exports = {
NvidiaNimLLM,
parseNvidiaNimBasePath,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/novita/index.js | server/utils/AiProviders/novita/index.js | const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { v4: uuidv4 } = require("uuid");
const {
writeResponseChunk,
clientAbortedHandler,
formatChatHistory,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const cacheFolder = path.resolve(
process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "models", "novita")
: path.resolve(__dirname, `../../../storage/models/novita`)
);
class NovitaLLM {
defaultTimeout = 3_000;
constructor(embedder = null, modelPreference = null) {
if (!process.env.NOVITA_LLM_API_KEY)
throw new Error("No Novita API key was set.");
this.className = "NovitaLLM";
const { OpenAI: OpenAIApi } = require("openai");
this.basePath = "https://api.novita.ai/v3/openai";
this.openai = new OpenAIApi({
baseURL: this.basePath,
apiKey: process.env.NOVITA_LLM_API_KEY ?? null,
defaultHeaders: {
"HTTP-Referer": "https://anythingllm.com",
"X-Novita-Source": "anythingllm",
},
});
this.model =
modelPreference ||
process.env.NOVITA_LLM_MODEL_PREF ||
"deepseek/deepseek-r1";
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
this.timeout = this.#parseTimeout();
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
this.cacheModelPath = path.resolve(cacheFolder, "models.json");
this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
this.log(`Loaded with model: ${this.model}`);
}
log(text, ...args) {
console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
}
/**
* Novita has various models that never return `finish_reasons` and thus leave the stream open
* which causes issues in subsequent messages. This timeout value forces us to close the stream after
* x milliseconds. This is a configurable value via the NOVITA_LLM_TIMEOUT_MS value
* @returns {number} The timeout value in milliseconds (default: 3_000)
*/
#parseTimeout() {
this.log(
`Novita timeout is set to ${process.env.NOVITA_LLM_TIMEOUT_MS ?? this.defaultTimeout}ms`
);
if (isNaN(Number(process.env.NOVITA_LLM_TIMEOUT_MS)))
return this.defaultTimeout;
const setValue = Number(process.env.NOVITA_LLM_TIMEOUT_MS);
if (setValue < 500) return 500; // 500ms is the minimum timeout
return setValue;
}
// This checks if the .cached_at file has a timestamp that is more than 1Week (in millis)
// from the current date. If it is, then we will refetch the API so that all the models are up
// to date.
#cacheIsStale() {
const MAX_STALE = 6.048e8; // 1 Week in MS
if (!fs.existsSync(this.cacheAtPath)) return true;
const now = Number(new Date());
const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
return now - timestampMs > MAX_STALE;
}
// The Novita model API has a lot of models, so we cache this locally in the directory
// as if the cache directory JSON file is stale or does not exist we will fetch from API and store it.
// This might slow down the first request, but we need the proper token context window
// for each model and this is a constructor property - so we can really only get it if this cache exists.
// We used to have this as a chore, but given there is an API to get the info - this makes little sense.
async #syncModels() {
if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
return false;
this.log("Model cache is not present or stale. Fetching from Novita API.");
await fetchNovitaModels();
return;
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
models() {
if (!fs.existsSync(this.cacheModelPath)) return {};
return safeJsonParse(
fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
{}
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(modelName) {
const cacheModelPath = path.resolve(cacheFolder, "models.json");
const availableModels = fs.existsSync(cacheModelPath)
? safeJsonParse(
fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
{}
)
: {};
return availableModels[modelName]?.maxLength || 4096;
}
promptWindowLimit() {
const availableModels = this.models();
return availableModels[this.model]?.maxLength || 4096;
}
async isValidChatCompletionModel(model = "") {
await this.#syncModels();
const availableModels = this.models();
return availableModels.hasOwnProperty(model);
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) {
return userPrompt;
}
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
detail: "auto",
},
});
}
return content.flat();
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Novita chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
model: this.model,
timestamp: new Date(),
},
};
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`Novita chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
func: this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages,
runPromptTokenCalculation: true,
modelTag: this.model,
});
return measuredStreamRequest;
}
/**
* Handles the default stream response for a chat.
* @param {import("express").Response} response
* @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
* @param {Object} responseProps
* @returns {Promise<string>}
*/
handleStream(response, stream, responseProps) {
const timeoutThresholdMs = this.timeout;
const { uuid = uuidv4(), sources = [] } = responseProps;
return new Promise(async (resolve) => {
let fullText = "";
let lastChunkTime = null; // null when first token is still not received.
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => {
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
clientAbortedHandler(resolve, fullText);
};
response.on("close", handleAbort);
// NOTICE: Not all Novita models will return a stop reason
// which keeps the connection open and so the model never finalizes the stream
// like the traditional OpenAI response schema does. So in the case the response stream
// never reaches a formal close state we maintain an interval timer that if we go >=timeoutThresholdMs with
// no new chunks then we kill the stream and assume it to be complete. Novita is quite fast
// so this threshold should permit most responses, but we can adjust `timeoutThresholdMs` if
// we find it is too aggressive.
const timeoutCheck = setInterval(() => {
if (lastChunkTime === null) return;
const now = Number(new Date());
const diffMs = now - lastChunkTime;
if (diffMs >= timeoutThresholdMs) {
this.log(
`Novita stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
);
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
clearInterval(timeoutCheck);
response.removeListener("close", handleAbort);
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
resolve(fullText);
}
}, 500);
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
lastChunkTime = Number(new Date());
if (token) {
fullText += token;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
if (message?.finish_reason !== null) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
resolve(fullText);
}
}
} catch (e) {
writeResponseChunk(response, {
uuid,
sources,
type: "abort",
textResponse: null,
close: true,
error: e.message,
});
response.removeListener("close", handleAbort);
stream?.endMeasurement({
completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
});
resolve(fullText);
}
});
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
/**
 * Fetches the live model list from Novita's OpenAI-compatible API and writes
 * it to the local cache (`models.json` plus a `.cached_at` timestamp) under
 * `cacheFolder`.
 * @returns {Promise<Object>} map of model id -> {id, name, organization,
 *   maxLength}; an empty object on any fetch/parse failure.
 */
async function fetchNovitaModels() {
  return await fetch(`https://api.novita.ai/v3/openai/models`, {
    method: "GET",
    headers: {
      "Content-Type": "application/json",
    },
  })
    .then((res) => res.json())
    .then(({ data = [] }) => {
      const models = {};
      data.forEach((model) => {
        models[model.id] = {
          id: model.id,
          name: model.title,
          // Capitalized vendor prefix of the id, eg: "deepseek/..." -> "Deepseek".
          organization:
            model.id.split("/")[0].charAt(0).toUpperCase() +
            model.id.split("/")[0].slice(1),
          maxLength: model.context_size,
        };
      });

      // Cache all response information
      if (!fs.existsSync(cacheFolder))
        fs.mkdirSync(cacheFolder, { recursive: true });
      fs.writeFileSync(
        path.resolve(cacheFolder, "models.json"),
        JSON.stringify(models),
        {
          encoding: "utf-8",
        }
      );
      fs.writeFileSync(
        path.resolve(cacheFolder, ".cached_at"),
        String(Number(new Date())),
        {
          encoding: "utf-8",
        }
      );
      return models;
    })
    .catch((e) => {
      // Best-effort: a failed sync leaves any existing cache untouched.
      console.error(e);
      return {};
    });
}
module.exports = {
NovitaLLM,
fetchNovitaModels,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/gemini/index.js | server/utils/AiProviders/gemini/index.js | const fs = require("fs");
const path = require("path");
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
formatChatHistory,
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const { MODEL_MAP } = require("../modelMap");
const { defaultGeminiModels, v1BetaModels } = require("./defaultModels");
const { safeJsonParse } = require("../../http");
const cacheFolder = path.resolve(
process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "models", "gemini")
: path.resolve(__dirname, `../../../storage/models/gemini`)
);
const NO_SYSTEM_PROMPT_MODELS = [
"gemma-3-1b-it",
"gemma-3-4b-it",
"gemma-3-12b-it",
"gemma-3-27b-it",
];
class GeminiLLM {
constructor(embedder = null, modelPreference = null) {
if (!process.env.GEMINI_API_KEY)
throw new Error("No Gemini API key was set.");
this.className = "GeminiLLM";
const { OpenAI: OpenAIApi } = require("openai");
this.model =
modelPreference ||
process.env.GEMINI_LLM_MODEL_PREF ||
"gemini-2.0-flash-lite";
const isExperimental = this.isExperimentalModel(this.model);
this.openai = new OpenAIApi({
apiKey: process.env.GEMINI_API_KEY,
// Even models that are v1 in gemini API can be used with v1beta/openai/ endpoint and nobody knows why.
baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/",
});
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
this.cacheModelPath = path.resolve(cacheFolder, "models.json");
this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
this.#log(
`Initialized with model: ${this.model} ${isExperimental ? "[Experimental v1beta]" : "[Stable v1]"} - ctx: ${this.promptWindowLimit()}`
);
}
/**
* Checks if the model supports system prompts
* This is a static list of models that are known to not support system prompts
* since this information is not available in the API model response.
* @returns {boolean}
*/
get supportsSystemPrompt() {
return !NO_SYSTEM_PROMPT_MODELS.includes(this.model);
}
#log(text, ...args) {
console.log(`\x1b[32m[${this.className}]\x1b[0m ${text}`, ...args);
}
// This checks if the .cached_at file has a timestamp that is more than 1 day (in millis)
// from the current date. If it is, then we will refetch the API so that all the models are up
// to date. (Comment previously said "1Week", but MAX_STALE below is 1 day.)
static cacheIsStale() {
  const MAX_STALE = 8.64e7; // 1 day in MS
  if (!fs.existsSync(path.resolve(cacheFolder, ".cached_at"))) return true;
  const now = Number(new Date());
  const timestampMs = Number(
    fs.readFileSync(path.resolve(cacheFolder, ".cached_at"))
  );
  return now - timestampMs > MAX_STALE;
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(modelName) {
try {
const cacheModelPath = path.resolve(cacheFolder, "models.json");
if (!fs.existsSync(cacheModelPath))
return MODEL_MAP.get("gemini", modelName) ?? 30_720;
const models = safeJsonParse(fs.readFileSync(cacheModelPath));
const model = models.find((model) => model.id === modelName);
if (!model)
throw new Error(
"Model not found in cache - falling back to default model."
);
return model.contextWindow;
} catch (e) {
console.error(`GeminiLLM:promptWindowLimit`, e.message);
return MODEL_MAP.get("gemini", modelName) ?? 30_720;
}
}
promptWindowLimit() {
try {
if (!fs.existsSync(this.cacheModelPath))
return MODEL_MAP.get("gemini", this.model) ?? 30_720;
const models = safeJsonParse(fs.readFileSync(this.cacheModelPath));
const model = models.find((model) => model.id === this.model);
if (!model)
throw new Error(
"Model not found in cache - falling back to default model."
);
return model.contextWindow;
} catch (e) {
console.error(`GeminiLLM:promptWindowLimit`, e.message);
return MODEL_MAP.get("gemini", this.model) ?? 30_720;
}
}
/**
* Checks if a model is experimental by reading from the cache if available, otherwise it will perform
* a blind check against the v1BetaModels list - which is manually maintained and updated.
* @param {string} modelName - The name of the model to check
* @returns {boolean} A boolean indicating if the model is experimental
*/
isExperimentalModel(modelName) {
if (
fs.existsSync(cacheFolder) &&
fs.existsSync(path.resolve(cacheFolder, "models.json"))
) {
const models = safeJsonParse(
fs.readFileSync(path.resolve(cacheFolder, "models.json"))
);
const model = models.find((model) => model.id === modelName);
if (!model) return false;
return model.experimental;
}
return modelName.includes("exp") || v1BetaModels.includes(modelName);
}
/**
* Fetches Gemini models from the Google Generative AI API
* @param {string} apiKey - The API key to use for the request
* @param {number} limit - The maximum number of models to fetch
* @param {string} pageToken - The page token to use for pagination
* @returns {Promise<[{id: string, name: string, contextWindow: number, experimental: boolean}]>} A promise that resolves to an array of Gemini models
*/
static async fetchModels(apiKey, limit = 1_000, pageToken = null) {
if (!apiKey) return [];
if (fs.existsSync(cacheFolder) && !this.cacheIsStale()) {
console.log(
`\x1b[32m[GeminiLLM]\x1b[0m Using cached models API response.`
);
return safeJsonParse(
fs.readFileSync(path.resolve(cacheFolder, "models.json"))
);
}
const stableModels = [];
const allModels = [];
// Fetch from v1
try {
const url = new URL(
"https://generativelanguage.googleapis.com/v1/models"
);
url.searchParams.set("pageSize", limit);
url.searchParams.set("key", apiKey);
if (pageToken) url.searchParams.set("pageToken", pageToken);
await fetch(url.toString(), {
method: "GET",
headers: { "Content-Type": "application/json" },
})
.then((res) => res.json())
.then((data) => {
if (data.error) throw new Error(data.error.message);
return data.models ?? [];
})
.then((models) => {
return models
.filter(
(model) => !model.displayName?.toLowerCase()?.includes("tuning")
) // remove tuning models
.filter(
(model) =>
!model.description?.toLowerCase()?.includes("deprecated")
) // remove deprecated models (in comment)
.filter((model) =>
// Only generateContent is supported
model.supportedGenerationMethods.includes("generateContent")
)
.map((model) => {
stableModels.push(model.name);
allModels.push({
id: model.name.split("/").pop(),
name: model.displayName,
contextWindow: model.inputTokenLimit,
experimental: false,
});
});
})
.catch((e) => {
console.error(`Gemini:getGeminiModelsV1`, e.message);
return;
});
} catch (e) {
console.error(`Gemini:getGeminiModelsV1`, e.message);
}
// Fetch from v1beta
try {
const url = new URL(
"https://generativelanguage.googleapis.com/v1beta/models"
);
url.searchParams.set("pageSize", limit);
url.searchParams.set("key", apiKey);
if (pageToken) url.searchParams.set("pageToken", pageToken);
await fetch(url.toString(), {
method: "GET",
headers: { "Content-Type": "application/json" },
})
.then((res) => res.json())
.then((data) => {
if (data.error) throw new Error(data.error.message);
return data.models ?? [];
})
.then((models) => {
return models
.filter((model) => !stableModels.includes(model.name)) // remove stable models that are already in the v1 list
.filter(
(model) => !model.displayName?.toLowerCase()?.includes("tuning")
) // remove tuning models
.filter(
(model) =>
!model.description?.toLowerCase()?.includes("deprecated")
) // remove deprecated models (in comment)
.filter((model) =>
// Only generateContent is supported
model.supportedGenerationMethods.includes("generateContent")
)
.map((model) => {
allModels.push({
id: model.name.split("/").pop(),
name: model.displayName,
contextWindow: model.inputTokenLimit,
experimental: true,
});
});
})
.catch((e) => {
console.error(`Gemini:getGeminiModelsV1beta`, e.message);
return;
});
} catch (e) {
console.error(`Gemini:getGeminiModelsV1beta`, e.message);
}
if (allModels.length === 0) {
console.error(`Gemini:getGeminiModels - No models found`);
return defaultGeminiModels();
}
console.log(
`\x1b[32m[GeminiLLM]\x1b[0m Writing cached models API response to disk.`
);
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
fs.writeFileSync(
path.resolve(cacheFolder, "models.json"),
JSON.stringify(allModels)
);
fs.writeFileSync(
path.resolve(cacheFolder, ".cached_at"),
new Date().getTime().toString()
);
return allModels;
}
/**
* Checks if a model is valid for chat completion (unused)
* @deprecated
* @param {string} modelName - The name of the model to check
* @returns {Promise<boolean>} A promise that resolves to a boolean indicating if the model is valid
*/
async isValidChatCompletionModel(modelName = "") {
const models = await this.fetchModels(process.env.GEMINI_API_KEY);
return models.some((model) => model.id === modelName);
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) return userPrompt;
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
detail: "high",
},
});
}
return content.flat();
}
/**
* Construct the user prompt for this model.
* @param {{attachments: import("../../helpers").Attachment[]}} param0
* @returns
*/
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [], // This is the specific attachment for only this prompt
}) {
let prompt = [];
if (this.supportsSystemPrompt) {
prompt.push({
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
});
} else {
this.#log(
`${this.model} - does not support system prompts - emulating...`
);
prompt.push(
{
role: "user",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
},
{
role: "assistant",
content: "Okay.",
}
);
}
return [
...prompt,
...formatChatHistory(chatHistory, this.#generateContent),
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature: temperature,
})
.catch((e) => {
console.error(e);
throw new Error(e.message);
})
);
if (
!result.output.hasOwnProperty("choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
model: this.model,
timestamp: new Date(),
},
};
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream({
func: this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature: temperature,
stream_options: {
include_usage: true,
},
}),
messages,
runPromptTokenCalculation: false,
modelTag: this.model,
});
return measuredStreamRequest;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
}
// Named exports: the provider class plus the static list of models that
// cannot accept a system prompt.
module.exports = {
  GeminiLLM,
  NO_SYSTEM_PROMPT_MODELS,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/AiProviders/gemini/defaultModels.js | server/utils/AiProviders/gemini/defaultModels.js | const { MODEL_MAP } = require("../modelMap");
// Models generally available on the stable v1 Gemini API endpoint.
// The entries between the %STABLE_MODELS% markers are auto-generated -
// do not hand-edit inside the markers.
const stableModels = [
  // %STABLE_MODELS% - updated 2025-05-13T23:13:58.920Z
  "gemini-1.5-pro-001",
  "gemini-1.5-pro-002",
  "gemini-1.5-pro",
  "gemini-1.5-flash-001",
  "gemini-1.5-flash",
  "gemini-1.5-flash-002",
  "gemini-1.5-flash-8b",
  "gemini-1.5-flash-8b-001",
  "gemini-2.0-flash",
  "gemini-2.0-flash-001",
  "gemini-2.0-flash-lite-001",
  "gemini-2.0-flash-lite",
  "gemini-2.0-flash-preview-image-generation",
  // %EOC_STABLE_MODELS%
];

// There are some models that are only available in the v1beta API
// and some models that are only available in the v1 API
// generally, v1beta models have `exp` in the name, but not always
// so we check for both against a static list as well via API.
// The entries between the %V1BETA_MODELS% markers are auto-generated -
// do not hand-edit inside the markers.
const v1BetaModels = [
  // %V1BETA_MODELS% - updated 2025-05-13T23:13:58.920Z
  "gemini-1.5-pro-latest",
  "gemini-1.5-flash-latest",
  "gemini-1.5-flash-8b-latest",
  "gemini-1.5-flash-8b-exp-0827",
  "gemini-1.5-flash-8b-exp-0924",
  "gemini-2.5-pro-exp-03-25",
  "gemini-2.5-pro-preview-03-25",
  "gemini-2.5-flash-preview-04-17",
  "gemini-2.5-flash-preview-04-17-thinking",
  "gemini-2.5-pro-preview-05-06",
  "gemini-2.0-flash-exp",
  "gemini-2.0-flash-exp-image-generation",
  "gemini-2.0-flash-lite-preview-02-05",
  "gemini-2.0-flash-lite-preview",
  "gemini-2.0-pro-exp",
  "gemini-2.0-pro-exp-02-05",
  "gemini-exp-1206",
  "gemini-2.0-flash-thinking-exp-01-21",
  "gemini-2.0-flash-thinking-exp",
  "gemini-2.0-flash-thinking-exp-1219",
  "learnlm-1.5-pro-experimental",
  "learnlm-2.0-flash-experimental",
  "gemma-3-1b-it",
  "gemma-3-4b-it",
  "gemma-3-12b-it",
  "gemma-3-27b-it",
  // %EOC_V1BETA_MODELS%
];
const defaultGeminiModels = () => [
...stableModels.map((model) => ({
id: model,
name: model,
contextWindow: MODEL_MAP.get("gemini", model),
experimental: false,
})),
...v1BetaModels.map((model) => ({
id: model,
name: model,
contextWindow: MODEL_MAP.get("gemini", model),
experimental: true,
})),
];
module.exports = {
defaultGeminiModels,
v1BetaModels,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/logger/index.js | server/utils/logger/index.js | const winston = require("winston");
// Singleton wrapper that selects the process-wide logging interface:
// plain `console` in development, a winston logger (which also hijacks the
// global console methods) in production.
class Logger {
  // Active logging interface; defaults to console until the constructor runs.
  logger = console;
  static _instance;
  constructor() {
    // Singleton: subsequent constructions return the first instance.
    if (Logger._instance) return Logger._instance;
    this.logger =
      process.env.NODE_ENV === "production" ? this.getWinstonLogger() : console;
    Logger._instance = this;
  }

  /**
   * Builds the production winston logger and overrides the global
   * console.log/error/info methods to route through it, so all existing
   * console-based logging in the codebase is captured.
   * @returns {import("winston").Logger}
   */
  getWinstonLogger() {
    const logger = winston.createLogger({
      level: "info",
      defaultMeta: { service: "backend" },
      transports: [
        new winston.transports.Console({
          format: winston.format.combine(
            winston.format.colorize(),
            winston.format.printf(
              ({ level, message, service, origin = "" }) => {
                return `\x1b[36m[${service}]\x1b[0m${origin ? `\x1b[33m[${origin}]\x1b[0m` : ""} ${level}: ${message}`;
              }
            )
          ),
        }),
      ],
    });

    // Normalizes console arguments into a single string: Error objects
    // become stack traces, objects are JSON-stringified, primitives pass
    // through unchanged.
    function formatArgs(args) {
      return args
        .map((arg) => {
          if (arg instanceof Error) {
            return arg.stack; // If argument is an Error object, return its stack trace
          } else if (typeof arg === "object") {
            return JSON.stringify(arg); // Convert objects to JSON string
          } else {
            return arg; // Otherwise, return as-is
          }
        })
        .join(" ");
    }

    console.log = function (...args) {
      logger.info(formatArgs(args));
    };

    console.error = function (...args) {
      logger.error(formatArgs(args));
    };

    // NOTE(review): console.info is routed to logger.warn (not logger.info).
    // Possibly intentional for visibility since console.log already maps to
    // info - confirm before changing.
    console.info = function (...args) {
      logger.warn(formatArgs(args));
    };

    return logger;
  }
}
/**
 * Sets and overrides Console methods for logging when called.
 * This is a singleton method and will not create multiple loggers.
 * @returns {winston.Logger | console} - instantiated logger interface.
 */
function setLogger() {
  const { logger } = new Logger();
  return logger;
}

module.exports = setLogger;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/BackgroundWorkers/index.js | server/utils/BackgroundWorkers/index.js | const path = require("path");
const Graceful = require("@ladjs/graceful");
const Bree = require("@mintplex-labs/bree");
const setLogger = require("../logger");
/**
 * Singleton service that schedules background jobs via Bree, with graceful
 * shutdown handling. Always runs cleanup jobs; document-sync jobs are added
 * only when the feature is enabled.
 */
class BackgroundService {
  name = "BackgroundWorkerService";
  static _instance = null;
  documentSyncEnabled = false;
  #root = path.resolve(__dirname, "../../jobs");

  // Jobs scheduled on every boot regardless of feature flags.
  #alwaysRunJobs = [
    {
      name: "cleanup-orphan-documents",
      timeout: "1m",
      interval: "12hr",
    },
  ];

  // Jobs only scheduled when document sync is enabled.
  #documentSyncJobs = [
    // Job for auto-sync of documents
    // https://github.com/breejs/bree
    {
      name: "sync-watched-documents",
      interval: "1hr",
    },
  ];

  constructor() {
    if (BackgroundService._instance) {
      this.#log("SINGLETON LOCK: Using existing BackgroundService.");
      return BackgroundService._instance;
    }

    this.logger = setLogger();
    BackgroundService._instance = this;
  }

  #log(text, ...args) {
    console.log(`\x1b[36m[${this.name}]\x1b[0m ${text}`, ...args);
  }

  /**
   * Boots the Bree scheduler with the currently-applicable job list and
   * installs graceful shutdown handlers.
   */
  async boot() {
    const { DocumentSyncQueue } = require("../../models/documentSyncQueue");
    this.documentSyncEnabled = await DocumentSyncQueue.enabled();
    const jobsToRun = this.jobs();

    this.#log("Starting...");
    this.bree = new Bree({
      logger: this.logger,
      root: this.#root,
      jobs: jobsToRun,
      // Bind the handlers: Bree invokes them as bare callbacks, so without
      // binding `this` is lost and `this.logger` inside them is undefined.
      errorHandler: this.onError.bind(this),
      workerMessageHandler: this.onWorkerMessageHandler.bind(this),
      runJobsAs: "process",
    });
    this.graceful = new Graceful({ brees: [this.bree], logger: this.logger });
    this.graceful.listen();
    this.bree.start();
    this.#log(
      `Service started with ${jobsToRun.length} jobs`,
      jobsToRun.map((j) => j.name)
    );
  }

  // Stops the scheduler and releases its graceful-shutdown listeners.
  async stop() {
    this.#log("Stopping...");
    if (!!this.graceful && !!this.bree) this.graceful.stopBree(this.bree, 0);
    this.bree = null;
    this.graceful = null;
    this.#log("Service stopped");
  }

  /** @returns {import("@mintplex-labs/bree").Job[]} */
  jobs() {
    const activeJobs = [...this.#alwaysRunJobs];
    if (this.documentSyncEnabled) activeJobs.push(...this.#documentSyncJobs);
    return activeJobs;
  }

  // Invoked by Bree when a job errors (bound in boot()).
  onError(error, _workerMetadata) {
    this.logger.error(`${error.message}`, {
      service: "bg-worker",
      origin: error.name,
    });
  }

  // Invoked by Bree when a worker posts a message (bound in boot()).
  onWorkerMessageHandler(message, _workerMetadata) {
    this.logger.info(`${message.message}`, {
      service: "bg-worker",
      origin: message.name,
    });
  }
}

module.exports.BackgroundService = BackgroundService;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/portAvailabilityChecker.js | server/utils/helpers/portAvailabilityChecker.js | // Get all loopback addresses that are available for use or binding.
// Get all loopback/local addresses available for binding. The result always
// contains `undefined` (bind to all) and "0.0.0.0", plus every interface
// address on the machine; the Set guarantees uniqueness.
function getLocalHosts() {
  const os = require("os");
  const hosts = new Set([undefined, "0.0.0.0"]);
  for (const configs of Object.values(os.networkInterfaces())) {
    for (const { address } of configs) {
      hosts.add(address);
    }
  }
  return [...hosts];
}
// Probes whether a server can be bound with the given net.listen options.
// Resolves `true` when the bind succeeds (the probe server is closed
// immediately); rejects with the bind error otherwise.
function checkPort(options = {}) {
  const net = require("net");
  return new Promise((resolve, reject) => {
    const probe = net.createServer();
    probe.unref();
    probe.on("error", reject);
    probe.listen(options, () => probe.close(() => resolve(true)));
  });
}
/**
 * Probes a port by attempting to bind to it.
 * NOTE(review): the return values look inverted relative to the name - a
 * successful bind (port is actually free) returns `true`, while bind
 * failures such as EADDRINUSE (port actually occupied) return `false`.
 * Call sites may depend on this behavior, so it is documented here rather
 * than changed - verify against callers before "fixing".
 * @param {number} port
 * @param {string|undefined} host
 * @returns {Promise<boolean>}
 */
async function isPortInUse(port, host) {
  try {
    await checkPort({ port, host });
    return true;
  } catch (error) {
    // EADDRNOTAVAIL/EINVAL mean the host address itself is unusable on this
    // machine; every error path ultimately resolves to `false`.
    if (!["EADDRNOTAVAIL", "EINVAL"].includes(error.code)) {
      return false;
    }
  }

  return false;
}

module.exports = {
  isPortInUse,
  getLocalHosts,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/tiktoken.js | server/utils/helpers/tiktoken.js | const { getEncodingNameForModel, getEncoding } = require("js-tiktoken");
/**
* @class TokenManager
*
* @notice
* We cannot do estimation of tokens here like we do in the collector
* because we need to know the model to do it.
* Other issues are we also do reverse tokenization here for the chat history during cannonballing.
* So here we are stuck doing the actual tokenization and encoding until we figure out what to do with prompt overflows.
*/
class TokenManager {
static instance = null;
static currentModel = null;
constructor(model = "gpt-3.5-turbo") {
if (TokenManager.instance && TokenManager.currentModel === model) {
this.log("Returning existing instance for model:", model);
return TokenManager.instance;
}
this.model = model;
this.encoderName = this.#getEncodingFromModel(model);
this.encoder = getEncoding(this.encoderName);
TokenManager.instance = this;
TokenManager.currentModel = model;
this.log("Initialized new TokenManager instance for model:", model);
return this;
}
log(text, ...args) {
console.log(`\x1b[35m[TokenManager]\x1b[0m ${text}`, ...args);
}
#getEncodingFromModel(model) {
try {
return getEncodingNameForModel(model);
} catch {
return "cl100k_base";
}
}
/**
* Pass in an empty array of disallowedSpecials to handle all tokens as text and to be tokenized.
* @param {string} input
* @returns {number[]}
*/
tokensFromString(input = "") {
try {
const tokens = this.encoder.encode(String(input), undefined, []);
return tokens;
} catch (e) {
console.error(e);
return [];
}
}
/**
* Converts an array of tokens back to a string.
* @param {number[]} tokens
* @returns {string}
*/
bytesFromTokens(tokens = []) {
const bytes = this.encoder.decode(tokens);
return bytes;
}
/**
* Counts the number of tokens in a string.
* @param {string} input
* @returns {number}
*/
countFromString(input = "") {
const tokens = this.tokensFromString(input);
return tokens.length;
}
/**
* Estimates the number of tokens in a string or array of strings.
* @param {string | string[]} input
* @returns {number}
*/
statsFrom(input) {
if (typeof input === "string") return this.countFromString(input);
// What is going on here?
// https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb Item 6.
// The only option is to estimate. From repeated testing using the static values in the code we are always 2 off,
// which means as of Nov 1, 2023 the additional factor on ln: 476 changed from 3 to 5.
if (Array.isArray(input)) {
const perMessageFactorTokens = input.length * 3;
const tokensFromContent = input.reduce(
(a, b) => a + this.countFromString(b.content),
0
);
const diffCoefficient = 5;
return perMessageFactorTokens + tokensFromContent + diffCoefficient;
}
throw new Error("Not a supported tokenized format.");
}
}
module.exports = {
TokenManager,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/index.js | server/utils/helpers/index.js | /**
* File Attachment for automatic upload on the chat container page.
* @typedef Attachment
* @property {string} name - the given file name
* @property {string} mime - the given file mime
* @property {string} contentString - full base64 encoded string of file
*/
/**
* @typedef {Object} ResponseMetrics
* @property {number} prompt_tokens - The number of prompt tokens used
* @property {number} completion_tokens - The number of completion tokens used
* @property {number} total_tokens - The total number of tokens used
* @property {number} outputTps - The output tokens per second
* @property {number} duration - The duration of the request in seconds
*
* @typedef {Object} ChatMessage
* @property {string} role - The role of the message sender (e.g. 'user', 'assistant', 'system')
* @property {string} content - The content of the message
*
* @typedef {Object} ChatCompletionResponse
* @property {string} textResponse - The text response from the LLM
* @property {ResponseMetrics} metrics - The response metrics
*
* @typedef {Object} ChatCompletionOptions
* @property {number} temperature - The sampling temperature for the LLM response
* @property {import("@prisma/client").users} user - The user object for the chat completion to send to the LLM provider for user tracking (optional)
*
* @typedef {function(Array<ChatMessage>, ChatCompletionOptions): Promise<ChatCompletionResponse>} getChatCompletionFunction
*
* @typedef {function(Array<ChatMessage>, ChatCompletionOptions): Promise<import("./chat/LLMPerformanceMonitor").MonitoredStream>} streamGetChatCompletionFunction
*/
/**
* @typedef {Object} BaseLLMProvider - A basic llm provider object
* @property {Function} streamingEnabled - Checks if streaming is enabled for chat completions.
* @property {Function} promptWindowLimit - Returns the token limit for the current model.
* @property {Function} isValidChatCompletionModel - Validates if the provided model is suitable for chat completion.
* @property {Function} constructPrompt - Constructs a formatted prompt for the chat completion request.
* @property {getChatCompletionFunction} getChatCompletion - Gets a chat completion response from OpenAI.
* @property {streamGetChatCompletionFunction} streamGetChatCompletion - Streams a chat completion response from OpenAI.
* @property {Function} handleStream - Handles the streaming response.
* @property {Function} embedTextInput - Embeds the provided text input using the specified embedder.
* @property {Function} embedChunks - Embeds multiple chunks of text using the specified embedder.
* @property {Function} compressMessages - Compresses chat messages to fit within the token limit.
*/
/**
* @typedef {Object} BaseLLMProviderClass - Class method of provider - not instantiated
* @property {function(string): number} promptWindowLimit - Returns the token limit for the provided model.
*/
/**
* @typedef {Object} BaseVectorDatabaseProvider
* @property {string} name - The name of the Vector Database instance.
* @property {Function} connect - Connects to the Vector Database client.
* @property {Function} totalVectors - Returns the total number of vectors in the database.
* @property {Function} namespaceCount - Returns the count of vectors in a given namespace.
* @property {Function} similarityResponse - Performs a similarity search on a given namespace.
* @property {Function} rerankedSimilarityResponse - Performs a similarity search on a given namespace with reranking (if supported by provider).
* @property {Function} namespace - Retrieves the specified namespace collection.
* @property {Function} hasNamespace - Checks if a namespace exists.
* @property {Function} namespaceExists - Verifies if a namespace exists in the client.
* @property {Function} deleteVectorsInNamespace - Deletes all vectors in a specified namespace.
* @property {Function} deleteDocumentFromNamespace - Deletes a document from a specified namespace.
* @property {Function} addDocumentToNamespace - Adds a document to a specified namespace.
* @property {Function} performSimilaritySearch - Performs a similarity search in the namespace.
*/
/**
* @typedef {Object} BaseEmbedderProvider
* @property {string} model - The model used for embedding.
* @property {number} maxConcurrentChunks - The maximum number of chunks processed concurrently.
* @property {number} embeddingMaxChunkLength - The maximum length of each chunk for embedding.
* @property {Function} embedTextInput - Embeds a single text input.
* @property {Function} embedChunks - Embeds multiple chunks of text.
*/
/**
 * Gets the system's current vector database provider.
 * @param {('pinecone' | 'chroma' | 'chromacloud' | 'lancedb' | 'weaviate' | 'qdrant' | 'milvus' | 'zilliz' | 'astra' | 'pgvector') | null} getExactly - If provided, this will return an explicit provider.
 * @returns {BaseVectorDatabaseProvider}
 */
function getVectorDbClass(getExactly = null) {
  const vectorSelection = getExactly ?? process.env.VECTOR_DB ?? "lancedb";
  // Lazily require the chosen provider so only the selected module is loaded.
  const providerLoaders = {
    pinecone: () => require("../vectorDbProviders/pinecone").Pinecone,
    chroma: () => require("../vectorDbProviders/chroma").Chroma,
    chromacloud: () => require("../vectorDbProviders/chromacloud").ChromaCloud,
    lancedb: () => require("../vectorDbProviders/lance").LanceDb,
    weaviate: () => require("../vectorDbProviders/weaviate").Weaviate,
    qdrant: () => require("../vectorDbProviders/qdrant").QDrant,
    milvus: () => require("../vectorDbProviders/milvus").Milvus,
    zilliz: () => require("../vectorDbProviders/zilliz").Zilliz,
    astra: () => require("../vectorDbProviders/astra").AstraDB,
    pgvector: () => require("../vectorDbProviders/pgvector").PGVector,
  };

  const loadProvider = providerLoaders[vectorSelection];
  if (loadProvider) return loadProvider();

  // Unknown/unset selection: warn loudly and fall back to LanceDB.
  console.error(
    `\x1b[31m[ENV ERROR]\x1b[0m No VECTOR_DB value found in environment! Falling back to LanceDB`
  );
  return require("../vectorDbProviders/lance").LanceDb;
}
/**
 * Returns the LLMProvider instance (with its embedder attached) via system
 * settings or the explicitly requested provider. Each case lazily requires
 * its provider module so only the selected provider's dependencies load.
 * @param {{provider: string | null, model: string | null} | null} params - Initialize params for LLMs provider
 * @returns {BaseLLMProvider}
 * @throws {Error} when the resolved provider name is not recognized.
 */
function getLLMProvider({ provider = null, model = null } = {}) {
  const LLMSelection = provider ?? process.env.LLM_PROVIDER ?? "openai";
  const embedder = getEmbeddingEngineSelection();

  switch (LLMSelection) {
    case "openai":
      const { OpenAiLLM } = require("../AiProviders/openAi");
      return new OpenAiLLM(embedder, model);
    case "azure":
      const { AzureOpenAiLLM } = require("../AiProviders/azureOpenAi");
      return new AzureOpenAiLLM(embedder, model);
    case "anthropic":
      const { AnthropicLLM } = require("../AiProviders/anthropic");
      return new AnthropicLLM(embedder, model);
    case "gemini":
      const { GeminiLLM } = require("../AiProviders/gemini");
      return new GeminiLLM(embedder, model);
    case "lmstudio":
      const { LMStudioLLM } = require("../AiProviders/lmStudio");
      return new LMStudioLLM(embedder, model);
    case "localai":
      const { LocalAiLLM } = require("../AiProviders/localAi");
      return new LocalAiLLM(embedder, model);
    case "ollama":
      const { OllamaAILLM } = require("../AiProviders/ollama");
      return new OllamaAILLM(embedder, model);
    case "togetherai":
      const { TogetherAiLLM } = require("../AiProviders/togetherAi");
      return new TogetherAiLLM(embedder, model);
    case "fireworksai":
      const { FireworksAiLLM } = require("../AiProviders/fireworksAi");
      return new FireworksAiLLM(embedder, model);
    case "perplexity":
      const { PerplexityLLM } = require("../AiProviders/perplexity");
      return new PerplexityLLM(embedder, model);
    case "openrouter":
      const { OpenRouterLLM } = require("../AiProviders/openRouter");
      return new OpenRouterLLM(embedder, model);
    case "mistral":
      const { MistralLLM } = require("../AiProviders/mistral");
      return new MistralLLM(embedder, model);
    case "huggingface":
      const { HuggingFaceLLM } = require("../AiProviders/huggingface");
      return new HuggingFaceLLM(embedder, model);
    case "groq":
      const { GroqLLM } = require("../AiProviders/groq");
      return new GroqLLM(embedder, model);
    case "koboldcpp":
      const { KoboldCPPLLM } = require("../AiProviders/koboldCPP");
      return new KoboldCPPLLM(embedder, model);
    case "textgenwebui":
      const { TextGenWebUILLM } = require("../AiProviders/textGenWebUI");
      return new TextGenWebUILLM(embedder, model);
    case "cohere":
      const { CohereLLM } = require("../AiProviders/cohere");
      return new CohereLLM(embedder, model);
    case "litellm":
      const { LiteLLM } = require("../AiProviders/liteLLM");
      return new LiteLLM(embedder, model);
    case "generic-openai":
      const { GenericOpenAiLLM } = require("../AiProviders/genericOpenAi");
      return new GenericOpenAiLLM(embedder, model);
    case "bedrock":
      const { AWSBedrockLLM } = require("../AiProviders/bedrock");
      return new AWSBedrockLLM(embedder, model);
    case "deepseek":
      const { DeepSeekLLM } = require("../AiProviders/deepseek");
      return new DeepSeekLLM(embedder, model);
    case "apipie":
      const { ApiPieLLM } = require("../AiProviders/apipie");
      return new ApiPieLLM(embedder, model);
    case "novita":
      const { NovitaLLM } = require("../AiProviders/novita");
      return new NovitaLLM(embedder, model);
    case "xai":
      const { XAiLLM } = require("../AiProviders/xai");
      return new XAiLLM(embedder, model);
    case "nvidia-nim":
      const { NvidiaNimLLM } = require("../AiProviders/nvidiaNim");
      return new NvidiaNimLLM(embedder, model);
    case "ppio":
      const { PPIOLLM } = require("../AiProviders/ppio");
      return new PPIOLLM(embedder, model);
    case "moonshotai":
      const { MoonshotAiLLM } = require("../AiProviders/moonshotAi");
      return new MoonshotAiLLM(embedder, model);
    case "dpais":
      const { DellProAiStudioLLM } = require("../AiProviders/dellProAiStudio");
      return new DellProAiStudioLLM(embedder, model);
    case "cometapi":
      const { CometApiLLM } = require("../AiProviders/cometapi");
      return new CometApiLLM(embedder, model);
    case "foundry":
      const { FoundryLLM } = require("../AiProviders/foundry");
      return new FoundryLLM(embedder, model);
    case "zai":
      const { ZAiLLM } = require("../AiProviders/zai");
      return new ZAiLLM(embedder, model);
    case "giteeai":
      const { GiteeAILLM } = require("../AiProviders/giteeai");
      return new GiteeAILLM(embedder, model);
    default:
      // Thrown for any unrecognized provider name as well as an unset env.
      throw new Error(
        `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
      );
  }
}
/**
 * Returns the EmbedderProvider by itself per the system's EMBEDDING_ENGINE
 * setting, falling back to the native embedder for unknown/unset values.
 * @returns {BaseEmbedderProvider}
 */
function getEmbeddingEngineSelection() {
  const { NativeEmbedder } = require("../EmbeddingEngines/native");
  const engineSelection = process.env.EMBEDDING_ENGINE;

  // Lazily require the chosen engine so only the selected module is loaded.
  const engineLoaders = {
    openai: () => new (require("../EmbeddingEngines/openAi").OpenAiEmbedder)(),
    azure: () =>
      new (require("../EmbeddingEngines/azureOpenAi").AzureOpenAiEmbedder)(),
    localai: () =>
      new (require("../EmbeddingEngines/localAi").LocalAiEmbedder)(),
    ollama: () => new (require("../EmbeddingEngines/ollama").OllamaEmbedder)(),
    native: () => new NativeEmbedder(),
    lmstudio: () =>
      new (require("../EmbeddingEngines/lmstudio").LMStudioEmbedder)(),
    cohere: () => new (require("../EmbeddingEngines/cohere").CohereEmbedder)(),
    voyageai: () =>
      new (require("../EmbeddingEngines/voyageAi").VoyageAiEmbedder)(),
    litellm: () =>
      new (require("../EmbeddingEngines/liteLLM").LiteLLMEmbedder)(),
    mistral: () =>
      new (require("../EmbeddingEngines/mistral").MistralEmbedder)(),
    "generic-openai": () =>
      new (require("../EmbeddingEngines/genericOpenAi").GenericOpenAiEmbedder)(),
    gemini: () => new (require("../EmbeddingEngines/gemini").GeminiEmbedder)(),
    openrouter: () =>
      new (require("../EmbeddingEngines/openRouter").OpenRouterEmbedder)(),
  };

  const loadEngine = engineLoaders[engineSelection];
  return loadEngine ? loadEngine() : new NativeEmbedder();
}
/**
 * Returns the LLMProviderClass - this is a helper method to access static methods on a class
 * @param {{provider: string | null} | null} params - Initialize params for LLMs provider
 * @returns {BaseLLMProviderClass}
 */
function getLLMProviderClass({ provider = null } = {}) {
  // Each entry lazily requires its module so only the requested provider
  // class is ever loaded. Unknown providers resolve to null.
  const loaders = {
    openai: () => require("../AiProviders/openAi").OpenAiLLM,
    azure: () => require("../AiProviders/azureOpenAi").AzureOpenAiLLM,
    anthropic: () => require("../AiProviders/anthropic").AnthropicLLM,
    gemini: () => require("../AiProviders/gemini").GeminiLLM,
    lmstudio: () => require("../AiProviders/lmStudio").LMStudioLLM,
    localai: () => require("../AiProviders/localAi").LocalAiLLM,
    ollama: () => require("../AiProviders/ollama").OllamaAILLM,
    togetherai: () => require("../AiProviders/togetherAi").TogetherAiLLM,
    fireworksai: () => require("../AiProviders/fireworksAi").FireworksAiLLM,
    perplexity: () => require("../AiProviders/perplexity").PerplexityLLM,
    openrouter: () => require("../AiProviders/openRouter").OpenRouterLLM,
    mistral: () => require("../AiProviders/mistral").MistralLLM,
    huggingface: () => require("../AiProviders/huggingface").HuggingFaceLLM,
    groq: () => require("../AiProviders/groq").GroqLLM,
    koboldcpp: () => require("../AiProviders/koboldCPP").KoboldCPPLLM,
    textgenwebui: () => require("../AiProviders/textGenWebUI").TextGenWebUILLM,
    cohere: () => require("../AiProviders/cohere").CohereLLM,
    litellm: () => require("../AiProviders/liteLLM").LiteLLM,
    "generic-openai": () =>
      require("../AiProviders/genericOpenAi").GenericOpenAiLLM,
    bedrock: () => require("../AiProviders/bedrock").AWSBedrockLLM,
    deepseek: () => require("../AiProviders/deepseek").DeepSeekLLM,
    apipie: () => require("../AiProviders/apipie").ApiPieLLM,
    novita: () => require("../AiProviders/novita").NovitaLLM,
    xai: () => require("../AiProviders/xai").XAiLLM,
    "nvidia-nim": () => require("../AiProviders/nvidiaNim").NvidiaNimLLM,
    ppio: () => require("../AiProviders/ppio").PPIOLLM,
    dpais: () =>
      require("../AiProviders/dellProAiStudio").DellProAiStudioLLM,
    moonshotai: () => require("../AiProviders/moonshotAi").MoonshotAiLLM,
    cometapi: () => require("../AiProviders/cometapi").CometApiLLM,
    foundry: () => require("../AiProviders/foundry").FoundryLLM,
    zai: () => require("../AiProviders/zai").ZAiLLM,
    giteeai: () => require("../AiProviders/giteeai").GiteeAILLM,
  };
  // Object.hasOwn guards against prototype keys being treated as providers.
  if (!Object.hasOwn(loaders, provider ?? "")) return null;
  return loaders[provider]();
}
/**
 * Returns the defined model (if available) for the given provider.
 * @param {{provider: string | null} | null} params - Initialize params for LLMs provider
 * @returns {string | null}
 */
function getBaseLLMProviderModel({ provider = null } = {}) {
  // Map each provider slug to the env var holding its model preference.
  // "azure" intentionally shares OPEN_MODEL_PREF with "openai".
  // "huggingface" and "textgenwebui" have no model preference env var
  // and therefore resolve to null, as does any unknown provider.
  const prefEnvKeys = {
    openai: "OPEN_MODEL_PREF",
    azure: "OPEN_MODEL_PREF",
    anthropic: "ANTHROPIC_MODEL_PREF",
    gemini: "GEMINI_LLM_MODEL_PREF",
    lmstudio: "LMSTUDIO_MODEL_PREF",
    localai: "LOCAL_AI_MODEL_PREF",
    ollama: "OLLAMA_MODEL_PREF",
    togetherai: "TOGETHER_AI_MODEL_PREF",
    fireworksai: "FIREWORKS_AI_LLM_MODEL_PREF",
    perplexity: "PERPLEXITY_MODEL_PREF",
    openrouter: "OPENROUTER_MODEL_PREF",
    mistral: "MISTRAL_MODEL_PREF",
    groq: "GROQ_MODEL_PREF",
    koboldcpp: "KOBOLD_CPP_MODEL_PREF",
    cohere: "COHERE_MODEL_PREF",
    litellm: "LITE_LLM_MODEL_PREF",
    "generic-openai": "GENERIC_OPEN_AI_MODEL_PREF",
    bedrock: "AWS_BEDROCK_LLM_MODEL_PREFERENCE",
    deepseek: "DEEPSEEK_MODEL_PREF",
    apipie: "APIPIE_LLM_MODEL_PREF",
    novita: "NOVITA_LLM_MODEL_PREF",
    xai: "XAI_LLM_MODEL_PREF",
    "nvidia-nim": "NVIDIA_NIM_LLM_MODEL_PREF",
    ppio: "PPIO_MODEL_PREF",
    dpais: "DPAIS_LLM_MODEL_PREF",
    moonshotai: "MOONSHOT_AI_MODEL_PREF",
    cometapi: "COMETAPI_LLM_MODEL_PREF",
    foundry: "FOUNDRY_MODEL_PREF",
    zai: "ZAI_MODEL_PREF",
    giteeai: "GITEE_AI_MODEL_PREF",
  };
  const envKey = Object.hasOwn(prefEnvKeys, provider ?? "")
    ? prefEnvKeys[provider]
    : null;
  return envKey ? process.env[envKey] : null;
}
// Some models have lower restrictions on chars that can be encoded in a single pass
// and by default we assume it can handle 1,000 chars, but some models use work with smaller
// chars so here we can override that value when embedding information.
function maximumChunkLength() {
  const override = process.env.EMBEDDING_MODEL_MAX_CHUNK_LENGTH;
  const parsed = Number(override);
  // Accept only a set, numeric override strictly greater than 1;
  // anything else falls back to the 1,000 char default.
  const isValidOverride = !!override && !Number.isNaN(parsed) && parsed > 1;
  return isValidOverride ? parsed : 1_000;
}
// Splits an array into consecutive sub-arrays of at most `size` elements.
// The final chunk holds whatever remains (it may be shorter than `size`).
function toChunks(arr, size) {
  const chunks = [];
  for (let start = 0; start < arr.length; start += size) {
    chunks.push(arr.slice(start, start + size));
  }
  return chunks;
}
// Public helpers: provider/embedder resolution, chunk sizing, and array chunking.
// (getVectorDbClass and getLLMProvider are defined earlier in this file.)
module.exports = {
  getEmbeddingEngineSelection,
  maximumChunkLength,
  getVectorDbClass,
  getLLMProviderClass,
  getBaseLLMProviderModel,
  getLLMProvider,
  toChunks,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/camelcase.js | server/utils/helpers/camelcase.js | const UPPERCASE = /[\p{Lu}]/u;
// Matches a single lowercase letter in any Unicode script.
const LOWERCASE = /[\p{Ll}]/u;
// Matches a leading capital that is NOT followed by another capital (global/stateful).
const LEADING_CAPITAL = /^[\p{Lu}](?![\p{Lu}])/gu;
// Captures a single identifier character (letter, number, underscore) or end of string.
const IDENTIFIER = /([\p{Alpha}\p{N}_]|$)/u;
// Word separators recognized in input strings: underscore, dot, dash, space.
const SEPARATORS = /[_.\- ]+/;
// Separator run anchored to the start of the string.
const LEADING_SEPARATORS = new RegExp("^" + SEPARATORS.source);
// Separator run followed by the identifier character to uppercase (global/stateful).
const SEPARATORS_AND_IDENTIFIER = new RegExp(
  SEPARATORS.source + IDENTIFIER.source,
  "gu"
);
// Digit run followed by the identifier character to uppercase (global/stateful).
const NUMBERS_AND_IDENTIFIER = new RegExp("\\d+" + IDENTIFIER.source, "gu");
// Inserts "-" separators at camelCase word boundaries so later passes can
// treat all boundaries uniformly. NOTE: the string is re-sliced (mutated)
// mid-loop and `index` is advanced past inserted separators, so the loop
// bounds track the growing string. The `preserveConsecutiveUppercase`
// parameter shadows the module-level helper of the same name on purpose -
// here it is the boolean option, not the function.
const preserveCamelCase = (
  string,
  toLowerCase,
  toUpperCase,
  preserveConsecutiveUppercase
) => {
  let isLastCharLower = false;
  let isLastCharUpper = false;
  let isLastLastCharUpper = false;
  let isLastLastCharPreserved = false;
  for (let index = 0; index < string.length; index++) {
    const character = string[index];
    // Whether the char three positions back is an inserted "-" (treated as
    // preserved at the string start) - presumably tracks separators this
    // loop inserted earlier; TODO confirm against upstream camelcase.
    isLastLastCharPreserved = index > 2 ? string[index - 3] === "-" : true;
    if (isLastCharLower && UPPERCASE.test(character)) {
      // lower -> UPPER transition: split before the capital.
      string = string.slice(0, index) + "-" + string.slice(index);
      isLastCharLower = false;
      isLastLastCharUpper = isLastCharUpper;
      isLastCharUpper = true;
      index++; // skip over the "-" we just inserted
    } else if (
      isLastCharUpper &&
      isLastLastCharUpper &&
      LOWERCASE.test(character) &&
      (!isLastLastCharPreserved || preserveConsecutiveUppercase)
    ) {
      // UPPER UPPER lower run (e.g. "HTMLParser"): split before the last capital.
      string = string.slice(0, index - 1) + "-" + string.slice(index - 1);
      isLastLastCharUpper = isLastCharUpper;
      isLastCharUpper = false;
      isLastCharLower = true;
    } else {
      // Case-insensitive characters (digits, separators) count as neither
      // upper nor lower because both conversions leave them unchanged.
      isLastCharLower =
        toLowerCase(character) === character &&
        toUpperCase(character) !== character;
      isLastLastCharUpper = isLastCharUpper;
      isLastCharUpper =
        toUpperCase(character) === character &&
        toLowerCase(character) !== character;
    }
  }
  return string;
};
// Lowercases a single leading capital (only when not followed by another
// capital), leaving consecutive-uppercase runs at the start intact.
const preserveConsecutiveUppercase = (input, toLowerCase) => {
  // Global regexes carry lastIndex state between calls - reset before use.
  LEADING_CAPITAL.lastIndex = 0;
  return input.replace(LEADING_CAPITAL, (match) => toLowerCase(match));
};
// Uppercases the character after every separator run and every digit run;
// the separator runs themselves are consumed by the replacement.
const postProcess = (input, toUpperCase) => {
  // Global regexes carry lastIndex state between calls - reset before use.
  SEPARATORS_AND_IDENTIFIER.lastIndex = 0;
  NUMBERS_AND_IDENTIFIER.lastIndex = 0;
  const separatorsCollapsed = input.replace(
    SEPARATORS_AND_IDENTIFIER,
    (_, identifier) => toUpperCase(identifier)
  );
  return separatorsCollapsed.replace(NUMBERS_AND_IDENTIFIER, (match) =>
    toUpperCase(match)
  );
};
/**
 * Converts a string (or array of strings) to camelCase / PascalCase.
 * Appears adapted from the `camelcase` npm package; note that here
 * `pascalCase` defaults to TRUE (upstream defaults to false).
 * @param {string|string[]} input - String(s) to convert. Arrays are trimmed,
 *   filtered of empties, and joined with "-".
 * @param {object} [options]
 * @param {boolean} [options.pascalCase=true] - Uppercase the first character.
 * @param {boolean} [options.preserveConsecutiveUppercase=false] - Keep runs
 *   of capitals instead of lowercasing the whole string first.
 * @param {string|string[]|false} [options.locale] - Locale(s) for case
 *   conversion; `false` forces locale-independent conversion.
 * @returns {string}
 * @throws {TypeError} When input is neither a string nor an array.
 */
function camelCase(input, options) {
  if (!(typeof input === "string" || Array.isArray(input))) {
    throw new TypeError("Expected the input to be `string | string[]`");
  }
  options = {
    pascalCase: true,
    preserveConsecutiveUppercase: false,
    ...options,
  };
  if (Array.isArray(input)) {
    input = input
      .map((x) => x.trim())
      .filter((x) => x.length)
      .join("-");
  } else {
    input = input.trim();
  }
  if (input.length === 0) {
    return "";
  }
  // Build locale-aware (or locale-independent) case converters once.
  const toLowerCase =
    options.locale === false
      ? (string) => string.toLowerCase()
      : (string) => string.toLocaleLowerCase(options.locale);
  const toUpperCase =
    options.locale === false
      ? (string) => string.toUpperCase()
      : (string) => string.toLocaleUpperCase(options.locale);
  // Single-character fast path: a lone separator yields the empty string.
  if (input.length === 1) {
    if (SEPARATORS.test(input)) {
      return "";
    }
    return options.pascalCase ? toUpperCase(input) : toLowerCase(input);
  }
  const hasUpperCase = input !== toLowerCase(input);
  if (hasUpperCase) {
    // Insert "-" at camelCase boundaries so they are handled like separators.
    input = preserveCamelCase(
      input,
      toLowerCase,
      toUpperCase,
      options.preserveConsecutiveUppercase
    );
  }
  input = input.replace(LEADING_SEPARATORS, "");
  input = options.preserveConsecutiveUppercase
    ? preserveConsecutiveUppercase(input, toLowerCase)
    : toLowerCase(input);
  if (options.pascalCase) {
    input = toUpperCase(input.charAt(0)) + input.slice(1);
  }
  // Uppercase post-separator/post-digit characters and strip separators.
  return postProcess(input, toUpperCase);
}
// Only the public camelCase entry point is exported; the helpers above are internal.
module.exports = {
  camelCase,
};
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/customModels.js | server/utils/helpers/customModels.js | const { fetchOpenRouterModels } = require("../AiProviders/openRouter");
const {
fetchOpenRouterEmbeddingModels,
} = require("../EmbeddingEngines/openRouter");
const { fetchApiPieModels } = require("../AiProviders/apipie");
const { perplexityModels } = require("../AiProviders/perplexity");
const { fireworksAiModels } = require("../AiProviders/fireworksAi");
const { ElevenLabsTTS } = require("../TextToSpeech/elevenLabs");
const { fetchNovitaModels } = require("../AiProviders/novita");
const { parseLMStudioBasePath } = require("../AiProviders/lmStudio");
const { parseNvidiaNimBasePath } = require("../AiProviders/nvidiaNim");
const { fetchPPIOModels } = require("../AiProviders/ppio");
const { GeminiLLM } = require("../AiProviders/gemini");
const { fetchCometApiModels } = require("../AiProviders/cometapi");
const { parseFoundryBasePath } = require("../AiProviders/foundry");
// Provider slugs (LLM, TTS, and embedding engines) that expose a dynamic
// model list through getCustomModels(). Any slug not in this list is
// rejected before dispatch.
const SUPPORT_CUSTOM_MODELS = [
  "openai",
  "anthropic",
  "localai",
  "ollama",
  "togetherai",
  "fireworksai",
  "nvidia-nim",
  "mistral",
  "perplexity",
  "openrouter",
  "lmstudio",
  "koboldcpp",
  "litellm",
  "elevenlabs-tts",
  "groq",
  "deepseek",
  "apipie",
  "novita",
  "cometapi",
  "xai",
  "gemini",
  "ppio",
  "dpais",
  "moonshotai",
  "foundry",
  "cohere",
  "zai",
  "giteeai",
  // Embedding Engines
  "native-embedder",
  "cohere-embedder",
  "openrouter-embedder",
];
/**
 * Fetches the selectable model list for a given provider.
 * @param {string} provider - Provider slug (must be in SUPPORT_CUSTOM_MODELS).
 * @param {string|boolean|null} apiKey - Optional API key override for the provider.
 * @param {string|null} basePath - Optional base URL override for self-hosted providers.
 * @returns {Promise<{models: Array<object>, error: string|null}>}
 */
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
  if (!SUPPORT_CUSTOM_MODELS.includes(provider))
    return { models: [], error: "Invalid provider for custom models" };

  // Dispatch table instead of a switch - each entry closes over the
  // argument(s) its provider handler needs.
  const fetchers = {
    openai: () => openAiModels(apiKey),
    anthropic: () => anthropicModels(apiKey),
    localai: () => localAIModels(basePath, apiKey),
    ollama: () => ollamaAIModels(basePath, apiKey),
    togetherai: () => getTogetherAiModels(apiKey),
    fireworksai: () => getFireworksAiModels(apiKey),
    mistral: () => getMistralModels(apiKey),
    perplexity: () => getPerplexityModels(),
    openrouter: () => getOpenRouterModels(),
    lmstudio: () => getLMStudioModels(basePath),
    koboldcpp: () => getKoboldCPPModels(basePath),
    litellm: () => liteLLMModels(basePath, apiKey),
    "elevenlabs-tts": () => getElevenLabsModels(apiKey),
    groq: () => getGroqAiModels(apiKey),
    deepseek: () => getDeepSeekModels(apiKey),
    apipie: () => getAPIPieModels(apiKey),
    novita: () => getNovitaModels(),
    cometapi: () => getCometApiModels(),
    xai: () => getXAIModels(apiKey),
    "nvidia-nim": () => getNvidiaNimModels(basePath),
    gemini: () => getGeminiModels(apiKey),
    ppio: () => getPPIOModels(apiKey),
    dpais: () => getDellProAiStudioModels(basePath),
    moonshotai: () => getMoonshotAiModels(apiKey),
    foundry: () => getFoundryModels(basePath),
    cohere: () => getCohereModels(apiKey, "chat"),
    zai: () => getZAiModels(apiKey),
    giteeai: () => getGiteeAIModels(apiKey),
    // Embedding engines
    "native-embedder": () => getNativeEmbedderModels(),
    "cohere-embedder": () => getCohereModels(apiKey, "embed"),
    "openrouter-embedder": () => getOpenRouterEmbeddingModels(),
  };

  const fetcher = fetchers[provider];
  // Every slug in SUPPORT_CUSTOM_MODELS has an entry above; this fallback
  // mirrors the original switch's default branch for safety.
  if (!fetcher)
    return { models: [], error: "Invalid provider for custom models" };
  return await fetcher();
}
/**
 * Lists OpenAI chat models plus the account's fine-tuned models.
 * Falls back to a static list of known chat models when the API call fails.
 * @param {string|null} apiKey - Optional key; defaults to process.env.OPEN_AI_KEY.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function openAiModels(apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const openai = new OpenAIApi({
    apiKey: apiKey || process.env.OPEN_AI_KEY,
  });
  // On failure, substitute a hard-coded set of well-known chat models so the
  // UI still has something to offer.
  const allModels = await openai.models
    .list()
    .then((results) => results.data)
    .catch((e) => {
      console.error(`OpenAI:listModels`, e.message);
      return [
        {
          name: "gpt-3.5-turbo",
          id: "gpt-3.5-turbo",
          object: "model",
          created: 1677610602,
          owned_by: "openai",
          organization: "OpenAi",
        },
        {
          name: "gpt-4o",
          id: "gpt-4o",
          object: "model",
          created: 1677610602,
          owned_by: "openai",
          organization: "OpenAi",
        },
        {
          name: "gpt-4",
          id: "gpt-4",
          object: "model",
          created: 1687882411,
          owned_by: "openai",
          organization: "OpenAi",
        },
        {
          name: "gpt-4-turbo",
          id: "gpt-4-turbo",
          object: "model",
          created: 1712361441,
          owned_by: "system",
          organization: "OpenAi",
        },
        {
          name: "gpt-4-32k",
          id: "gpt-4-32k",
          object: "model",
          created: 1687979321,
          owned_by: "openai",
          organization: "OpenAi",
        },
        {
          name: "gpt-3.5-turbo-16k",
          id: "gpt-3.5-turbo-16k",
          object: "model",
          created: 1683758102,
          owned_by: "openai-internal",
          organization: "OpenAi",
        },
      ];
    });
  // Keep chat-capable GPT/o-series models; drop fine-tunes (listed separately
  // below) and non-chat variants (vision/audio/moderation/etc).
  const gpts = allModels
    .filter(
      (model) =>
        (model.id.includes("gpt") && !model.id.startsWith("ft:")) ||
        model.id.startsWith("o") // o1, o1-mini, o3, etc
    )
    .filter(
      (model) =>
        !model.id.includes("vision") &&
        !model.id.includes("instruct") &&
        !model.id.includes("audio") &&
        !model.id.includes("realtime") &&
        !model.id.includes("image") &&
        !model.id.includes("moderation") &&
        !model.id.includes("transcribe")
    )
    .map((model) => {
      return {
        ...model,
        name: model.id,
        organization: "OpenAi",
      };
    });
  // Models the account owns (fine-tunes) are identified by a non-OpenAI owner.
  const customModels = allModels
    .filter(
      (model) =>
        !model.owned_by.includes("openai") && model.owned_by !== "system"
    )
    .map((model) => {
      return {
        ...model,
        name: model.id,
        organization: "Your Fine-Tunes",
      };
    });
  // Api Key was successful so lets save it for future uses
  if ((gpts.length > 0 || customModels.length > 0) && !!apiKey)
    process.env.OPEN_AI_KEY = apiKey;
  return { models: [...gpts, ...customModels], error: null };
}
/**
 * Lists Anthropic models. `_apiKey === true` means "reuse the stored key".
 * @param {string|boolean|null} _apiKey
 * @returns {Promise<{models: Array<{id: string, name: string}>, error: null}>}
 */
async function anthropicModels(_apiKey = null) {
  const apiKey =
    _apiKey === true
      ? process.env.ANTHROPIC_API_KEY
      : _apiKey || process.env.ANTHROPIC_API_KEY || null;
  const AnthropicAI = require("@anthropic-ai/sdk");
  const anthropic = new AnthropicAI({ apiKey });
  let models = [];
  try {
    const results = await anthropic.models.list();
    models = results.data
      .filter((model) => model.type === "model")
      .map((model) => ({
        id: model.id,
        name: model.display_name,
      }));
  } catch (e) {
    console.error(`Anthropic:listModels`, e.message);
    models = [];
  }
  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!apiKey) process.env.ANTHROPIC_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists models from a LocalAI instance via its OpenAI-compatible endpoint.
 * @param {string|null} basePath - Override for LOCAL_AI_BASE_PATH.
 * @param {string|null} apiKey - Override for LOCAL_AI_API_KEY.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function localAIModels(basePath = null, apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const client = new OpenAIApi({
    baseURL: basePath || process.env.LOCAL_AI_BASE_PATH,
    apiKey: apiKey || process.env.LOCAL_AI_API_KEY || null,
  });
  let models = [];
  try {
    models = (await client.models.list()).data;
  } catch (e) {
    console.error(`LocalAI:listModels`, e.message);
  }
  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!apiKey) process.env.LOCAL_AI_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists Groq chat models. `_apiKey === true` means "reuse the stored key".
 * Whisper (speech) and tool-use models are filtered out.
 * @param {string|boolean|null} _apiKey
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getGroqAiModels(_apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const apiKey =
    _apiKey === true
      ? process.env.GROQ_API_KEY
      : _apiKey || process.env.GROQ_API_KEY || null;
  const client = new OpenAIApi({
    baseURL: "https://api.groq.com/openai/v1",
    apiKey,
  });
  let allModels = [];
  try {
    allModels = (await client.models.list()).data;
  } catch (e) {
    console.error(`GroqAi:listModels`, e.message);
  }
  const models = allModels.filter(
    (model) => !model.id.includes("whisper") && !model.id.includes("tool-use")
  );
  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!apiKey) process.env.GROQ_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists models from a LiteLLM proxy via its OpenAI-compatible endpoint.
 * @param {string|null} basePath - Override for LITE_LLM_BASE_PATH.
 * @param {string|null} apiKey - Override for LITE_LLM_API_KEY.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function liteLLMModels(basePath = null, apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const client = new OpenAIApi({
    baseURL: basePath || process.env.LITE_LLM_BASE_PATH,
    apiKey: apiKey || process.env.LITE_LLM_API_KEY || null,
  });
  let models = [];
  try {
    models = (await client.models.list()).data;
  } catch (e) {
    console.error(`LiteLLM:listModels`, e.message);
  }
  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!apiKey) process.env.LITE_LLM_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists models from an LM Studio instance. A failed API call yields an empty
 * list; an invalid base path yields an error string.
 * @param {string|null} basePath - Override for LMSTUDIO_BASE_PATH.
 * @returns {Promise<{models: Array<object>, error: string|null}>}
 */
async function getLMStudioModels(basePath = null) {
  try {
    const { OpenAI: OpenAIApi } = require("openai");
    const client = new OpenAIApi({
      baseURL: parseLMStudioBasePath(
        basePath || process.env.LMSTUDIO_BASE_PATH
      ),
      apiKey: null,
    });
    let models = [];
    try {
      models = (await client.models.list()).data;
    } catch (e) {
      console.error(`LMStudio:listModels`, e.message);
    }
    return { models, error: null };
  } catch (e) {
    console.error(`LMStudio:getLMStudioModels`, e.message);
    return { models: [], error: "Could not fetch LMStudio Models" };
  }
}
/**
 * Lists models from a KoboldCPP instance via its OpenAI-compatible endpoint.
 * @param {string|null} basePath - Override for KOBOLD_CPP_BASE_PATH.
 * @returns {Promise<{models: Array<object>, error: string|null}>}
 */
async function getKoboldCPPModels(basePath = null) {
  try {
    const { OpenAI: OpenAIApi } = require("openai");
    const client = new OpenAIApi({
      baseURL: basePath || process.env.KOBOLD_CPP_BASE_PATH,
      apiKey: null,
    });
    let models = [];
    try {
      models = (await client.models.list()).data;
    } catch (e) {
      console.error(`KoboldCPP:listModels`, e.message);
    }
    return { models, error: null };
  } catch (e) {
    console.error(`KoboldCPP:getKoboldCPPModels`, e.message);
    return { models: [], error: "Could not fetch KoboldCPP Models" };
  }
}
/**
 * Lists models from an Ollama server via its /api/tags endpoint.
 * @param {string|null} basePath - Override for OLLAMA_BASE_PATH; must be a
 *   valid URL without a trailing slash.
 * @param {string|null} _authToken - Optional bearer token override.
 * @returns {Promise<{models: Array<{id: string}>, error: string|null}>}
 */
async function ollamaAIModels(basePath = null, _authToken = null) {
  let url;
  try {
    const urlPath = basePath ?? process.env.OLLAMA_BASE_PATH;
    new URL(urlPath); // throws for invalid/empty values
    if (urlPath.endsWith("/")) throw new Error("BasePath Cannot end in /!");
    url = urlPath;
  } catch {
    return { models: [], error: "Not a valid URL." };
  }

  const authToken = _authToken || process.env.OLLAMA_AUTH_TOKEN || null;
  const headers = authToken ? { Authorization: `Bearer ${authToken}` } : {};
  let models = [];
  try {
    const res = await fetch(`${url}/api/tags`, { headers: headers });
    if (!res.ok)
      throw new Error(`Could not reach Ollama server! ${res.status}`);
    const data = await res.json();
    models = (data?.models || []).map((model) => ({ id: model.name }));
  } catch (e) {
    console.error(e);
    models = [];
  }
  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!authToken)
    process.env.OLLAMA_AUTH_TOKEN = authToken;
  return { models, error: null };
}
/**
 * Lists Together AI models. `apiKey === true` means "reuse the stored key".
 * @param {string|boolean|null} apiKey
 * @returns {Promise<{models: Array<object>, error: string|null}>}
 */
async function getTogetherAiModels(apiKey = null) {
  const resolvedKey =
    apiKey === true
      ? process.env.TOGETHER_AI_API_KEY
      : apiKey || process.env.TOGETHER_AI_API_KEY || null;
  try {
    const { togetherAiModels } = require("../AiProviders/togetherAi");
    const models = await togetherAiModels(resolvedKey);
    // Api Key was successful so lets save it for future uses
    if (models.length > 0 && !!resolvedKey)
      process.env.TOGETHER_AI_API_KEY = resolvedKey;
    return { models, error: null };
  } catch (error) {
    console.error("Error in getTogetherAiModels:", error);
    return { models: [], error: "Failed to fetch Together AI models" };
  }
}
/**
 * Lists Fireworks AI models known to the provider helper.
 * @param {string|null} apiKey
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getFireworksAiModels(apiKey = null) {
  const knownModels = await fireworksAiModels(apiKey);
  // Fixed guard: was `!Object.keys(...).length === 0`, which compares a
  // boolean against 0 and is always false; the early-return never fired.
  // (Mapping an empty object yields [] anyway, so behavior is unchanged.)
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      organization: model.organization,
      name: model.name,
    };
  });
  return { models, error: null };
}
/**
 * Lists Perplexity models known to the provider helper.
 * @returns {Promise<{models: Array<{id: string, name: string}>, error: null}>}
 */
async function getPerplexityModels() {
  const knownModels = perplexityModels();
  // Fixed guard: was `!Object.keys(...).length === 0`, which compares a
  // boolean against 0 and is always false; the early-return never fired.
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      name: model.name,
    };
  });
  return { models, error: null };
}
/**
 * Lists OpenRouter chat models from the remote/cached catalog.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getOpenRouterModels() {
  const knownModels = await fetchOpenRouterModels();
  // Fixed guard: was `!Object.keys(...).length === 0`, which compares a
  // boolean against 0 and is always false; the early-return never fired.
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      organization: model.organization,
      name: model.name,
    };
  });
  return { models, error: null };
}
/**
 * Lists Novita models from the remote/cached catalog.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getNovitaModels() {
  const knownModels = await fetchNovitaModels();
  // Fixed guard: was `!Object.keys(...).length === 0`, which compares a
  // boolean against 0 and is always false; the early-return never fired.
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      organization: model.organization,
      name: model.name,
    };
  });
  return { models, error: null };
}
/**
 * Lists CometAPI models from the remote/cached catalog.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getCometApiModels() {
  const knownModels = await fetchCometApiModels();
  // Fixed guard: was `!Object.keys(...).length === 0`, which compares a
  // boolean against 0 and is always false; the early-return never fired.
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      organization: model.organization,
      name: model.name,
    };
  });
  return { models, error: null };
}
/**
 * Lists APIpie chat-capable models from the remote/cached catalog.
 * @param {string|null} apiKey
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getAPIPieModels(apiKey = null) {
  const knownModels = await fetchApiPieModels(apiKey);
  // Fixed guard: was `!Object.keys(...).length === 0`, which compares a
  // boolean against 0 and is always false; the early-return never fired.
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };

  const models = Object.values(knownModels)
    .filter((model) => {
      // Filter for chat models
      return (
        model.subtype &&
        (model.subtype.includes("chat") || model.subtype.includes("chatx"))
      );
    })
    .map((model) => {
      return {
        id: model.id,
        organization: model.organization,
        name: model.name,
      };
    });
  return { models, error: null };
}
/**
 * Lists Mistral chat models (embedding models are excluded).
 * @param {string|null} apiKey - Override for MISTRAL_API_KEY.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getMistralModels(apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const client = new OpenAIApi({
    apiKey: apiKey || process.env.MISTRAL_API_KEY || null,
    baseURL: "https://api.mistral.ai/v1",
  });
  let models = [];
  try {
    const results = await client.models.list();
    // The same endpoint lists embedding models - drop them here.
    models = results.data.filter((model) => !model.id.includes("embed"));
  } catch (e) {
    console.error(`Mistral:listModels`, e.message);
  }
  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!apiKey) process.env.MISTRAL_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists ElevenLabs TTS voices as "models"; falls back to the default
 * Rachel voice when none are returned.
 * @param {string|null} apiKey - Override for TTS_ELEVEN_LABS_KEY.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getElevenLabsModels(apiKey = null) {
  const voices = await ElevenLabsTTS.voices(apiKey);
  const models = voices.map((voice) => ({
    id: voice.voice_id,
    organization: voice.category,
    name: voice.name,
  }));

  if (models.length === 0) {
    return {
      models: [
        {
          id: "21m00Tcm4TlvDq8ikWAM",
          organization: "premade",
          name: "Rachel (default)",
        },
      ],
      error: null,
    };
  }

  // Reaching here implies models.length > 0, so a provided key worked -
  // save it for future uses.
  if (!!apiKey) process.env.TTS_ELEVEN_LABS_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists DeepSeek models; falls back to the two known defaults when the
 * API call fails.
 * @param {string|null} apiKey - Override for DEEPSEEK_API_KEY.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getDeepSeekModels(apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const client = new OpenAIApi({
    apiKey: apiKey || process.env.DEEPSEEK_API_KEY,
    baseURL: "https://api.deepseek.com/v1",
  });
  let models;
  try {
    const results = await client.models.list();
    models = results.data.map((model) => ({
      id: model.id,
      name: model.id,
      organization: model.owned_by,
    }));
  } catch (e) {
    console.error(`DeepSeek:listModels`, e.message);
    models = [
      {
        id: "deepseek-chat",
        name: "deepseek-chat",
        organization: "deepseek",
      },
      {
        id: "deepseek-reasoner",
        name: "deepseek-reasoner",
        organization: "deepseek",
      },
    ];
  }
  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!apiKey) process.env.DEEPSEEK_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists GiteeAI models from the provider helper's catalog.
 * NOTE(review): getCustomModels passes an apiKey argument but this function
 * (and giteeAiModels) takes none - confirm whether the key should be used.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getGiteeAIModels() {
  const { giteeAiModels } = require("../AiProviders/giteeai");
  const modelMap = await giteeAiModels();
  // Fixed guard: was `!Object.keys(...).length === 0`, which compares a
  // boolean against 0 and is always false; the early-return never fired.
  if (Object.keys(modelMap).length === 0) return { models: [], error: null };

  const models = Object.values(modelMap).map((model) => {
    return {
      id: model.id,
      organization: model.organization ?? "GiteeAI",
      name: model.id,
    };
  });
  return { models, error: null };
}
/**
 * Lists xAI models; falls back to the known grok-beta model on failure.
 * `_apiKey === true` means "reuse the stored key".
 * @param {string|boolean|null} _apiKey
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getXAIModels(_apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const apiKey =
    _apiKey === true
      ? process.env.XAI_LLM_API_KEY
      : _apiKey || process.env.XAI_LLM_API_KEY || null;
  const client = new OpenAIApi({
    baseURL: "https://api.x.ai/v1",
    apiKey,
  });
  let models;
  try {
    models = (await client.models.list()).data;
  } catch (e) {
    console.error(`XAI:listModels`, e.message);
    models = [
      {
        created: 1725148800,
        id: "grok-beta",
        object: "model",
        owned_by: "xai",
      },
    ];
  }
  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!apiKey) process.env.XAI_LLM_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists models from an NVIDIA NIM instance via its OpenAI-compatible endpoint.
 * @param {string|null} basePath - Override for NVIDIA_NIM_LLM_BASE_PATH.
 * @returns {Promise<{models: Array<object>, error: string|null}>}
 */
async function getNvidiaNimModels(basePath = null) {
  try {
    const { OpenAI: OpenAIApi } = require("openai");
    const client = new OpenAIApi({
      baseURL: parseNvidiaNimBasePath(
        basePath ?? process.env.NVIDIA_NIM_LLM_BASE_PATH
      ),
      apiKey: null,
    });
    const results = await client.models.list();
    const models = results.data.map((model) => ({
      id: model.id,
      name: model.id,
      organization: model.owned_by,
    }));
    return { models, error: null };
  } catch (e) {
    console.error(`NVIDIA NIM:getNvidiaNimModels`, e.message);
    return { models: [], error: "Could not fetch NVIDIA NIM Models" };
  }
}
/**
 * Lists Gemini models via the GeminiLLM static helper.
 * `_apiKey === true` means "reuse the stored key".
 * @param {string|boolean|null} _apiKey
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getGeminiModels(_apiKey = null) {
  const apiKey =
    _apiKey === true
      ? process.env.GEMINI_API_KEY
      : _apiKey || process.env.GEMINI_API_KEY || null;
  const models = await GeminiLLM.fetchModels(apiKey);
  // A non-empty list means the provided key worked - save it for future uses.
  const keyWorked = models.length > 0 && !!apiKey;
  if (keyWorked) process.env.GEMINI_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists PPIO models from the remote/cached catalog.
 * NOTE(review): getCustomModels passes an apiKey argument but this function
 * takes none - confirm whether fetchPPIOModels should receive it.
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getPPIOModels() {
  const ppioModels = await fetchPPIOModels();
  // Fixed guard: was `!Object.keys(...).length === 0`, which compares a
  // boolean against 0 and is always false; the early-return never fired.
  if (Object.keys(ppioModels).length === 0) return { models: [], error: null };

  const models = Object.values(ppioModels).map((model) => {
    return {
      id: model.id,
      organization: model.organization,
      name: model.name,
    };
  });
  return { models, error: null };
}
/**
 * Lists text-to-text models from a Dell Pro AI Studio instance.
 * @param {string|null} basePath - Override for DELL_PRO_AI_STUDIO_BASE_PATH.
 * @returns {Promise<{models: Array<object>, error: string|null}>}
 */
async function getDellProAiStudioModels(basePath = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  try {
    // Only the origin is kept; the OpenAI-compatible API lives at /v1/openai.
    const { origin } = new URL(
      basePath || process.env.DELL_PRO_AI_STUDIO_BASE_PATH
    );
    const client = new OpenAIApi({
      baseURL: `${origin}/v1/openai`,
      apiKey: null,
    });
    const results = await client.models.list();
    const models = results.data
      .filter(
        (model) => model?.capability?.includes("TextToText") // Only include text-to-text models for this handler
      )
      .map((model) => ({
        id: model.id,
        name: model.name,
        organization: model.owned_by,
      }));
    return { models, error: null };
  } catch (e) {
    console.error(`getDellProAiStudioModels`, e.message);
    return {
      models: [],
      error: "Could not reach Dell Pro Ai Studio from the provided base path",
    };
  }
}
// The native embedder ships a fixed model catalog - no network call needed.
function getNativeEmbedderModels() {
  const { NativeEmbedder } = require("../EmbeddingEngines/native");
  const models = NativeEmbedder.availableModels();
  return { models, error: null };
}
/**
 * Lists Moonshot AI models. `_apiKey === true` means "reuse the stored key".
 * @param {string|boolean|null} _apiKey
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getMoonshotAiModels(_apiKey = null) {
  const apiKey =
    _apiKey === true
      ? process.env.MOONSHOT_AI_API_KEY
      : _apiKey || process.env.MOONSHOT_AI_API_KEY || null;
  const { OpenAI: OpenAIApi } = require("openai");
  const openai = new OpenAIApi({
    baseURL: "https://api.moonshot.ai/v1",
    apiKey,
  });
  const models = await openai.models
    .list()
    .then((results) => results.data)
    .catch((e) => {
      console.error(`MoonshotAi:listModels`, e.message);
      return [];
    });
  // Api Key was successful so lets save it for future uses.
  // Fixed: also require a truthy key (matching every other provider here) so
  // we never assign null to process.env, which would coerce to the string "null".
  if (models.length > 0 && !!apiKey) process.env.MOONSHOT_AI_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * Lists models from a Foundry instance via its OpenAI-compatible endpoint.
 * @param {string|null} basePath - Override for FOUNDRY_BASE_PATH.
 * @returns {Promise<{models: Array<object>, error: string|null}>}
 */
async function getFoundryModels(basePath = null) {
  try {
    const { OpenAI: OpenAIApi } = require("openai");
    const client = new OpenAIApi({
      baseURL: parseFoundryBasePath(basePath || process.env.FOUNDRY_BASE_PATH),
      apiKey: null,
    });
    let models = [];
    try {
      const results = await client.models.list();
      models = results.data.map((model) => ({
        ...model,
        name: model.id,
      }));
    } catch (e) {
      console.error(`Foundry:listModels`, e.message);
    }
    return { models, error: null };
  } catch (e) {
    console.error(`Foundry:getFoundryModels`, e.message);
    return { models: [], error: "Could not fetch Foundry Models" };
  }
}
/**
 * Get Cohere models
 * @param {string} _apiKey - The API key to use (`true` means reuse stored key)
 * @param {'chat' | 'embed'} type - The type of model to get
 * @returns {Promise<{models: Array<{id: string, organization: string, name: string}>, error: string | null}>}
 */
async function getCohereModels(_apiKey = null, type = "chat") {
  const apiKey =
    _apiKey === true
      ? process.env.COHERE_API_KEY
      : _apiKey || process.env.COHERE_API_KEY || null;
  const { CohereClient } = require("cohere-ai");
  const cohere = new CohereClient({ token: apiKey });
  let models = [];
  try {
    const results = await cohere.models.list({
      pageSize: 1000,
      endpoint: type,
    });
    models = results.models.map((model) => ({
      id: model.name,
      name: model.name,
    }));
  } catch (e) {
    console.error(`Cohere:listModels`, e.message);
  }
  return { models, error: null };
}
/**
 * Lists Z.AI models. `_apiKey === true` means "reuse the stored key".
 * @param {string|boolean|null} _apiKey
 * @returns {Promise<{models: Array<object>, error: null}>}
 */
async function getZAiModels(_apiKey = null) {
  const { OpenAI: OpenAIApi } = require("openai");
  const apiKey =
    _apiKey === true
      ? process.env.ZAI_API_KEY
      : _apiKey || process.env.ZAI_API_KEY || null;
  const client = new OpenAIApi({
    baseURL: "https://api.z.ai/api/paas/v4",
    apiKey,
  });
  let models = [];
  try {
    models = (await client.models.list()).data;
  } catch (e) {
    console.error(`Z.AI:listModels`, e.message);
  }
  // Api Key was successful so lets save it for future uses
  if (models.length > 0 && !!apiKey) process.env.ZAI_API_KEY = apiKey;
  return { models, error: null };
}
/**
 * List OpenRouter models usable for embeddings.
 * @returns {Promise<{models: Array<{id: string, organization: string, name: string}>, error: string | null}>}
 */
async function getOpenRouterEmbeddingModels() {
  const knownModels = await fetchOpenRouterEmbeddingModels();
  // BUGFIX: was `if (!Object.keys(knownModels).length === 0)` — `!` binds
  // tighter than `===`, so the expression coerced to a boolean first and the
  // comparison with 0 was always false, meaning the empty-map early return
  // never fired. Check the length directly instead.
  if (Object.keys(knownModels).length === 0)
    return { models: [], error: null };
  const models = Object.values(knownModels).map((model) => {
    return {
      id: model.id,
      organization: model.organization,
      name: model.name,
    };
  });
  return { models, error: null };
}
// Public surface of this helper: the dynamic model-list fetcher and the set
// of providers that support custom model enumeration.
module.exports = {
  getCustomModels,
  SUPPORT_CUSTOM_MODELS,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/updateENV.js | server/utils/helpers/updateENV.js | const { Telemetry } = require("../../models/telemetry");
const {
SUPPORTED_CONNECTION_METHODS,
} = require("../AiProviders/bedrock/utils");
const { resetAllVectorStores } = require("../vectorStore/resetAllVectorStores");
/**
 * Maps each UI-facing setting key to its backing process.env variable plus
 * the validation hooks that gate an update (consumed by `updateENV`).
 * - `envKey`: the process.env variable that stores the value.
 * - `checks`: validators run first; each returns an error string or null.
 * - `preUpdate`: async hooks run before the env var is written; a string
 *   return aborts the update with that error.
 * - `postUpdate`: async hooks run after the env var is written.
 */
const KEY_MAPPING = {
  LLMProvider: {
    envKey: "LLM_PROVIDER",
    checks: [isNotEmpty, supportedLLM],
  },
  // OpenAI Settings
  OpenAiKey: {
    envKey: "OPEN_AI_KEY",
    checks: [isNotEmpty, validOpenAIKey],
  },
  OpenAiModelPref: {
    envKey: "OPEN_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Azure OpenAI Settings
  AzureOpenAiEndpoint: {
    envKey: "AZURE_OPENAI_ENDPOINT",
    checks: [isNotEmpty],
  },
  AzureOpenAiTokenLimit: {
    envKey: "AZURE_OPENAI_TOKEN_LIMIT",
    checks: [validOpenAiTokenLimit],
  },
  AzureOpenAiKey: {
    envKey: "AZURE_OPENAI_KEY",
    checks: [isNotEmpty],
  },
  // NOTE: Azure intentionally shares OPEN_MODEL_PREF with OpenAI.
  AzureOpenAiModelPref: {
    envKey: "OPEN_MODEL_PREF",
    checks: [isNotEmpty],
  },
  AzureOpenAiEmbeddingModelPref: {
    envKey: "EMBEDDING_MODEL_PREF",
    checks: [isNotEmpty],
  },
  AzureOpenAiModelType: {
    envKey: "AZURE_OPENAI_MODEL_TYPE",
    checks: [
      (input) =>
        ["default", "reasoning"].includes(input)
          ? null
          : "Invalid model type. Must be one of: default, reasoning.",
    ],
  },
  // Anthropic Settings
  AnthropicApiKey: {
    envKey: "ANTHROPIC_API_KEY",
    checks: [isNotEmpty, validAnthropicApiKey],
  },
  AnthropicModelPref: {
    envKey: "ANTHROPIC_MODEL_PREF",
    checks: [isNotEmpty],
  },
  AnthropicCacheControl: {
    envKey: "ANTHROPIC_CACHE_CONTROL",
    checks: [
      (input) =>
        ["none", "5m", "1h"].includes(input)
          ? null
          : // BUGFIX: message previously omitted "none" even though it is accepted.
            "Invalid cache control. Must be one of: none, 5m, 1h.",
    ],
  },
  GeminiLLMApiKey: {
    envKey: "GEMINI_API_KEY",
    checks: [isNotEmpty],
  },
  GeminiLLMModelPref: {
    envKey: "GEMINI_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  GeminiSafetySetting: {
    envKey: "GEMINI_SAFETY_SETTING",
    checks: [validGeminiSafetySetting],
  },
  // LMStudio Settings
  LMStudioBasePath: {
    envKey: "LMSTUDIO_BASE_PATH",
    checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
  },
  LMStudioModelPref: {
    envKey: "LMSTUDIO_MODEL_PREF",
    checks: [],
  },
  LMStudioTokenLimit: {
    envKey: "LMSTUDIO_MODEL_TOKEN_LIMIT",
    checks: [],
  },
  // LocalAI Settings
  LocalAiBasePath: {
    envKey: "LOCAL_AI_BASE_PATH",
    checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
  },
  LocalAiModelPref: {
    envKey: "LOCAL_AI_MODEL_PREF",
    checks: [],
  },
  LocalAiTokenLimit: {
    envKey: "LOCAL_AI_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  LocalAiApiKey: {
    envKey: "LOCAL_AI_API_KEY",
    checks: [],
  },
  OllamaLLMBasePath: {
    envKey: "OLLAMA_BASE_PATH",
    checks: [isNotEmpty, validOllamaLLMBasePath, validDockerizedUrl],
  },
  OllamaLLMModelPref: {
    envKey: "OLLAMA_MODEL_PREF",
    checks: [],
  },
  OllamaLLMTokenLimit: {
    envKey: "OLLAMA_MODEL_TOKEN_LIMIT",
    checks: [],
  },
  OllamaLLMPerformanceMode: {
    envKey: "OLLAMA_PERFORMANCE_MODE",
    checks: [],
  },
  OllamaLLMKeepAliveSeconds: {
    envKey: "OLLAMA_KEEP_ALIVE_TIMEOUT",
    checks: [isInteger],
  },
  OllamaLLMAuthToken: {
    envKey: "OLLAMA_AUTH_TOKEN",
    checks: [],
  },
  // Mistral AI API Settings
  MistralApiKey: {
    envKey: "MISTRAL_API_KEY",
    checks: [isNotEmpty],
  },
  MistralModelPref: {
    envKey: "MISTRAL_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Hugging Face LLM Inference Settings
  HuggingFaceLLMEndpoint: {
    envKey: "HUGGING_FACE_LLM_ENDPOINT",
    checks: [isNotEmpty, isValidURL, validHuggingFaceEndpoint],
  },
  HuggingFaceLLMAccessToken: {
    envKey: "HUGGING_FACE_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  HuggingFaceLLMTokenLimit: {
    envKey: "HUGGING_FACE_LLM_TOKEN_LIMIT",
    checks: [nonZero],
  },
  // KoboldCPP Settings
  KoboldCPPBasePath: {
    envKey: "KOBOLD_CPP_BASE_PATH",
    checks: [isNotEmpty, isValidURL],
  },
  KoboldCPPModelPref: {
    envKey: "KOBOLD_CPP_MODEL_PREF",
    checks: [isNotEmpty],
  },
  KoboldCPPTokenLimit: {
    envKey: "KOBOLD_CPP_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  KoboldCPPMaxTokens: {
    envKey: "KOBOLD_CPP_MAX_TOKENS",
    checks: [nonZero],
  },
  // Text Generation Web UI Settings
  TextGenWebUIBasePath: {
    envKey: "TEXT_GEN_WEB_UI_BASE_PATH",
    checks: [isValidURL],
  },
  TextGenWebUITokenLimit: {
    envKey: "TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  TextGenWebUIAPIKey: {
    envKey: "TEXT_GEN_WEB_UI_API_KEY",
    checks: [],
  },
  // LiteLLM Settings
  LiteLLMModelPref: {
    envKey: "LITE_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  LiteLLMTokenLimit: {
    envKey: "LITE_LLM_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  LiteLLMBasePath: {
    envKey: "LITE_LLM_BASE_PATH",
    checks: [isValidURL],
  },
  LiteLLMApiKey: {
    envKey: "LITE_LLM_API_KEY",
    checks: [],
  },
  // Generic OpenAI InferenceSettings
  GenericOpenAiBasePath: {
    envKey: "GENERIC_OPEN_AI_BASE_PATH",
    checks: [isValidURL],
  },
  GenericOpenAiModelPref: {
    envKey: "GENERIC_OPEN_AI_MODEL_PREF",
    checks: [isNotEmpty],
  },
  GenericOpenAiTokenLimit: {
    envKey: "GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  GenericOpenAiKey: {
    envKey: "GENERIC_OPEN_AI_API_KEY",
    checks: [],
  },
  GenericOpenAiMaxTokens: {
    envKey: "GENERIC_OPEN_AI_MAX_TOKENS",
    checks: [nonZero],
  },
  // AWS Bedrock LLM InferenceSettings
  AwsBedrockLLMConnectionMethod: {
    envKey: "AWS_BEDROCK_LLM_CONNECTION_METHOD",
    checks: [
      (input) =>
        SUPPORTED_CONNECTION_METHODS.includes(input) ? null : "invalid Value",
    ],
  },
  AwsBedrockLLMAccessKeyId: {
    envKey: "AWS_BEDROCK_LLM_ACCESS_KEY_ID",
    checks: [],
  },
  AwsBedrockLLMAccessKey: {
    envKey: "AWS_BEDROCK_LLM_ACCESS_KEY",
    checks: [],
  },
  AwsBedrockLLMSessionToken: {
    envKey: "AWS_BEDROCK_LLM_SESSION_TOKEN",
    checks: [],
  },
  AwsBedrockLLMAPIKey: {
    envKey: "AWS_BEDROCK_LLM_API_KEY",
    checks: [],
  },
  AwsBedrockLLMRegion: {
    envKey: "AWS_BEDROCK_LLM_REGION",
    checks: [isNotEmpty],
  },
  AwsBedrockLLMModel: {
    envKey: "AWS_BEDROCK_LLM_MODEL_PREFERENCE",
    checks: [isNotEmpty],
  },
  AwsBedrockLLMTokenLimit: {
    envKey: "AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  AwsBedrockLLMMaxOutputTokens: {
    envKey: "AWS_BEDROCK_LLM_MAX_OUTPUT_TOKENS",
    checks: [nonZero],
  },
  // Dell Pro AI Studio Settings
  DellProAiStudioBasePath: {
    envKey: "DPAIS_LLM_BASE_PATH",
    checks: [isNotEmpty, validDockerizedUrl],
  },
  DellProAiStudioModelPref: {
    envKey: "DPAIS_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  DellProAiStudioTokenLimit: {
    envKey: "DPAIS_LLM_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  EmbeddingEngine: {
    envKey: "EMBEDDING_ENGINE",
    checks: [supportedEmbeddingModel],
    postUpdate: [handleVectorStoreReset],
  },
  EmbeddingBasePath: {
    envKey: "EMBEDDING_BASE_PATH",
    checks: [isNotEmpty, validDockerizedUrl],
  },
  EmbeddingModelPref: {
    envKey: "EMBEDDING_MODEL_PREF",
    checks: [isNotEmpty],
    postUpdate: [handleVectorStoreReset, downloadEmbeddingModelIfRequired],
  },
  EmbeddingModelMaxChunkLength: {
    envKey: "EMBEDDING_MODEL_MAX_CHUNK_LENGTH",
    checks: [nonZero],
  },
  OllamaEmbeddingBatchSize: {
    envKey: "OLLAMA_EMBEDDING_BATCH_SIZE",
    checks: [nonZero],
  },
  // Gemini Embedding Settings
  GeminiEmbeddingApiKey: {
    envKey: "GEMINI_EMBEDDING_API_KEY",
    checks: [isNotEmpty],
  },
  // Generic OpenAI Embedding Settings
  GenericOpenAiEmbeddingApiKey: {
    envKey: "GENERIC_OPEN_AI_EMBEDDING_API_KEY",
    checks: [],
  },
  GenericOpenAiEmbeddingMaxConcurrentChunks: {
    envKey: "GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS",
    checks: [nonZero],
  },
  // Vector Database Selection Settings
  VectorDB: {
    envKey: "VECTOR_DB",
    checks: [isNotEmpty, supportedVectorDB],
    postUpdate: [handleVectorStoreReset],
  },
  // Chroma Options
  ChromaEndpoint: {
    envKey: "CHROMA_ENDPOINT",
    checks: [isValidURL, validChromaURL, validDockerizedUrl],
  },
  ChromaApiHeader: {
    envKey: "CHROMA_API_HEADER",
    checks: [],
  },
  ChromaApiKey: {
    envKey: "CHROMA_API_KEY",
    checks: [],
  },
  // ChromaCloud Options
  ChromaCloudApiKey: {
    envKey: "CHROMACLOUD_API_KEY",
    checks: [isNotEmpty],
  },
  ChromaCloudTenant: {
    envKey: "CHROMACLOUD_TENANT",
    checks: [isNotEmpty],
  },
  ChromaCloudDatabase: {
    envKey: "CHROMACLOUD_DATABASE",
    checks: [isNotEmpty],
  },
  // Weaviate Options
  WeaviateEndpoint: {
    envKey: "WEAVIATE_ENDPOINT",
    checks: [isValidURL, validDockerizedUrl],
  },
  WeaviateApiKey: {
    envKey: "WEAVIATE_API_KEY",
    checks: [],
  },
  // QDrant Options
  QdrantEndpoint: {
    envKey: "QDRANT_ENDPOINT",
    checks: [isValidURL, validDockerizedUrl],
  },
  QdrantApiKey: {
    envKey: "QDRANT_API_KEY",
    checks: [],
  },
  PineConeKey: {
    envKey: "PINECONE_API_KEY",
    checks: [],
  },
  PineConeIndex: {
    envKey: "PINECONE_INDEX",
    checks: [],
  },
  // Milvus Options
  MilvusAddress: {
    envKey: "MILVUS_ADDRESS",
    checks: [isValidURL, validDockerizedUrl],
  },
  MilvusUsername: {
    envKey: "MILVUS_USERNAME",
    checks: [isNotEmpty],
  },
  MilvusPassword: {
    envKey: "MILVUS_PASSWORD",
    checks: [isNotEmpty],
  },
  // Zilliz Cloud Options
  ZillizEndpoint: {
    envKey: "ZILLIZ_ENDPOINT",
    checks: [isValidURL],
  },
  ZillizApiToken: {
    envKey: "ZILLIZ_API_TOKEN",
    checks: [isNotEmpty],
  },
  // Astra DB Options
  AstraDBApplicationToken: {
    envKey: "ASTRA_DB_APPLICATION_TOKEN",
    checks: [isNotEmpty],
  },
  AstraDBEndpoint: {
    envKey: "ASTRA_DB_ENDPOINT",
    checks: [isNotEmpty],
  },
  /*
  PGVector Options
  - Does very simple validations - we should expand this in the future
  - to ensure the connection string is valid and the table name is valid
  - via direct query
  */
  PGVectorConnectionString: {
    envKey: "PGVECTOR_CONNECTION_STRING",
    checks: [isNotEmpty, looksLikePostgresConnectionString],
    preUpdate: [validatePGVectorConnectionString],
  },
  PGVectorTableName: {
    envKey: "PGVECTOR_TABLE_NAME",
    checks: [isNotEmpty],
    preUpdate: [validatePGVectorTableName],
  },
  // Together Ai Options
  TogetherAiApiKey: {
    envKey: "TOGETHER_AI_API_KEY",
    checks: [isNotEmpty],
  },
  TogetherAiModelPref: {
    envKey: "TOGETHER_AI_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Fireworks AI Options
  FireworksAiLLMApiKey: {
    envKey: "FIREWORKS_AI_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  FireworksAiLLMModelPref: {
    envKey: "FIREWORKS_AI_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Perplexity Options
  PerplexityApiKey: {
    envKey: "PERPLEXITY_API_KEY",
    checks: [isNotEmpty],
  },
  PerplexityModelPref: {
    envKey: "PERPLEXITY_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // OpenRouter Options
  OpenRouterApiKey: {
    envKey: "OPENROUTER_API_KEY",
    checks: [isNotEmpty],
  },
  OpenRouterModelPref: {
    envKey: "OPENROUTER_MODEL_PREF",
    checks: [isNotEmpty],
  },
  OpenRouterTimeout: {
    envKey: "OPENROUTER_TIMEOUT_MS",
    checks: [],
  },
  // Novita Options
  NovitaLLMApiKey: {
    envKey: "NOVITA_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  NovitaLLMModelPref: {
    envKey: "NOVITA_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  NovitaLLMTimeout: {
    envKey: "NOVITA_LLM_TIMEOUT_MS",
    checks: [],
  },
  // Groq Options
  GroqApiKey: {
    envKey: "GROQ_API_KEY",
    checks: [isNotEmpty],
  },
  GroqModelPref: {
    envKey: "GROQ_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Cohere Options
  CohereApiKey: {
    envKey: "COHERE_API_KEY",
    checks: [isNotEmpty],
  },
  CohereModelPref: {
    envKey: "COHERE_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // VoyageAi Options
  VoyageAiApiKey: {
    envKey: "VOYAGEAI_API_KEY",
    checks: [isNotEmpty],
  },
  // Whisper (transcription) providers
  WhisperProvider: {
    envKey: "WHISPER_PROVIDER",
    checks: [isNotEmpty, supportedTranscriptionProvider],
    postUpdate: [],
  },
  WhisperModelPref: {
    envKey: "WHISPER_MODEL_PREF",
    checks: [validLocalWhisper],
    postUpdate: [],
  },
  // System Settings
  AuthToken: {
    envKey: "AUTH_TOKEN",
    checks: [requiresForceMode, noRestrictedChars],
  },
  JWTSecret: {
    envKey: "JWT_SECRET",
    checks: [requiresForceMode],
  },
  DisableTelemetry: {
    envKey: "DISABLE_TELEMETRY",
    checks: [],
    preUpdate: [
      (_, __, nextValue) => {
        if (nextValue === "true") Telemetry.sendTelemetry("telemetry_disabled");
      },
    ],
  },
  // Agent Integration ENVs
  AgentGoogleSearchEngineId: {
    envKey: "AGENT_GSE_CTX",
    checks: [],
  },
  AgentGoogleSearchEngineKey: {
    envKey: "AGENT_GSE_KEY",
    checks: [],
  },
  AgentSerpApiKey: {
    envKey: "AGENT_SERPAPI_API_KEY",
    checks: [],
  },
  AgentSerpApiEngine: {
    envKey: "AGENT_SERPAPI_ENGINE",
    checks: [],
  },
  AgentSearchApiKey: {
    envKey: "AGENT_SEARCHAPI_API_KEY",
    checks: [],
  },
  AgentSearchApiEngine: {
    envKey: "AGENT_SEARCHAPI_ENGINE",
    checks: [],
  },
  AgentSerperApiKey: {
    envKey: "AGENT_SERPER_DEV_KEY",
    checks: [],
  },
  AgentBingSearchApiKey: {
    envKey: "AGENT_BING_SEARCH_API_KEY",
    checks: [],
  },
  AgentSerplyApiKey: {
    envKey: "AGENT_SERPLY_API_KEY",
    checks: [],
  },
  AgentSearXNGApiUrl: {
    envKey: "AGENT_SEARXNG_API_URL",
    checks: [],
  },
  AgentTavilyApiKey: {
    envKey: "AGENT_TAVILY_API_KEY",
    checks: [],
  },
  AgentExaApiKey: {
    envKey: "AGENT_EXA_API_KEY",
    checks: [],
  },
  // TTS/STT Integration ENVS
  TextToSpeechProvider: {
    envKey: "TTS_PROVIDER",
    checks: [supportedTTSProvider],
  },
  // TTS OpenAI
  TTSOpenAIKey: {
    envKey: "TTS_OPEN_AI_KEY",
    checks: [validOpenAIKey],
  },
  TTSOpenAIVoiceModel: {
    envKey: "TTS_OPEN_AI_VOICE_MODEL",
    checks: [],
  },
  // TTS ElevenLabs
  TTSElevenLabsKey: {
    envKey: "TTS_ELEVEN_LABS_KEY",
    checks: [isNotEmpty],
  },
  TTSElevenLabsVoiceModel: {
    envKey: "TTS_ELEVEN_LABS_VOICE_MODEL",
    checks: [],
  },
  // PiperTTS Local
  TTSPiperTTSVoiceModel: {
    envKey: "TTS_PIPER_VOICE_MODEL",
    checks: [],
  },
  // OpenAI Generic TTS
  TTSOpenAICompatibleKey: {
    envKey: "TTS_OPEN_AI_COMPATIBLE_KEY",
    checks: [],
  },
  TTSOpenAICompatibleModel: {
    envKey: "TTS_OPEN_AI_COMPATIBLE_MODEL",
    checks: [],
  },
  TTSOpenAICompatibleVoiceModel: {
    envKey: "TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL",
    checks: [isNotEmpty],
  },
  TTSOpenAICompatibleEndpoint: {
    envKey: "TTS_OPEN_AI_COMPATIBLE_ENDPOINT",
    checks: [isValidURL],
  },
  // DeepSeek Options
  DeepSeekApiKey: {
    envKey: "DEEPSEEK_API_KEY",
    checks: [isNotEmpty],
  },
  DeepSeekModelPref: {
    envKey: "DEEPSEEK_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // APIPie Options
  ApipieLLMApiKey: {
    envKey: "APIPIE_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  ApipieLLMModelPref: {
    envKey: "APIPIE_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // xAI Options
  XAIApiKey: {
    envKey: "XAI_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  XAIModelPref: {
    envKey: "XAI_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Nvidia NIM Options
  NvidiaNimLLMBasePath: {
    envKey: "NVIDIA_NIM_LLM_BASE_PATH",
    checks: [isValidURL],
    postUpdate: [
      (_, __, nextValue) => {
        const { parseNvidiaNimBasePath } = require("../AiProviders/nvidiaNim");
        process.env.NVIDIA_NIM_LLM_BASE_PATH =
          parseNvidiaNimBasePath(nextValue);
      },
    ],
  },
  NvidiaNimLLMModelPref: {
    envKey: "NVIDIA_NIM_LLM_MODEL_PREF",
    checks: [],
    postUpdate: [
      async (_, __, nextValue) => {
        const { NvidiaNimLLM } = require("../AiProviders/nvidiaNim");
        await NvidiaNimLLM.setModelTokenLimit(nextValue);
      },
    ],
  },
  // PPIO Options
  PPIOApiKey: {
    envKey: "PPIO_API_KEY",
    checks: [isNotEmpty],
  },
  PPIOModelPref: {
    envKey: "PPIO_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Moonshot AI Options
  MoonshotAiApiKey: {
    envKey: "MOONSHOT_AI_API_KEY",
    checks: [isNotEmpty],
  },
  MoonshotAiModelPref: {
    envKey: "MOONSHOT_AI_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Foundry Options
  FoundryBasePath: {
    envKey: "FOUNDRY_BASE_PATH",
    checks: [isNotEmpty],
  },
  FoundryModelPref: {
    envKey: "FOUNDRY_MODEL_PREF",
    checks: [isNotEmpty],
    postUpdate: [
      // On new model selection, re-cache the context windows
      async (_, prevValue, __) => {
        const { FoundryLLM } = require("../AiProviders/foundry");
        await FoundryLLM.unloadModelFromEngine(prevValue);
        await FoundryLLM.cacheContextWindows(true);
      },
    ],
  },
  FoundryModelTokenLimit: {
    envKey: "FOUNDRY_MODEL_TOKEN_LIMIT",
    checks: [],
  },
  // CometAPI Options
  CometApiLLMApiKey: {
    envKey: "COMETAPI_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  CometApiLLMModelPref: {
    envKey: "COMETAPI_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  CometApiLLMTimeout: {
    envKey: "COMETAPI_LLM_TIMEOUT_MS",
    checks: [],
  },
  // Z.AI Options
  ZAiApiKey: {
    envKey: "ZAI_API_KEY",
    checks: [isNotEmpty],
  },
  ZAiModelPref: {
    envKey: "ZAI_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // GiteeAI Options
  GiteeAIApiKey: {
    envKey: "GITEE_AI_API_KEY",
    checks: [isNotEmpty],
  },
  GiteeAIModelPref: {
    envKey: "GITEE_AI_MODEL_PREF",
    checks: [isNotEmpty],
  },
  GiteeAITokenLimit: {
    envKey: "GITEE_AI_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
};
// Validator: reject null/undefined/"" (any falsy value) and zero-length values.
function isNotEmpty(input = "") {
  if (!input) return "Value cannot be empty";
  if (input.length === 0) return "Value cannot be empty";
  return null;
}
// Validator: value must coerce to a number strictly greater than zero.
function nonZero(input = "") {
  const value = Number(input);
  if (Number.isNaN(value)) return "Value must be a number";
  if (value <= 0) return "Value must be greater than zero";
  return null;
}
// Validator: value must coerce to an integer.
// BUGFIX: previously returned `Number(input)` for any numeric input; since
// the check harness treats any non-string return as a pass, every numeric
// value (including fractions like "1.5") was accepted and integer-ness was
// never enforced. Now returns an error string for non-integers.
function isInteger(input = "") {
  const value = Number(input);
  if (isNaN(value)) return "Value must be a number";
  return Number.isInteger(value) ? null : "Value must be an integer";
}
// Validator: input must be a parseable absolute URL (new URL throws otherwise).
function isValidURL(input = "") {
  let parsed = null;
  try {
    parsed = new URL(input);
  } catch {
    // not parseable — fall through to the error below
  }
  return parsed ? null : "URL is not a valid URL.";
}
// Validator: OpenAI secret keys carry an "sk-" prefix.
function validOpenAIKey(input = "") {
  if (input.startsWith("sk-")) return null;
  return "OpenAI Key must start with sk-";
}
// Validator: Anthropic secret keys carry an "sk-ant-" prefix.
function validAnthropicApiKey(input = "") {
  if (input.startsWith("sk-ant-")) return null;
  return "Anthropic Key must start with sk-ant-";
}
// Validator for OpenAI-compatible base paths: must be a parseable URL that
// mentions "v1" and does not end with a trailing slash.
function validLLMExternalBasePath(input = "") {
  try {
    new URL(input);
  } catch {
    return "Not a valid URL";
  }
  if (!input.includes("v1")) return "URL must include /v1";
  if (input.endsWith("/")) return "URL cannot end with a slash";
  return null;
}
// Validator for Ollama base paths: parseable URL with no trailing slash.
function validOllamaLLMBasePath(input = "") {
  try {
    new URL(input);
  } catch {
    return "Not a valid URL";
  }
  if (input.endsWith("/")) return "URL cannot end with a slash";
  return null;
}
// Validator: TTS provider must be one of the known provider keys.
function supportedTTSProvider(input = "") {
  const providers = new Set([
    "native",
    "openai",
    "elevenlabs",
    "piper_local",
    "generic-openai",
  ]);
  if (providers.has(input)) return null;
  return `${input} is not a valid TTS provider.`;
}
// Validator: local Whisper model must be one of the bundled Xenova builds.
function validLocalWhisper(input = "") {
  const knownModels = ["Xenova/whisper-small", "Xenova/whisper-large"];
  if (knownModels.includes(input)) return null;
  return `${input} is not a valid Whisper model selection.`;
}
// Validator: LLM provider key must be one of the providers AnythingLLM ships.
function supportedLLM(input = "") {
  const providers = new Set([
    "openai",
    "azure",
    "anthropic",
    "gemini",
    "lmstudio",
    "localai",
    "ollama",
    "togetherai",
    "fireworksai",
    "mistral",
    "huggingface",
    "perplexity",
    "openrouter",
    "novita",
    "groq",
    "koboldcpp",
    "textgenwebui",
    "cohere",
    "litellm",
    "generic-openai",
    "bedrock",
    "deepseek",
    "apipie",
    "xai",
    "nvidia-nim",
    "ppio",
    "dpais",
    "moonshotai",
    "cometapi",
    "foundry",
    "zai",
    "giteeai",
  ]);
  if (providers.has(input)) return null;
  return `${input} is not a valid LLM provider.`;
}
// Validator: transcription provider must be "openai" or "local".
function supportedTranscriptionProvider(input = "") {
  if (["openai", "local"].includes(input)) return null;
  return `${input} is not a valid transcription model provider.`;
}
// Validator: Gemini safety threshold must be one of the documented modes.
function validGeminiSafetySetting(input = "") {
  const validModes = [
    "BLOCK_NONE",
    "BLOCK_ONLY_HIGH",
    "BLOCK_MEDIUM_AND_ABOVE",
    "BLOCK_LOW_AND_ABOVE",
  ];
  if (validModes.includes(input)) return null;
  return `Invalid Safety setting. Must be one of ${validModes.join(", ")}.`;
}
// Validator: embedding engine key must be one of the supported engines.
function supportedEmbeddingModel(input = "") {
  const supported = [
    "openai",
    "azure",
    "gemini",
    "localai",
    "native",
    "ollama",
    "lmstudio",
    "cohere",
    "voyageai",
    "litellm",
    "generic-openai",
    "mistral",
    "openrouter",
  ];
  if (supported.includes(input)) return null;
  return `Invalid Embedding model type. Must be one of ${supported.join(", ")}.`;
}
// Validator: vector DB key must be one of the supported providers.
function supportedVectorDB(input = "") {
  const supported = [
    "chroma",
    "chromacloud",
    "pinecone",
    "lancedb",
    "weaviate",
    "qdrant",
    "milvus",
    "zilliz",
    "astra",
    "pgvector",
  ];
  if (supported.includes(input)) return null;
  return `Invalid VectorDB type. Must be one of ${supported.join(", ")}.`;
}
// Validator: the Chroma endpoint must not carry a trailing slash.
function validChromaURL(input = "") {
  if (!input.endsWith("/")) return null;
  return `Chroma Instance URL should not end in a trailing slash.`;
}
// Validator: token limit only needs to be numeric (note: Number("") is 0,
// so an empty string passes — preserved from original behavior).
function validOpenAiTokenLimit(input = "") {
  return isNaN(Number(input)) ? "Token limit is not a number" : null;
}
// Validator: the setting may only be written when force mode is explicitly on.
function requiresForceMode(_, forceModeEnabled = false) {
  if (forceModeEnabled === true) return null;
  return "Cannot set this setting.";
}
// Validator (docker runtime only): a loopback URL must point at a port that
// is actually reachable from inside the container; otherwise guide the user
// toward host.docker.internal / a real IP.
async function validDockerizedUrl(input = "") {
  // Outside the docker runtime this check is a no-op.
  if (process.env.ANYTHING_LLM_RUNTIME !== "docker") return null;

  try {
    const { isPortInUse, getLocalHosts } = require("./portAvailabilityChecker");
    const loopbackHosts = getLocalHosts();
    const parsed = new URL(input);
    const host = parsed.hostname.toLowerCase();
    const portNumber = parseInt(parsed.port, 10);

    // Non-loopback targets are assumed reachable — skip the probe.
    if (!loopbackHosts.includes(host)) return null;
    if (isNaN(portNumber))
      return "Invalid URL: Port is not specified or invalid";

    const reachable = await isPortInUse(portNumber, host);
    if (reachable)
      return "Port is not running a reachable service on loopback address from inside the AnythingLLM container. Please use host.docker.internal (for linux use 172.17.0.1), a real machine ip, or domain to connect to your service.";
  } catch (error) {
    console.error(error.message);
    return "An error occurred while validating the URL";
  }
  return null;
}
// Validator: dedicated HF inference endpoints end in ".cloud".
function validHuggingFaceEndpoint(input = "") {
  if (input.endsWith(".cloud")) return null;
  return `Your HF Endpoint should end in ".cloud"`;
}
// Validator: password may only use alphanumerics plus a small symbol set
// (anything else — quotes, spaces, etc. — is rejected).
function noRestrictedChars(input = "") {
  const allowedPattern = /^[a-zA-Z0-9_\-!@$%^&*();]+$/;
  if (allowedPattern.test(input)) return null;
  return `Your password has restricted characters in it. Allowed symbols are _,-,!,@,$,%,^,&,*,(,),;`;
}
// postUpdate hook: wipe vector namespaces when the vector DB or the
// embedder configuration actually changes. No-op when the value is unchanged
// or the key is not one we care about (returns false in that case).
async function handleVectorStoreReset(key, prevValue, nextValue) {
  if (prevValue === nextValue) return;

  if (key === "VectorDB") {
    // Reset the namespaces in the *previous* provider before switching away.
    console.log(
      `Vector configuration changed from ${prevValue} to ${nextValue} - resetting ${prevValue} namespaces`
    );
    return await resetAllVectorStores({ vectorDbKey: prevValue });
  }

  if (["EmbeddingEngine", "EmbeddingModelPref"].includes(key)) {
    // Embedding dimensions/semantics changed: stored vectors are stale.
    console.log(
      `${key} changed from ${prevValue} to ${nextValue} - resetting ${process.env.VECTOR_DB} namespaces`
    );
    return await resetAllVectorStores({ vectorDbKey: process.env.VECTOR_DB });
  }
  return false;
}
/**
 * Downloads the embedding model in background if the user has selected a different model
 * - Only supported for the native embedder
 * - Must have the native embedder selected prior (otherwise will download on embed)
 */
async function downloadEmbeddingModelIfRequired(key, prevValue, nextValue) {
  if (prevValue === nextValue) return;
  const isNativeModelChange =
    key === "EmbeddingModelPref" && process.env.EMBEDDING_ENGINE === "native";
  if (!isNativeModelChange) return;

  const { NativeEmbedder } = require("../EmbeddingEngines/native");
  // Unknown model id: skip — it will be fetched lazily on first embed.
  if (!NativeEmbedder.supportedModels[nextValue]) return;
  // Instantiating the client kicks off the background download.
  new NativeEmbedder().embedderClient();
  return false;
}
/**
 * Shallow sanity check of a Postgres connection string for PGVector.
 * @param {string|null} connectionString - The Postgres connection string to validate.
 * @returns {Promise<string|null>} Error message when invalid, otherwise null.
 */
async function looksLikePostgresConnectionString(connectionString = null) {
  const hasScheme =
    !!connectionString && connectionString.startsWith("postgresql://");
  if (!hasScheme)
    return "Invalid Postgres connection string. Must start with postgresql://";
  if (connectionString.includes(" "))
    return "Invalid Postgres connection string. Must not contain spaces.";
  return null;
}
/**
 * preUpdate hook: validates the PGVector connection string by opening a real
 * connection. Skipped when the value is unchanged, empty, or already live.
 * @param {string} key - The setting key being validated.
 * @param {string} prevValue - The previous value of the key.
 * @param {string} nextValue - The candidate value of the key.
 * @returns {Promise<string|null|undefined>} Error message to abort the update, otherwise null/undefined.
 */
async function validatePGVectorConnectionString(key, prevValue, nextValue) {
  const envKey = KEY_MAPPING[key].envKey;
  // Skip validation when nothing actually changes.
  if (prevValue === nextValue) return;
  if (!nextValue) return;
  if (nextValue === process.env[envKey]) return;

  const { PGVector } = require("../vectorDbProviders/pgvector");
  const { error, success } = await PGVector.validateConnection({
    connectionString: nextValue,
  });
  if (!success) return error;

  // Set the ENV variable early so the subsequent table-name check can use it.
  process.env[envKey] = nextValue;
  return null;
}
/**
 * preUpdate hook: validates the PGVector table name against the live
 * connection (table should not already exist). Skipped when the value is
 * unchanged, empty, already live, or no connection string is configured yet.
 * @param {string} key - The setting key being validated.
 * @param {string} prevValue - The previous value of the key.
 * @param {string} nextValue - The candidate value of the key.
 * @returns {Promise<string|null|undefined>} Error message to abort the update, otherwise null/undefined.
 */
async function validatePGVectorTableName(key, prevValue, nextValue) {
  const envKey = KEY_MAPPING[key].envKey;
  // Skip validation when nothing actually changes or we cannot connect.
  if (prevValue === nextValue) return;
  if (!nextValue) return;
  if (nextValue === process.env[envKey]) return;
  if (!process.env.PGVECTOR_CONNECTION_STRING) return;

  const { PGVector } = require("../vectorDbProviders/pgvector");
  const { error, success } = await PGVector.validateConnection({
    connectionString: process.env.PGVECTOR_CONNECTION_STRING,
    tableName: nextValue,
  });
  if (!success) return error;
  return null;
}
// This will force update .env variables which for any which reason were not able to be parsed or
// read from an ENV file as this seems to be a complicating step for many so allowing people to write
// to the process will at least alleviate that issue. It does not perform comprehensive validity checks or sanity checks
// and is simply for debugging when the .env not found issue many come across.
/**
 * Validate and apply a batch of settings to process.env via KEY_MAPPING.
 * Per key: run `checks`, then `preUpdate` hooks, then write the env var,
 * then run `postUpdate` hooks. Processing stops at the FIRST key that fails
 * (any later keys in the batch are silently not applied).
 * @param {Object} newENVs - Map of KEY_MAPPING keys to candidate values.
 *   NOTE(review): values are assumed to be strings — `.includes` below would
 *   throw on non-string values; confirm callers only pass strings.
 * @param {boolean} force - Passed to validators (e.g. requiresForceMode).
 * @param {number|null} userId - Attributed to the event log entries.
 * @returns {Promise<{newValues: Object, error: string|false}>} applied values
 *   and a newline-joined error string (or false when everything succeeded).
 */
async function updateENV(newENVs = {}, force = false, userId = null) {
  let error = "";
  const validKeys = Object.keys(KEY_MAPPING);
  // Drop unknown keys and masked secrets (UI echoes saved keys as "******").
  const ENV_KEYS = Object.keys(newENVs).filter(
    (key) => validKeys.includes(key) && !newENVs[key].includes("******") // strip out answers where the value is all asterisks
  );
  const newValues = {};

  for (const key of ENV_KEYS) {
    const {
      envKey,
      checks,
      preUpdate = [],
      postUpdate = [],
    } = KEY_MAPPING[key];
    const prevValue = process.env[envKey];
    const nextValue = newENVs[key];

    // Run simple validators (concurrently) for this value.
    let errors = await executeValidationChecks(checks, nextValue, force);
    // If there are any errors from regular simple validation checks
    // exit early.
    if (errors.length > 0) {
      error += errors.join("\n");
      break;
    }

    // Accumulate errors from preUpdate functions
    errors = [];
    for (const preUpdateFunc of preUpdate) {
      const errorMsg = await preUpdateFunc(key, prevValue, nextValue);
      if (!!errorMsg && typeof errorMsg === "string") errors.push(errorMsg);
    }

    // If there are any errors from preUpdate functions
    // exit early.
    if (errors.length > 0) {
      error += errors.join("\n");
      break;
    }

    // Validation passed — commit the value and run post-commit hooks.
    newValues[key] = nextValue;
    process.env[envKey] = nextValue;

    for (const postUpdateFunc of postUpdate)
      await postUpdateFunc(key, prevValue, nextValue);
  }

  await logChangesToEventLog(newValues, userId);
  // Persist the committed values to disk only in production builds.
  if (process.env.NODE_ENV === "production") dumpENV();
  return { newValues, error: error?.length > 0 ? error : false };
}
// Run every validator concurrently against `value`; each resolves to an
// error string or a non-string (pass). Returns only the error strings.
async function executeValidationChecks(checks, value, force) {
  const outcomes = await Promise.all(
    checks.map((validator) => validator(value, force))
  );
  return outcomes.filter((outcome) => typeof outcome === "string");
}
// Record an event-log entry for the handful of settings worth auditing
// (provider / embedder / vector DB switches). Other keys are ignored.
async function logChangesToEventLog(newValues = {}, userId = null) {
  const { EventLogs } = require("../../models/eventLogs");
  const eventMapping = {
    LLMProvider: "update_llm_provider",
    EmbeddingEngine: "update_embedding_engine",
    VectorDB: "update_vector_db",
  };
  for (const [key, eventName] of Object.entries(eventMapping)) {
    if (!Object.prototype.hasOwnProperty.call(newValues, key)) continue;
    await EventLogs.logEvent(eventName, {}, userId);
  }
  return;
}
function dumpENV() {
const fs = require("fs");
const path = require("path");
const frozenEnvs = {};
const protectedKeys = [
...Object.values(KEY_MAPPING).map((values) => values.envKey),
// Manually Add Keys here which are not already defined in KEY_MAPPING
// and are either managed or manually set ENV key:values.
"JWT_EXPIRY",
"STORAGE_DIR",
"SERVER_PORT",
// For persistent data encryption
"SIG_KEY",
"SIG_SALT",
// Password Schema Keys if present.
"PASSWORDMINCHAR",
"PASSWORDMAXCHAR",
"PASSWORDLOWERCASE",
"PASSWORDUPPERCASE",
"PASSWORDNUMERIC",
"PASSWORDSYMBOL",
"PASSWORDREQUIREMENTS",
// HTTPS SETUP KEYS
"ENABLE_HTTPS",
"HTTPS_CERT_PATH",
"HTTPS_KEY_PATH",
// Other Configuration Keys
"DISABLE_VIEW_CHAT_HISTORY",
// Simple SSO
"SIMPLE_SSO_ENABLED",
"SIMPLE_SSO_NO_LOGIN",
"SIMPLE_SSO_NO_LOGIN_REDIRECT",
// Community Hub
"COMMUNITY_HUB_BUNDLE_DOWNLOADS_ENABLED",
// Nvidia NIM Keys that are automatically managed
"NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT",
// OCR Language Support
"TARGET_OCR_LANG",
// Collector API common ENV - allows bypassing URL validation checks
"COLLECTOR_ALLOW_ANY_IP",
// Allow disabling of streaming for generic openai
"GENERIC_OPENAI_STREAMING_DISABLED",
// Specify Chromium args for collector
"ANYTHINGLLM_CHROMIUM_ARGS",
// Allow setting a custom response timeout for Ollama
"OLLAMA_RESPONSE_TIMEOUT",
// Allow disabling of MCP tool cooldown
"MCP_NO_COOLDOWN",
];
// Simple sanitization of each value to prevent ENV injection via newline or quote escaping.
function sanitizeValue(value) {
const offendingChars =
/[\n\r\t\v\f\u0085\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000"'`#]/;
const firstOffendingCharIndex = value.search(offendingChars);
if (firstOffendingCharIndex === -1) return value;
return value.substring(0, firstOffendingCharIndex);
}
for (const key of protectedKeys) {
const envValue = process.env?.[key] || null;
if (!envValue) continue;
frozenEnvs[key] = process.env?.[key] || null;
}
var envResult = `# Auto-dump ENV from system call on ${new Date().toTimeString()}\n`;
envResult += Object.entries(frozenEnvs)
.map(([key, value]) => `${key}='${sanitizeValue(value)}'`)
.join("\n");
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | true |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/search.js | server/utils/helpers/search.js | const { Workspace } = require("../../models/workspace");
const { WorkspaceThread } = require("../../models/workspaceThread");
const fastLevenshtein = require("fast-levenshtein");
// allow a pretty loose levenshtein distance for the search
// since we would rather show a few more results than less
// (a name within 3 single-character edits of the query counts as a match)
const FAST_LEVENSHTEIN_DISTANCE = 3;
/**
 * Search for workspaces and threads based on a search term with optional user context.
 * For each type of item we are looking at the `name` field.
 * - If the normalized name starts with, includes, or ends with the search term => match
 * - If the normalized name is within FAST_LEVENSHTEIN_DISTANCE edits of the search term => match
 * Search terms shorter than 3 characters (after trimming) return no results.
 * @param {string} searchTerm - The search term to search for.
 * @param {Object} user - The user to scope the search to (null = all).
 * @returns {Promise<{workspaces: Array<{slug: string, name: string}>, threads: Array<{slug: string, name: string, workspace: {slug: string, name: string}}>}>} - The search results.
 */
async function searchWorkspaceAndThreads(searchTerm, user = null) {
  const query = String(searchTerm).trim().toLowerCase();
  if (!query || query.length < 3) return { workspaces: [], threads: [] };

  // Matching a name against the query: prefix/substring/suffix hit, or a
  // loose fuzzy match via levenshtein distance.
  const matches = (name) =>
    name.startsWith(query) ||
    name.includes(query) ||
    name.endsWith(query) ||
    fastLevenshtein.get(name, query) <= FAST_LEVENSHTEIN_DISTANCE;

  // Dedupe in O(1) by storing STRINGIFIED match records in sets, then
  // parsing them back into objects at the end.
  const matchedWorkspaces = new Set();
  const matchedThreads = new Set();

  const collectWorkspaces = async () => {
    const workspaces = !!user
      ? await Workspace.whereWithUser(user)
      : await Workspace.where();
    for (const workspace of workspaces) {
      if (!matches(workspace.name.toLowerCase())) continue;
      matchedWorkspaces.add(
        JSON.stringify({ slug: workspace.slug, name: workspace.name })
      );
    }
  };

  const collectThreads = async () => {
    const threads = !!user
      ? await WorkspaceThread.where(
          { user_id: user.id },
          undefined,
          undefined,
          { workspace: { select: { slug: true, name: true } } }
        )
      : await WorkspaceThread.where(undefined, undefined, undefined, {
          workspace: { select: { slug: true, name: true } },
        });
    for (const thread of threads) {
      if (!matches(thread.name.toLowerCase())) continue;
      matchedThreads.add(
        JSON.stringify({
          slug: thread.slug,
          name: thread.name,
          workspace: {
            slug: thread.workspace.slug,
            name: thread.workspace.name,
          },
        })
      );
    }
  };

  // Run both searches in parallel — they fill the sets in place.
  await Promise.all([collectWorkspaces(), collectThreads()]);

  return {
    workspaces: Array.from(matchedWorkspaces).map((item) => JSON.parse(item)),
    threads: Array.from(matchedThreads).map((item) => JSON.parse(item)),
  };
}
module.exports = { searchWorkspaceAndThreads };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/chat/LLMPerformanceMonitor.js | server/utils/helpers/chat/LLMPerformanceMonitor.js | const { TokenManager } = require("../tiktoken");
/**
* @typedef {import("openai/streaming").Stream<import("openai").OpenAI.ChatCompletionChunk>} OpenAICompatibleStream
* @typedef {(reportedUsage: {[key: string]: number, completion_tokens?: number, prompt_tokens?: number}) => StreamMetrics} EndMeasurementFunction
* @typedef {Array<{content: string}>} Messages
*/
/**
* @typedef {Object} StreamMetrics
* @property {number} prompt_tokens - the number of tokens in the prompt
* @property {number} completion_tokens - the number of tokens in the completion
* @property {number} total_tokens - the total number of tokens
* @property {number} outputTps - the tokens per second of the output
* @property {number} duration - the duration of the stream
*/
/**
* @typedef {Object} MonitoredStream
* @property {number} start - the start time of the stream
* @property {number} duration - the duration of the stream
* @property {StreamMetrics} metrics - the metrics of the stream
* @property {EndMeasurementFunction} endMeasurement - the method to end the stream and calculate the metrics
*/
/**
 * Utility for measuring token usage and throughput of LLM calls.
 * Wraps pending completions and streams to attach timing + token metrics.
 */
class LLMPerformanceMonitor {
  // Shared tokenizer used to estimate prompt tokens when a provider does not
  // report usage itself.
  static tokenManager = new TokenManager();
  /**
   * Counts the tokens in the messages.
   * @param {Array<{content: string}>} messages - the messages sent to the LLM so we can calculate the prompt tokens since most providers do not return this on stream
   * @returns {number} token count, or 0 if the tokenizer throws for any reason
   */
  static countTokens(messages = []) {
    try {
      return this.tokenManager.statsFrom(messages);
    } catch (e) {
      // Swallow tokenizer failures - a 0 estimate is preferable to crashing
      // the chat pipeline over a metrics calculation.
      return 0;
    }
  }
  /**
   * Wraps a function and logs the duration (in seconds) of the function call.
   * If the output contains a `usage.duration` property, it will be used instead of the calculated duration.
   * This allows providers to supply more accurate timing information.
   * @param {Promise<any>} func - the pending completion promise to measure
   * @returns {Promise<{output: any, duration: number}>}
   */
  static measureAsyncFunction(func) {
    return (async () => {
      const start = Date.now();
      const output = await func; // is a promise
      const end = Date.now();
      // Prefer the provider-reported duration when present.
      const duration = output?.usage?.duration ?? (end - start) / 1000;
      return { output, duration };
    })();
  }
  /**
   * Wraps a completion stream and attaches a start time and duration property to the stream.
   * Also attaches an `endMeasurement` method to the stream that will calculate the duration of the stream and metrics.
   * @param {Object} opts
   * @param {Promise<OpenAICompatibleStream>} opts.func
   * @param {Messages} [opts.messages=[]] - the messages sent to the LLM so we can calculate the prompt tokens since most providers do not return this on stream
   * @param {boolean} [opts.runPromptTokenCalculation=true] - whether to run the prompt token calculation to estimate the `prompt_tokens` metric. This is useful for providers that do not return this on stream.
   * @param {string} [opts.modelTag=""] - the tag of the model that was used to generate the stream (eg: gpt-4o, claude-3-5-sonnet, qwen3/72b-instruct, etc.)
   * @returns {Promise<MonitoredStream>}
   */
  static async measureStream({
    func,
    messages = [],
    runPromptTokenCalculation = true,
    modelTag = "",
  }) {
    const stream = await func;
    stream.start = Date.now();
    stream.duration = 0;
    stream.metrics = {
      completion_tokens: 0,
      prompt_tokens: runPromptTokenCalculation ? this.countTokens(messages) : 0,
      total_tokens: 0,
      outputTps: 0,
      duration: 0,
      ...(modelTag ? { model: modelTag } : {}),
    };
    // Finalizes metrics; provider-reported usage (if any) overrides our
    // estimates key-by-key before derived values are computed.
    stream.endMeasurement = (reportedUsage = {}) => {
      const end = Date.now();
      const estimatedDuration = (end - stream.start) / 1000;

      // Merge the reported usage with the existing metrics
      // so the math in the metrics object is correct when calculating
      stream.metrics = {
        ...stream.metrics,
        ...reportedUsage,
        duration: reportedUsage?.duration ?? estimatedDuration,
        timestamp: new Date(),
      };

      stream.metrics.total_tokens =
        stream.metrics.prompt_tokens + (stream.metrics.completion_tokens || 0);
      // NOTE(review): a duration of 0 yields Infinity/NaN here - confirm
      // downstream consumers tolerate that before changing it.
      stream.metrics.outputTps =
        stream.metrics.completion_tokens / stream.metrics.duration;
      return stream.metrics;
    };
    return stream;
  }
}
module.exports = {
LLMPerformanceMonitor,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/chat/responses.js | server/utils/helpers/chat/responses.js | const { v4: uuidv4 } = require("uuid");
const moment = require("moment");
/**
 * Resolves a streaming handler's promise early when the client aborts the
 * connection, preserving whatever text was generated before the abort.
 * @param {(text: string) => void} resolve - resolver of the stream handler's promise
 * @param {string} fullText - text accumulated so far
 */
function clientAbortedHandler(resolve, fullText) {
  const abortNotice =
    "\x1b[43m\x1b[34m[STREAM ABORTED]\x1b[0m Client requested to abort stream. Exiting LLM stream handler early.";
  console.log(abortNotice);
  resolve(fullText);
}
/**
 * Handles the default stream response for a chat.
 * Pipes `textResponseChunk` SSE events to the client as tokens arrive, tracks
 * token usage (provider-reported when available, otherwise estimated one token
 * per chunk), and resolves with the full accumulated text.
 * @param {import("express").Response} response
 * @param {import('./LLMPerformanceMonitor').MonitoredStream} stream
 * @param {Object} responseProps
 * @param {string} [responseProps.uuid] - id attached to every chunk (generated if absent)
 * @param {Object[]} [responseProps.sources] - citations emitted with the closing chunk
 * @returns {Promise<string>} full text of the streamed response
 */
function handleDefaultStreamResponseV2(response, stream, responseProps) {
  const { uuid = uuidv4(), sources = [] } = responseProps;

  // Why are we doing this?
  // OpenAI do enable the usage metrics in the stream response but:
  // 1. This parameter is not available in our current API version (TODO: update)
  // 2. The usage metrics are not available in _every_ provider that uses this function
  // 3. We need to track the usage metrics for every provider that uses this function - not just OpenAI
  // Other keys are added by the LLMPerformanceMonitor.measureStream method
  let hasUsageMetrics = false;
  let usage = {
    // prompt_tokens can be in this object if the provider supports it - otherwise we manually count it
    // When the stream is created in the LLMProviders `streamGetChatCompletion` `LLMPerformanceMonitor.measureStream` call.
    completion_tokens: 0,
  };

  return new Promise(async (resolve) => {
    let fullText = "";

    // Establish listener to early-abort a streaming response
    // in case things go sideways or the user does not like the response.
    // We preserve the generated text but continue as if chat was completed
    // to preserve previously generated content.
    const handleAbort = () => {
      stream?.endMeasurement(usage);
      clientAbortedHandler(resolve, fullText);
    };
    response.on("close", handleAbort);

    // Now handle the chunks from the streamed response and append to fullText.
    try {
      for await (const chunk of stream) {
        const message = chunk?.choices?.[0];
        const token = message?.delta?.content;

        // If we see usage metrics in the chunk, we can use them directly
        // instead of estimating them, but we only want to assign values if
        // the response object is the exact same key:value pair we expect.
        if (
          chunk.hasOwnProperty("usage") && // exists
          !!chunk.usage && // is not null
          Object.values(chunk.usage).length > 0 // has values
        ) {
          if (chunk.usage.hasOwnProperty("prompt_tokens")) {
            usage.prompt_tokens = Number(chunk.usage.prompt_tokens);
          }

          if (chunk.usage.hasOwnProperty("completion_tokens")) {
            hasUsageMetrics = true; // to stop estimating counter
            usage.completion_tokens = Number(chunk.usage.completion_tokens);
          }
        }

        if (token) {
          fullText += token;
          // If we never saw a usage metric, we can estimate them by number of completion chunks
          if (!hasUsageMetrics) usage.completion_tokens++;
          writeResponseChunk(response, {
            uuid,
            sources: [],
            type: "textResponseChunk",
            textResponse: token,
            close: false,
            error: false,
          });
        }

        // LocalAi returns '' and others return null on chunks - the last chunk is not "" or null.
        // Either way, the key `finish_reason` must be present to determine ending chunk.
        if (
          message?.hasOwnProperty("finish_reason") && // Got valid message and it is an object with finish_reason
          message.finish_reason !== "" &&
          message.finish_reason !== null
        ) {
          // Closing chunk carries the sources; per-token chunks send none.
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
          response.removeListener("close", handleAbort);
          stream?.endMeasurement(usage);
          resolve(fullText);
          break; // Break streaming when a valid finish_reason is first encountered
        }
      }
    } catch (e) {
      // Surface the provider/stream failure to the client as an abort event,
      // then resolve with whatever text we managed to accumulate.
      console.log(`\x1b[43m\x1b[34m[STREAMING ERROR]\x1b[0m ${e.message}`);
      writeResponseChunk(response, {
        uuid,
        type: "abort",
        textResponse: null,
        sources: [],
        close: true,
        error: e.message,
      });
      stream?.endMeasurement(usage);
      resolve(fullText); // Return what we currently have - if anything.
    }
  });
}
/**
 * Converts raw chat records from the database into the paired user/assistant
 * message format the frontend chat UI renders.
 * Records with a non-string prompt or response.text are skipped entirely
 * since they were likely stored errors and would fail to render.
 * @param {Object[]} history - raw workspace chat records (prompt, response JSON, createdAt, ...)
 * @returns {Object[]} flat array of alternating user/assistant display messages
 */
function convertToChatHistory(history = []) {
  const formattedHistory = [];
  for (const record of history) {
    const { prompt, response, createdAt, feedbackScore = null, id } = record;
    const data = JSON.parse(response);

    // In the event that a bad response was stored - we should skip its entire record
    // because it was likely an error and cannot be used in chats and will fail to render on UI.
    if (typeof prompt !== "string") {
      console.log(
        `[convertToChatHistory] ChatHistory #${record.id} prompt property is not a string - skipping record.`
      );
      continue;
    } else if (typeof data.text !== "string") {
      console.log(
        `[convertToChatHistory] ChatHistory #${record.id} response.text property is not a string - skipping record.`
      );
      continue;
    }

    formattedHistory.push([
      {
        role: "user",
        content: prompt,
        sentAt: moment(createdAt).unix(),
        attachments: data?.attachments ?? [],
        chatId: id,
      },
      {
        // Default to a plain "chat" message when no explicit type was stored.
        // (Previously defaulted to "chart", which would make every legacy
        // message without a stored type render as a chart visualization.)
        type: data?.type || "chat",
        role: "assistant",
        content: data.text,
        sources: data.sources || [],
        chatId: id,
        sentAt: moment(createdAt).unix(),
        feedbackScore,
        metrics: data?.metrics || {},
      },
    ]);
  }
  return formattedHistory.flat();
}
/**
 * Converts raw chat records into the role/content message pairs used when
 * building an LLM prompt. Bad records (non-string prompt or response.text)
 * are skipped entirely.
 * @param {Object[]} history - The chat history to convert
 * @returns {{role: string, content: string, attachments?: import("..").Attachment}[]}
 */
function convertToPromptHistory(history = []) {
  const promptMessages = [];
  history.forEach((record) => {
    const { prompt, response } = record;
    const data = JSON.parse(response);

    // Stored errors cannot be replayed into a prompt - drop the whole pair.
    if (typeof prompt !== "string") {
      console.log(
        `[convertToPromptHistory] ChatHistory #${record.id} prompt property is not a string - skipping record.`
      );
      return;
    }
    if (typeof data.text !== "string") {
      console.log(
        `[convertToPromptHistory] ChatHistory #${record.id} response.text property is not a string - skipping record.`
      );
      return;
    }

    const userMessage = { role: "user", content: prompt };
    // Carry attachments forward only when present so LLMs that support them
    // can reuse them from history.
    if (data?.attachments?.length > 0)
      userMessage.attachments = data.attachments;

    promptMessages.push(userMessage, { role: "assistant", content: data.text });
  });
  return promptMessages;
}
/**
 * Safely stringifies any object containing BigInt values.
 * Plain JSON.stringify throws on BigInt; this converts them to strings.
 * @param {*} obj - Anything to stringify that might contain BigInt values
 * @returns {string} JSON string with BigInt values converted to strings
 */
function safeJSONStringify(obj) {
  const bigIntReplacer = (_key, value) =>
    typeof value === "bigint" ? value.toString() : value;
  return JSON.stringify(obj, bigIntReplacer);
}
/**
 * Writes a single server-sent-event `data:` frame to the response stream.
 * @param {import("express").Response} response - open HTTP response
 * @param {Object} data - payload to serialize (BigInt-safe via safeJSONStringify)
 */
function writeResponseChunk(response, data) {
  const payload = safeJSONStringify(data);
  response.write(`data: ${payload}\n\n`);
}
/**
 * Formats the chat history so attachments that appeared earlier in the
 * conversation are re-applied via the provider's formatter function.
 * Messages without attachments (or non-user messages) pass through untouched.
 * @param {{role:string, content:string, attachments?: Object[]}[]} chatHistory
 * @param {function} formatterFunction - provider-specific message formatter
 * @param {('asProperty'|'spread')} mode - how the formatted content is attached to the message object
 * @returns {object[]}
 */
function formatChatHistory(
  chatHistory = [],
  formatterFunction,
  mode = "asProperty"
) {
  return chatHistory.map((message) => {
    // Only user messages that actually carry attachments need reformatting.
    const needsFormatting =
      message?.role === "user" && Boolean(message?.attachments?.length);
    if (!needsFormatting) return message;

    const formatted = formatterFunction({
      userPrompt: message.content,
      attachments: message.attachments,
    });

    // "spread" mode (e.g. Ollama) embeds the formatter output directly on
    // the message object; the default mirrors OpenAI's `content` property.
    return mode === "spread"
      ? { role: message.role, ...formatted }
      : { role: message.role, content: formatted };
  });
}
module.exports = {
handleDefaultStreamResponseV2,
convertToChatHistory,
convertToPromptHistory,
writeResponseChunk,
clientAbortedHandler,
formatChatHistory,
safeJSONStringify,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/chat/index.js | server/utils/helpers/chat/index.js | const { sourceIdentifier } = require("../../chats");
const { safeJsonParse } = require("../../http");
const { TokenManager } = require("../tiktoken");
const { convertToPromptHistory } = require("./responses");
/*
What is the message Array compressor?
TLDR: So anyway, i started blasting (your prompts & stuff)
messageArrayCompressor arose out of a need for users to be able to insert unlimited token prompts
and also maintain coherent history, system instructions and context, if applicable.
We took an opinionated approach that after much back-testing we have found retained a highly coherent answer
under most user conditions that a user would take while using this specific system. While other systems may
use a more advanced model for compressing message history or simplify text through a recursive approach - our is much more simple.
We "cannonball" the input.
Cannonball (verb): To ensure a prompt fits through a model window we blast a hole in the center of any inputs blocking our path to doing so.
This starts by dissecting the input as tokens and delete from the middle-out bi-directionally until the prompt window is satisfied.
You may think: "Doesn't this result in massive data loss?" - yes & no.
Under the use cases we expect the tool to be used, which is mostly chatting with documents, we are able to use this approach with minimal blowback
on the quality of responses.
We accomplish this by taking a rate-limit approach that is proportional to the model capacity. Since we support more than openAI models, this needs to
be generic and reliance on a "better summary" model just is not a luxury we can afford. The added latency overhead during prompting is also unacceptable.
In general:
system: at best 15% of token capacity
history: at best 15% of token capacity
prompt: at best 70% of token capacity.
we handle overflows by taking an aggressive path for two main cases.
1. Very large user prompt
- Likely uninterested in context, history, or even system prompt. This is a "standalone" prompt that highjacks the whole thread.
- We run this prompt on its own since a prompt that is over 70% of context window certainly is standalone.
2. Context window is exceeded in regular use.
- We do not touch prompt since it is very likely to be <70% of window.
- We check system prompt is not outrageous - if it is we cannonball it and keep context if present.
- We check a sliding window of history, only allowing up to 15% of the history to pass through if it fits, with a
preference for recent history if we can cannonball to fit it, otherwise it is omitted.
We end up with a rather large prompt that fits through a given window with a lot of room for response in most use-cases.
We also take the approach that history is the least important and most flexible of the items in this array of responses.
There is a supplemental version of this function that also returns a formatted string for models like Claude-2
*/
/**
 * Compresses an OpenAI-style message array so the total prompt (plus a ~600
 * token reply buffer) fits within the model's context window.
 * NOTE: mutates its inputs - `messages` loses its first (assumed system) and
 * last (assumed user) entries via shift/pop, the system message's content may
 * be rewritten in place, and `rawHistory` is reversed in place.
 * @param {Object} llm - LLM provider instance (reads .model, .limits, .promptWindowLimit())
 * @param {Array<{role: string, content: string}>} messages - [system, ...history?, user]
 * @param {Object[]} rawHistory - raw chat records used to rebuild history pairs
 * @returns {Promise<Array<{role: string, content: string}>>} messages that fit the window
 */
async function messageArrayCompressor(llm, messages = [], rawHistory = []) {
  // assume the response will be at least 600 tokens. If the total prompt + reply is over we need to proactively
  // run the compressor to ensure the prompt has enough space to reply.
  // realistically - most users will not be impacted by this.
  const tokenBuffer = 600;
  const tokenManager = new TokenManager(llm.model);

  // If no work needs to be done, just pass through.
  if (tokenManager.statsFrom(messages) + tokenBuffer < llm.promptWindowLimit())
    return messages;

  // Assumes messages[0] is the system prompt and the final entry is the
  // current user prompt (this is how callers construct the array).
  const system = messages.shift();
  const user = messages.pop();
  const userPromptSize = tokenManager.countFromString(user.content);

  // User prompt is the main focus here - we prioritize it and allow
  // it to highjack the entire conversation thread. We are going to
  // cannonball the prompt through to ensure the reply has at least 20% of
  // the token supply to reply with.
  if (userPromptSize > llm.limits.user) {
    return [
      {
        role: "user",
        content: cannonball({
          input: user.content,
          targetTokenSize: llm.promptWindowLimit() * 0.8,
          tiktokenInstance: tokenManager,
        }),
      },
    ];
  }

  // Compress the system message (user instruction + optional "Context:" block).
  const compressedSystem = new Promise(async (resolve) => {
    const count = tokenManager.countFromString(system.content);
    if (count < llm.limits.system) {
      resolve(system);
      return;
    }

    // Split context from system prompt - cannonball since its over the window.
    // We assume the context + user prompt is enough tokens to fit.
    const [prompt, context = ""] = system.content.split("Context:");
    let compressedPrompt;
    let compressedContext;

    // If the user system prompt contribution's to the system prompt is more than
    // 25% of the system limit, we will cannonball it - this favors the context
    // over the instruction from the user.
    if (tokenManager.countFromString(prompt) >= llm.limits.system * 0.25) {
      compressedPrompt = cannonball({
        input: prompt,
        targetTokenSize: llm.limits.system * 0.25,
        tiktokenInstance: tokenManager,
      });
    } else {
      compressedPrompt = prompt;
    }

    if (tokenManager.countFromString(context) >= llm.limits.system * 0.75) {
      compressedContext = cannonball({
        input: context,
        targetTokenSize: llm.limits.system * 0.75,
        tiktokenInstance: tokenManager,
      });
    } else {
      compressedContext = context;
    }

    // NOTE(review): rewrites system.content on the caller's object in place.
    system.content = `${compressedPrompt}${
      compressedContext ? `\nContext: ${compressedContext}` : ""
    }`;
    resolve(system);
  });

  // Prompt is allowed to take up to 70% of window - we know its under
  // if we are here, so passthrough.
  const compressedPrompt = new Promise(async (resolve) => resolve(user));

  // We always aggressively compress history because it is the least
  // important data to retain in full-fidelity.
  const compressedHistory = new Promise((resolve) => {
    const eligibleHistoryItems = [];
    var historyTokenCount = 0;
    // Iterate newest-first; .reverse() mutates rawHistory in place.
    for (const [i, history] of rawHistory.reverse().entries()) {
      const [user, assistant] = convertToPromptHistory([history]);
      const [userTokens, assistantTokens] = [
        tokenManager.countFromString(user.content),
        tokenManager.countFromString(assistant.content),
      ];
      const total = userTokens + assistantTokens;

      // If during the loop the token cost of adding this history
      // is small, we can add it to history and move onto next.
      if (historyTokenCount + total < llm.limits.history) {
        eligibleHistoryItems.unshift(user, assistant);
        historyTokenCount += total;
        continue;
      }

      // If we reach here the overhead of adding this history item will
      // be too much of the limit. So now, we are prioritizing
      // the most recent 3 message pairs - if we are already past those - exit loop and stop
      // trying to make history work.
      if (i > 2) break;

      // We are over the limit and we are within the first 3 most recent chats.
      // so now we cannonball them to make them fit into the window.
      // max size = llm.limit.history; Each component of the message, can at most
      // be 50% of the history. We cannonball whichever is the problem.
      // The math isnt perfect for tokens, so we have to add a fudge factor for safety.
      const maxTargetSize = Math.floor(llm.limits.history / 2.2);
      if (userTokens > maxTargetSize) {
        user.content = cannonball({
          input: user.content,
          targetTokenSize: maxTargetSize,
          tiktokenInstance: tokenManager,
        });
      }

      if (assistantTokens > maxTargetSize) {
        assistant.content = cannonball({
          input: assistant.content,
          targetTokenSize: maxTargetSize,
          tiktokenInstance: tokenManager,
        });
      }

      const newTotal = tokenManager.statsFrom([user, assistant]);
      // Even after cannonballing, skip the pair if it still cannot fit.
      if (historyTokenCount + newTotal > llm.limits.history) continue;
      eligibleHistoryItems.unshift(user, assistant);
      historyTokenCount += newTotal;
    }
    resolve(eligibleHistoryItems);
  });

  const [cSystem, cHistory, cPrompt] = await Promise.all([
    compressedSystem,
    compressedHistory,
    compressedPrompt,
  ]);
  return [cSystem, ...cHistory, cPrompt];
}
// Implementation of messageArrayCompressor, but for string only completion models
/**
 * Implementation of messageArrayCompressor, but for string-only completion
 * models: compresses system prompt, history, and user prompt so the string
 * built by llm.constructPrompt fits the context window (plus a ~600 token
 * reply buffer).
 * NOTE: `rawHistory` is reversed in place.
 * @param {Object} llm - LLM provider instance (reads .model, .limits, .promptWindowLimit(), .constructPrompt())
 * @param {Object} promptArgs - { systemPrompt, userPrompt, contextTexts?, ... } passed to constructPrompt
 * @param {Object[]} rawHistory - raw chat records used to rebuild history pairs
 * @returns {Promise<string>} a prompt string that fits the window
 */
async function messageStringCompressor(llm, promptArgs = {}, rawHistory = []) {
  const tokenBuffer = 600;
  const tokenManager = new TokenManager(llm.model);
  const initialPrompt = llm.constructPrompt(promptArgs);

  // If the fully-constructed prompt already fits, return it untouched.
  if (
    tokenManager.statsFrom(initialPrompt) + tokenBuffer <
    llm.promptWindowLimit()
  )
    return initialPrompt;

  const system = promptArgs.systemPrompt;
  const user = promptArgs.userPrompt;
  const userPromptSize = tokenManager.countFromString(user);

  // User prompt is the main focus here - we prioritize it and allow
  // it to highjack the entire conversation thread. We are going to
  // cannonball the prompt through to ensure the reply has at least 20% of
  // the token supply to reply with.
  if (userPromptSize > llm.limits.user) {
    return llm.constructPrompt({
      userPrompt: cannonball({
        input: user,
        targetTokenSize: llm.promptWindowLimit() * 0.8,
        tiktokenInstance: tokenManager,
      }),
    });
  }

  // Cannonball the system prompt only when it exceeds its share of the window.
  const compressedSystem = new Promise(async (resolve) => {
    const count = tokenManager.countFromString(system);
    if (count < llm.limits.system) {
      resolve(system);
      return;
    }
    resolve(
      cannonball({
        input: system,
        targetTokenSize: llm.limits.system,
        tiktokenInstance: tokenManager,
      })
    );
  });

  // Prompt is allowed to take up to 70% of window - we know its under
  // if we are here, so passthrough.
  const compressedPrompt = new Promise(async (resolve) => resolve(user));

  // We always aggressively compress history because it is the least
  // important data to retain in full-fidelity.
  const compressedHistory = new Promise((resolve) => {
    const eligibleHistoryItems = [];
    var historyTokenCount = 0;
    // Iterate newest-first; .reverse() mutates rawHistory in place.
    for (const [i, history] of rawHistory.reverse().entries()) {
      const [user, assistant] = convertToPromptHistory([history]);
      const [userTokens, assistantTokens] = [
        tokenManager.countFromString(user.content),
        tokenManager.countFromString(assistant.content),
      ];
      const total = userTokens + assistantTokens;

      // If during the loop the token cost of adding this history
      // is small, we can add it to history and move onto next.
      if (historyTokenCount + total < llm.limits.history) {
        eligibleHistoryItems.unshift(user, assistant);
        historyTokenCount += total;
        continue;
      }

      // If we reach here the overhead of adding this history item will
      // be too much of the limit. So now, we are prioritizing
      // the most recent 3 message pairs - if we are already past those - exit loop and stop
      // trying to make history work.
      if (i > 2) break;

      // We are over the limit and we are within the first 3 most recent chats.
      // so now we cannonball them to make them fit into the window.
      // max size = llm.limit.history; Each component of the message, can at most
      // be 50% of the history. We cannonball whichever is the problem.
      // The math isnt perfect for tokens, so we have to add a fudge factor for safety.
      const maxTargetSize = Math.floor(llm.limits.history / 2.2);
      if (userTokens > maxTargetSize) {
        user.content = cannonball({
          input: user.content,
          targetTokenSize: maxTargetSize,
          tiktokenInstance: tokenManager,
        });
      }

      if (assistantTokens > maxTargetSize) {
        assistant.content = cannonball({
          input: assistant.content,
          targetTokenSize: maxTargetSize,
          tiktokenInstance: tokenManager,
        });
      }

      const newTotal = tokenManager.statsFrom([user, assistant]);
      // Even after cannonballing, skip the pair if it still cannot fit.
      if (historyTokenCount + newTotal > llm.limits.history) continue;
      eligibleHistoryItems.unshift(user, assistant);
      historyTokenCount += newTotal;
    }
    resolve(eligibleHistoryItems);
  });

  const [cSystem, cHistory, cPrompt] = await Promise.all([
    compressedSystem,
    compressedHistory,
    compressedPrompt,
  ]);

  return llm.constructPrompt({
    systemPrompt: cSystem,
    contextTexts: promptArgs?.contextTexts || [],
    chatHistory: cHistory,
    userPrompt: cPrompt,
  });
}
// Cannonball prompting: aka where we shoot a proportionally big cannonball through a proportional large prompt
// Nobody should be sending prompts this big, but there is no reason we shouldn't allow it if results are good even by doing it.
/**
 * Middle-truncates `input` so its token count lands near `targetTokenSize`,
 * replacing the removed middle with a truncation marker.
 * @param {Object} opts
 * @param {string} [opts.input=""] - text to truncate
 * @param {number} [opts.targetTokenSize=0] - desired token budget
 * @param {Object|null} [opts.tiktokenInstance=null] - token manager to use (a fresh TokenManager if null)
 * @param {string|null} [opts.ellipsesStr=null] - custom truncation marker
 * @returns {string} the original input if it already fits, otherwise the truncated text
 */
function cannonball({
  input = "",
  targetTokenSize = 0,
  tiktokenInstance = null,
  ellipsesStr = null,
}) {
  if (!input || !targetTokenSize) return input;
  const tokenManager = tiktokenInstance || new TokenManager();
  const separator = ellipsesStr || "\n\n--prompt truncated for brevity--\n\n";

  const startingTokenCount = tokenManager.countFromString(input);
  if (startingTokenCount < targetTokenSize) return input;

  // Surplus is how many tokens we must shave off to land at the target size;
  // drop half of it on each side of the midpoint.
  const surplus = startingTokenCount - targetTokenSize;
  const tokens = tokenManager.tokensFromString(input);
  const midpoint = Math.floor(tokens.length / 2);
  const halfSurplus = Math.round(surplus / 2);

  const keptHead = tokens.slice(0, midpoint - halfSurplus);
  const keptTail = tokens.slice(midpoint + halfSurplus);
  const truncatedText =
    tokenManager.bytesFromTokens(keptHead) +
    separator +
    tokenManager.bytesFromTokens(keptTail);

  console.log(
    `Cannonball results ${startingTokenCount} -> ${tokenManager.countFromString(
      truncatedText
    )} tokens.`
  );
  return truncatedText;
}
/**
 * Fill the sources window with the priority of
 * 1. Pinned documents (handled prior to function)
 * 2. VectorSearch results
 * 3. prevSources in chat history - starting from most recent.
 *
 * Ensuring the window always has the desired amount of sources so that followup questions
 * in any chat mode have relevant sources, but not infinite sources. This function is used during chatting
 * and allows follow-up questions within a query chat that otherwise would have zero sources and would fail.
 * The added benefit is that during regular RAG chat, we have better coherence of citations that otherwise would
 * also yield no results with no need for a ReRanker to run and take much longer to return a response.
 *
 * The side effect of this is follow-up unrelated questions now have citations that would look totally irrelevant, however
 * we would rather optimize on the correctness of a response vs showing extraneous sources during a response. Given search
 * results always take a priority a good unrelated question that produces RAG results will still function as desired and due to previous
 * history backfill sources "changing context" mid-chat is handled appropriately.
 * example:
 * ---previous implementation---
 * prompt 1: "What is anythingllm?" -> possibly get 4 good sources
 * prompt 2: "Tell me some features" -> possible get 0 - 1 maybe relevant source + previous answer response -> bad response due to bad context mgmt
 * ---next implementation---
 * prompt 1: "What is anythingllm?" -> possibly get 4 good sources
 * prompt 2: "Tell me some features" -> possible get 0 - 1 maybe relevant source + previous answer response -> backfill with 3 good sources from previous -> much better response
 *
 * @param {Object} [config] - params to call
 * @param {number} [config.nDocs=4] - fill size of the window
 * @param {object[]} [config.searchResults=[]] - vector `similarityResponse` results for .sources
 * @param {object[]} [config.history=[]] - rawHistory of chat containing sources
 * @param {string[]} [config.filterIdentifiers=[]] - Pinned document identifiers to prevent duplicate context
 * @returns {{
 *   contextTexts: string[],
 *   sources: object[],
 * }} - Array of sources that should be added to window
 */
function fillSourceWindow({
  nDocs = 4, // Number of documents
  searchResults = [], // Sources from similarity search
  history = [], // Raw history
  filterIdentifiers = [], // pinned document sources
} = {}) {
  // ^ BUGFIX: previously defaulted to an undefined `config` variable, so
  // calling fillSourceWindow() with no argument threw a ReferenceError.
  const sources = [...searchResults];

  // Window already full, or nothing to backfill from - return as-is.
  if (sources.length >= nDocs || history.length === 0) {
    return {
      sources,
      contextTexts: sources.map((src) => src.text),
    };
  }

  const log = (text, ...args) => {
    console.log(`\x1b[36m[fillSourceWindow]\x1b[0m ${text}`, ...args);
  };

  log(
    `Need to backfill ${nDocs - searchResults.length} chunks to fill in the source window for RAG!`
  );
  const seenChunks = new Set(searchResults.map((source) => source.id));

  // We need to reverse again because we need to iterate from bottom of array (most recent chats)
  // Looking at this function by itself you may think that this loop could be extreme for long history chats,
  // but this was already handled where `history` we derived. This comes from `recentChatHistory` which
  // includes a limit for history (default: 20). So this loop does not look as extreme as on first glance.
  for (const chat of history.reverse()) {
    if (sources.length >= nDocs) {
      log(
        `Citations backfilled to ${nDocs} references from ${searchResults.length} original citations.`
      );
      break;
    }

    const chatSources =
      safeJsonParse(chat.response, { sources: [] })?.sources || [];
    if (!chatSources?.length || !Array.isArray(chatSources)) continue;

    const validSources = chatSources.filter((source) => {
      return (
        !filterIdentifiers.includes(sourceIdentifier(source)) && // source cannot be in current pins
        source.hasOwnProperty("score") && // source cannot have come from a pinned document that was previously pinned
        source.hasOwnProperty("text") && // source has a valid text property we can use
        !seenChunks.has(source.id) // is unique
      );
    });

    for (const validSource of validSources) {
      if (sources.length >= nDocs) break;
      sources.push(validSource);
      seenChunks.add(validSource.id);
    }
  }

  return {
    sources,
    contextTexts: sources.map((src) => src.text),
  };
}
module.exports = {
messageArrayCompressor,
messageStringCompressor,
fillSourceWindow,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/chat/convertTo.js | server/utils/helpers/chat/convertTo.js | // Helpers that convert workspace chats to some supported format
// for external use by the user.
const { WorkspaceChats } = require("../../../models/workspaceChats");
const { EmbedChats } = require("../../../models/embedChats");
const { safeJsonParse } = require("../../http");
const { SystemSettings } = require("../../../models/systemSettings");
/**
 * Serializes prepared chat records into a CSV string.
 * Column headers are the union of all keys found across records (seeded with
 * the core columns) so records with extra fields are still represented;
 * missing values render as empty strings.
 * @param {object[]} preparedData - flat chat records to serialize.
 * @returns {Promise<string>} CSV document: header row plus one row per record.
 */
async function convertToCSV(preparedData) {
  const columns = new Set(["id", "workspace", "prompt", "response", "sent_at"]);
  for (const record of preparedData) {
    for (const key of Object.keys(record)) columns.add(key);
  }

  const headerList = Array.from(columns);
  const lines = [headerList.join(",")];
  preparedData.forEach((record) => {
    const cells = headerList.map((column) =>
      escapeCsv(String(record[column] ?? ""))
    );
    lines.push(cells.join(","));
  });
  return lines.join("\n");
}
/**
 * Serializes prepared chat records as pretty-printed JSON (4-space indent).
 * @param {object[]} preparedData - flat chat records.
 * @returns {Promise<string>} JSON document.
 */
async function convertToJSON(preparedData) {
  const INDENT = 4;
  return JSON.stringify(preparedData, null, INDENT);
}
// ref: https://raw.githubusercontent.com/gururise/AlpacaDataCleaned/main/alpaca_data.json
/**
 * Serializes Alpaca-style {instruction, input, output} records as
 * pretty-printed JSON (4-space indent).
 * ref: https://raw.githubusercontent.com/gururise/AlpacaDataCleaned/main/alpaca_data.json
 * @param {object[]} preparedData - Alpaca-shaped records.
 * @returns {Promise<string>} JSON document.
 */
async function convertToJSONAlpaca(preparedData) {
  const INDENT = 4;
  return JSON.stringify(preparedData, null, INDENT);
}
// You can validate JSONL outputs on https://jsonlines.org/validator/
/**
 * Serializes a workspaceId -> transcript map into JSONL: one compact JSON
 * transcript object per line. Validate outputs at https://jsonlines.org/validator/
 * @param {Object<string, object>} workspaceChatsMap - per-workspace transcripts.
 * @returns {Promise<string>} newline-delimited JSON document.
 */
async function convertToJSONL(workspaceChatsMap) {
  const lines = [];
  for (const transcript of Object.values(workspaceChatsMap)) {
    lines.push(JSON.stringify(transcript));
  }
  return lines.join("\n");
}
/**
 * Fetches all chats of the given type and shapes them for the requested
 * export format:
 * - "csv"/"json": flat records (id, prompt, response, sent_at, workspace, ...);
 *   "json" additionally inlines image attachments as data URLs.
 * - "jsonAlpaca": {instruction, input, output} records (no arrays allowed).
 * - "jsonl" (default): a workspaceId-keyed map of OpenAI-style message
 *   transcripts suitable for fine-tuning.
 * @param {string} format - one of the keys of `exportMap`.
 * @param {string} chatType - "workspace" or "embed".
 * @returns {Promise<object[]|object>} records array, or a transcript map for "jsonl".
 * @throws {Error} when the format or chat type is unknown.
 */
async function prepareChatsForExport(format = "jsonl", chatType = "workspace") {
  if (!exportMap.hasOwnProperty(format))
    throw new Error(`Invalid export type: ${format}`);
  // Load all chats of the requested type, oldest first.
  let chats;
  if (chatType === "workspace") {
    chats = await WorkspaceChats.whereWithData({}, null, null, {
      id: "asc",
    });
  } else if (chatType === "embed") {
    chats = await EmbedChats.whereWithEmbedAndWorkspace(
      {},
      null,
      {
        id: "asc",
      },
      null
    );
  } else {
    throw new Error(`Invalid chat type: ${chatType}`);
  }
  if (format === "csv" || format === "json") {
    const preparedData = chats.map((chat) => {
      // `chat.response` is stored as a JSON string: { text, sources, attachments, ... }
      const responseJson = safeJsonParse(chat.response, {});
      const baseData = {
        id: chat.id,
        prompt: chat.prompt,
        response: responseJson.text,
        sent_at: chat.createdAt,
        // Only add attachments to the json format since we cannot arrange attachments in csv format
        ...(format === "json"
          ? {
              attachments:
                responseJson.attachments?.length > 0
                  ? responseJson.attachments.map((attachment) => ({
                      type: "image",
                      image: attachmentToDataUrl(attachment),
                    }))
                  : [],
            }
          : {}),
      };
      if (chatType === "embed") {
        return {
          ...baseData,
          workspace: chat.embed_config
            ? chat.embed_config.workspace.name
            : "unknown workspace",
        };
      }
      // Workspace chats additionally carry the author and feedback rating.
      return {
        ...baseData,
        workspace: chat.workspace ? chat.workspace.name : "unknown workspace",
        username: chat.user
          ? chat.user.username
          : chat.api_session_id !== null
          ? "API"
          : "unknown user",
        // feedbackScore: null = unrated, truthy = thumbs up, falsy = thumbs down.
        rating:
          chat.feedbackScore === null
            ? "--"
            : chat.feedbackScore
            ? "GOOD"
            : "BAD",
      };
    });
    return preparedData;
  }
  // jsonAlpaca format does not support array outputs
  if (format === "jsonAlpaca") {
    const preparedData = chats.map((chat) => {
      const responseJson = safeJsonParse(chat.response, {});
      return {
        instruction: buildSystemPrompt(
          chat,
          chat.workspace ? chat.workspace.openAiPrompt : null
        ),
        input: chat.prompt,
        output: responseJson.text,
      };
    });
    return preparedData;
  }
  // Export to JSONL format (recommended for fine-tuning)
  const workspaceChatsMap = chats.reduce((acc, chat) => {
    const { prompt, response, workspaceId } = chat;
    const responseJson = safeJsonParse(response, { attachments: [] });
    const attachments = responseJson.attachments;
    // First chat for a workspace seeds the transcript with its system prompt.
    if (!acc[workspaceId]) {
      acc[workspaceId] = {
        messages: [
          {
            role: "system",
            content: [
              {
                type: "text",
                text:
                  chat.workspace?.openAiPrompt ??
                  SystemSettings.saneDefaultSystemPrompt,
              },
            ],
          },
        ],
      };
    }
    acc[workspaceId].messages.push(
      {
        role: "user",
        content: [
          {
            type: "text",
            text: prompt,
          },
          ...(attachments?.length > 0
            ? attachments.map((attachment) => ({
                type: "image",
                image: attachmentToDataUrl(attachment),
              }))
            : []),
        ],
      },
      {
        role: "assistant",
        content: [
          {
            type: "text",
            text: responseJson.text,
          },
        ],
      }
    );
    return acc;
  }, {});
  return workspaceChatsMap;
}
// Maps each supported export format to its HTTP content type and serializer.
// Keys are the valid `format` values for prepareChatsForExport/exportChatsAsType.
const exportMap = {
  json: {
    contentType: "application/json",
    func: convertToJSON,
  },
  csv: {
    contentType: "text/csv",
    func: convertToCSV,
  },
  jsonl: {
    contentType: "application/jsonl",
    func: convertToJSONL,
  },
  jsonAlpaca: {
    contentType: "application/json",
    func: convertToJSONAlpaca,
  },
};
/**
 * Escapes a single CSV cell value: wraps it in double quotes, doubles any
 * embedded quotes, and collapses line breaks to single spaces so every
 * record stays on one physical row.
 * @param {string|null|undefined} str - raw cell value.
 * @returns {string} quoted, CSV-safe cell (empty quoted cell for nullish input).
 */
function escapeCsv(str) {
  if (str === null || str === undefined) return '""';
  // Normalize every newline flavor (\r\n, \r, \n). The previous version only
  // handled \n, leaving stray \r characters that split rows in strict parsers.
  return `"${String(str).replace(/"/g, '""').replace(/\r\n|\r|\n/g, " ")}"`;
}
/**
 * Exports all chats of the given type in the requested format.
 * Unknown format keys fall back to JSONL.
 * @param {string} format - one of the keys of `exportMap`.
 * @param {string} chatType - "workspace" or "embed".
 * @returns {Promise<{contentType: string, data: string}>} payload ready for download.
 */
async function exportChatsAsType(format = "jsonl", chatType = "workspace") {
  const exporter = exportMap.hasOwnProperty(format)
    ? exportMap[format]
    : exportMap.jsonl;
  const preparedChats = await prepareChatsForExport(format, chatType);
  const data = await exporter.func(preparedChats);
  return { contentType: exporter.contentType, data };
}
/**
 * Builds the system prompt used in the Alpaca export: the workspace prompt
 * (or the sane default) followed by any RAG context snippets recorded on the
 * chat's JSON response payload.
 * @param {object} chat - chat row whose `response` is a JSON string with `sources`.
 * @param {string|null} prompt - workspace-defined system prompt, if any.
 * @returns {string} prompt text, with a "Context:" section when sources exist.
 */
function buildSystemPrompt(chat, prompt = null) {
  const sources = safeJsonParse(chat.response)?.sources || [];
  const basePrompt = prompt ?? SystemSettings.saneDefaultSystemPrompt;
  if (sources.length === 0) return `${basePrompt}`;

  let context = "\nContext:\n";
  sources.forEach((source, i) => {
    context += `[CONTEXT ${i}]:\n${source.text}\n[END CONTEXT ${i}]\n\n`;
  });
  return `${basePrompt}${context}`;
}
/**
 * Converts an attachment's content string to a proper data URL format if needed.
 * Content already in `data:` form is returned untouched; otherwise it is
 * assumed to be base64 and prefixed with the attachment's MIME type.
 * @param {Object} attachment - The attachment object containing contentString and mime type
 * @returns {string} The properly formatted data URL
 */
function attachmentToDataUrl(attachment) {
  const { contentString, mime } = attachment;
  if (contentString.startsWith("data:")) return contentString;
  return `data:${mime};base64,${contentString}`;
}
// Public API: `prepareChatsForExport` shapes raw records for a format;
// `exportChatsAsType` returns { contentType, data } ready for download.
module.exports = {
  prepareChatsForExport,
  exportChatsAsType,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/helpers/admin/index.js | server/utils/helpers/admin/index.js | const { User } = require("../../../models/user");
const { ROLES } = require("../../middleware/multiUserProtected");
/**
 * Validates that the caller may assign the requested role when creating or
 * updating a user in multi-user mode. Admins may assign any role; managers
 * may only assign manager/default. Skipped when no role change is requested.
 * The caller is expected to be an admin or manager.
 * @param {object} currentUser - the user performing the update.
 * @param {object} newUserParams - params for the new or updated user.
 * @returns {{valid: boolean, error: string|null}}
 */
function validRoleSelection(currentUser = {}, newUserParams = {}) {
  // No role change requested - nothing to validate.
  if (!newUserParams.hasOwnProperty("role"))
    return { valid: true, error: null };

  switch (currentUser.role) {
    case ROLES.admin:
      return { valid: true, error: null };
    case ROLES.manager: {
      const assignable = [ROLES.manager, ROLES.default];
      return assignable.includes(newUserParams.role)
        ? { valid: true, error: null }
        : { valid: false, error: "Invalid role selection for user." };
    }
    default:
      return { valid: false, error: "Invalid condition for caller." };
  }
}
/**
 * Guards against demoting the last remaining admin, which would lock the
 * caller out of system administration. Validation is skipped when the update
 * carries no role change, the target is not currently an admin, or the role
 * is unchanged.
 * @param {object} userToModify - the user being updated.
 * @param {object} updates - the pending update payload.
 * @returns {Promise<{valid: boolean, error: string|null}>}
 */
async function canModifyAdmin(userToModify, updates) {
  // Skip validation entirely when no admin demotion can possibly occur.
  if (!updates.hasOwnProperty("role")) return { valid: true, error: null };
  if (userToModify.role !== ROLES.admin) return { valid: true, error: null };
  if (updates.role === userToModify.role) return { valid: true, error: null };

  const remainingAdmins = (await User.count({ role: ROLES.admin })) - 1;
  if (remainingAdmins <= 0)
    return {
      valid: false,
      error: "No system admins will remain if you do this. Update failed.",
    };
  return { valid: true, error: null };
}
/**
 * Checks whether the caller may act on an existing user's account.
 * Admins may modify anyone; managers may only modify managers and default
 * users; all other caller roles are rejected.
 * @param {object} currentUser - the user attempting the action.
 * @param {object} existingUser - the user being acted upon.
 * @returns {{valid: boolean, error: string|null}}
 */
function validCanModify(currentUser, existingUser) {
  switch (currentUser.role) {
    case ROLES.admin:
      return { valid: true, error: null };
    case ROLES.manager: {
      const manageable = [ROLES.manager, ROLES.default];
      return manageable.includes(existingUser.role)
        ? { valid: true, error: null }
        : { valid: false, error: "Cannot perform that action on user." };
    }
    default:
      return { valid: false, error: "Invalid condition for caller." };
  }
}
// Public API: permission checks used by multi-user admin/manager endpoints.
module.exports = {
  validCanModify,
  validRoleSelection,
  canModifyAdmin,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/index.js | server/utils/agents/index.js | const AIbitat = require("./aibitat");
const AgentPlugins = require("./aibitat/plugins");
const {
WorkspaceAgentInvocation,
} = require("../../models/workspaceAgentInvocation");
const { User } = require("../../models/user");
const { WorkspaceChats } = require("../../models/workspaceChats");
const { safeJsonParse } = require("../http");
const { USER_AGENT, WORKSPACE_AGENT } = require("./defaults");
const ImportedPlugin = require("./imported");
const { AgentFlows } = require("../agentFlows");
const MCPCompatibilityLayer = require("../MCP");
/**
 * Orchestrates a single agent invocation: validates the invocation record,
 * resolves the LLM provider/model (with workspace and system fallbacks),
 * builds the AIbitat agent cluster with its plugins, and starts the session.
 */
class AgentHandler {
  #invocationUUID; // UUID of the WorkspaceAgentInvocation row being run.
  #funcsToLoad = []; // Function/plugin identifiers collected from agent definitions.
  invocation = null;
  aibitat = null;
  channel = null;
  provider = null;
  model = null;

  /**
   * @param {{uuid: string}} param0 - UUID of the agent invocation to run.
   */
  constructor({ uuid }) {
    this.#invocationUUID = uuid;
  }

  // Console logger with a colored [AgentHandler] prefix.
  log(text, ...args) {
    console.log(`\x1b[36m[AgentHandler]\x1b[0m ${text}`, ...args);
  }

  // Logs the session identity when the invocation ends.
  closeAlert() {
    this.log(`End ${this.#invocationUUID}::${this.provider}:${this.model}`);
  }

  /**
   * Loads recent workspace chat history and converts it into AIbitat
   * user/agent message pairs. Returns [] on any lookup failure.
   * @param {number} limit - max number of prior chats to load.
   */
  async #chatHistory(limit = 10) {
    try {
      const rawHistory = (
        await WorkspaceChats.where(
          {
            workspaceId: this.invocation.workspace_id,
            user_id: this.invocation.user_id || null,
            thread_id: this.invocation.thread_id || null,
            api_session_id: null,
            include: true,
          },
          limit,
          { id: "desc" }
        )
      ).reverse();
      const agentHistory = [];
      rawHistory.forEach((chatLog) => {
        agentHistory.push(
          {
            from: USER_AGENT.name,
            to: WORKSPACE_AGENT.name,
            content: chatLog.prompt,
            state: "success",
          },
          {
            from: WORKSPACE_AGENT.name,
            to: USER_AGENT.name,
            content: safeJsonParse(chatLog.response)?.text || "",
            state: "success",
          }
        );
      });
      return agentHistory;
    } catch (e) {
      this.log("Error loading chat history", e.message);
      return [];
    }
  }

  /**
   * Throws when the resolved provider is missing the ENV configuration it
   * needs to run agents. Each case maps a provider key to its required vars.
   * @throws {Error} when the provider is unset or misconfigured.
   */
  checkSetup() {
    switch (this.provider) {
      case "openai":
        if (!process.env.OPEN_AI_KEY)
          throw new Error("OpenAI API key must be provided to use agents.");
        break;
      case "anthropic":
        if (!process.env.ANTHROPIC_API_KEY)
          throw new Error("Anthropic API key must be provided to use agents.");
        break;
      case "lmstudio":
        if (!process.env.LMSTUDIO_BASE_PATH)
          throw new Error("LMStudio base path must be provided to use agents.");
        break;
      case "ollama":
        if (!process.env.OLLAMA_BASE_PATH)
          throw new Error("Ollama base path must be provided to use agents.");
        break;
      case "groq":
        if (!process.env.GROQ_API_KEY)
          throw new Error("Groq API key must be provided to use agents.");
        break;
      case "togetherai":
        if (!process.env.TOGETHER_AI_API_KEY)
          throw new Error("TogetherAI API key must be provided to use agents.");
        break;
      case "azure":
        if (!process.env.AZURE_OPENAI_ENDPOINT || !process.env.AZURE_OPENAI_KEY)
          throw new Error(
            "Azure OpenAI API endpoint and key must be provided to use agents."
          );
        break;
      case "koboldcpp":
        if (!process.env.KOBOLD_CPP_BASE_PATH)
          throw new Error(
            "KoboldCPP must have a valid base path to use for the api."
          );
        break;
      case "localai":
        if (!process.env.LOCAL_AI_BASE_PATH)
          throw new Error(
            "LocalAI must have a valid base path to use for the api."
          );
        break;
      case "openrouter":
        if (!process.env.OPENROUTER_API_KEY)
          throw new Error("OpenRouter API key must be provided to use agents.");
        break;
      case "mistral":
        if (!process.env.MISTRAL_API_KEY)
          throw new Error("Mistral API key must be provided to use agents.");
        break;
      case "generic-openai":
        if (!process.env.GENERIC_OPEN_AI_BASE_PATH)
          throw new Error("API base path must be provided to use agents.");
        break;
      case "perplexity":
        if (!process.env.PERPLEXITY_API_KEY)
          throw new Error("Perplexity API key must be provided to use agents.");
        break;
      case "textgenwebui":
        if (!process.env.TEXT_GEN_WEB_UI_BASE_PATH)
          throw new Error(
            "TextWebGenUI API base path must be provided to use agents."
          );
        break;
      case "bedrock":
        if (
          !process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID ||
          !process.env.AWS_BEDROCK_LLM_ACCESS_KEY ||
          !process.env.AWS_BEDROCK_LLM_REGION
        )
          throw new Error(
            "AWS Bedrock Access Keys and region must be provided to use agents."
          );
        break;
      case "fireworksai":
        if (!process.env.FIREWORKS_AI_LLM_API_KEY)
          throw new Error(
            "FireworksAI API Key must be provided to use agents."
          );
        break;
      case "deepseek":
        if (!process.env.DEEPSEEK_API_KEY)
          throw new Error("DeepSeek API Key must be provided to use agents.");
        break;
      case "litellm":
        if (!process.env.LITE_LLM_BASE_PATH)
          throw new Error(
            "LiteLLM API base path and key must be provided to use agents."
          );
        break;
      case "apipie":
        if (!process.env.APIPIE_LLM_API_KEY)
          throw new Error("ApiPie API Key must be provided to use agents.");
        break;
      case "xai":
        if (!process.env.XAI_LLM_API_KEY)
          throw new Error("xAI API Key must be provided to use agents.");
        break;
      case "zai":
        if (!process.env.ZAI_API_KEY)
          throw new Error("Z.AI API Key must be provided to use agents.");
        break;
      case "novita":
        if (!process.env.NOVITA_LLM_API_KEY)
          throw new Error("Novita API Key must be provided to use agents.");
        break;
      case "nvidia-nim":
        if (!process.env.NVIDIA_NIM_LLM_BASE_PATH)
          throw new Error(
            "NVIDIA NIM base path must be provided to use agents."
          );
        break;
      case "ppio":
        if (!process.env.PPIO_API_KEY)
          throw new Error("PPIO API Key must be provided to use agents.");
        break;
      case "gemini":
        if (!process.env.GEMINI_API_KEY)
          throw new Error("Gemini API key must be provided to use agents.");
        break;
      case "dpais":
        // Dell Pro AI Studio requires both a base path and an explicit model.
        if (!process.env.DPAIS_LLM_BASE_PATH)
          throw new Error(
            "Dell Pro AI Studio base path must be provided to use agents."
          );
        if (!process.env.DPAIS_LLM_MODEL_PREF)
          throw new Error(
            "Dell Pro AI Studio model must be set to use agents."
          );
        break;
      case "moonshotai":
        if (!process.env.MOONSHOT_AI_MODEL_PREF)
          throw new Error("Moonshot AI model must be set to use agents.");
        break;
      case "cometapi":
        if (!process.env.COMETAPI_LLM_API_KEY)
          throw new Error("CometAPI API Key must be provided to use agents.");
        break;
      case "foundry":
        if (!process.env.FOUNDRY_BASE_PATH)
          throw new Error("Foundry base path must be provided to use agents.");
        break;
      case "giteeai":
        if (!process.env.GITEE_AI_API_KEY)
          throw new Error("GiteeAI API Key must be provided to use agents.");
        break;
      case "cohere":
        if (!process.env.COHERE_API_KEY)
          throw new Error("Cohere API key must be provided to use agents.");
        break;
      default:
        throw new Error(
          "No workspace agent provider set. Please set your agent provider in the workspace's settings"
        );
    }
  }

  /**
   * Finds the default model for a given provider. If no default model is set for it's associated ENV then
   * it will return a reasonable base model for the provider if one exists.
   * @param {string} provider - The provider to find the default model for.
   * @returns {string|null} The default model for the provider.
   */
  providerDefault(provider = this.provider) {
    switch (provider) {
      case "openai":
        return process.env.OPEN_MODEL_PREF ?? "gpt-4o";
      case "anthropic":
        return process.env.ANTHROPIC_MODEL_PREF ?? "claude-3-sonnet-20240229";
      case "lmstudio":
        return process.env.LMSTUDIO_MODEL_PREF ?? "server-default";
      case "ollama":
        return process.env.OLLAMA_MODEL_PREF ?? "llama3:latest";
      case "groq":
        return process.env.GROQ_MODEL_PREF ?? "llama3-70b-8192";
      case "togetherai":
        return (
          process.env.TOGETHER_AI_MODEL_PREF ??
          "mistralai/Mixtral-8x7B-Instruct-v0.1"
        );
      case "azure":
        return process.env.OPEN_MODEL_PREF;
      case "koboldcpp":
        return process.env.KOBOLD_CPP_MODEL_PREF ?? null;
      case "localai":
        return process.env.LOCAL_AI_MODEL_PREF ?? null;
      case "openrouter":
        return process.env.OPENROUTER_MODEL_PREF ?? "openrouter/auto";
      case "mistral":
        return process.env.MISTRAL_MODEL_PREF ?? "mistral-medium";
      case "generic-openai":
        return process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
      case "perplexity":
        return process.env.PERPLEXITY_MODEL_PREF ?? "sonar-small-online";
      case "textgenwebui":
        return "text-generation-webui";
      case "bedrock":
        return process.env.AWS_BEDROCK_LLM_MODEL_PREFERENCE ?? null;
      case "fireworksai":
        return process.env.FIREWORKS_AI_LLM_MODEL_PREF ?? null;
      case "deepseek":
        return process.env.DEEPSEEK_MODEL_PREF ?? "deepseek-chat";
      case "litellm":
        return process.env.LITE_LLM_MODEL_PREF ?? null;
      case "moonshotai":
        return process.env.MOONSHOT_AI_MODEL_PREF ?? "moonshot-v1-32k";
      case "apipie":
        return process.env.APIPIE_LLM_MODEL_PREF ?? null;
      case "xai":
        return process.env.XAI_LLM_MODEL_PREF ?? "grok-beta";
      case "zai":
        return process.env.ZAI_MODEL_PREF ?? "glm-4.5";
      case "novita":
        return process.env.NOVITA_LLM_MODEL_PREF ?? "deepseek/deepseek-r1";
      case "nvidia-nim":
        return process.env.NVIDIA_NIM_LLM_MODEL_PREF ?? null;
      case "ppio":
        return process.env.PPIO_MODEL_PREF ?? "qwen/qwen2.5-32b-instruct";
      case "gemini":
        return process.env.GEMINI_LLM_MODEL_PREF ?? "gemini-2.0-flash-lite";
      case "dpais":
        return process.env.DPAIS_LLM_MODEL_PREF;
      case "cometapi":
        return process.env.COMETAPI_LLM_MODEL_PREF ?? "gpt-5-mini";
      case "foundry":
        return process.env.FOUNDRY_MODEL_PREF ?? null;
      case "giteeai":
        return process.env.GITEE_AI_MODEL_PREF ?? null;
      case "cohere":
        return process.env.COHERE_MODEL_PREF ?? "command-r-08-2024";
      default:
        return null;
    }
  }

  /**
   * Attempts to find a fallback provider and model to use if the workspace
   * does not have an explicit `agentProvider` and `agentModel` set.
   * 1. Fallback to the workspace `chatProvider` and `chatModel` if they exist.
   * 2. Fallback to the system `LLM_PROVIDER` and try to load the associated default model via ENV params or a base available model.
   * 3. Otherwise, return null - will likely throw an error the user can act on.
   * @returns {object|null} - An object with provider and model keys.
   */
  #getFallbackProvider() {
    // First, fallback to the workspace chat provider and model if they exist
    if (
      this.invocation.workspace.chatProvider &&
      this.invocation.workspace.chatModel
    ) {
      return {
        provider: this.invocation.workspace.chatProvider,
        model: this.invocation.workspace.chatModel,
      };
    }
    // If workspace does not have chat provider and model fallback
    // to system provider and try to load provider default model
    const systemProvider = process.env.LLM_PROVIDER;
    const systemModel = this.providerDefault(systemProvider);
    if (systemProvider && systemModel) {
      return {
        provider: systemProvider,
        model: systemModel,
      };
    }
    return null;
  }

  /**
   * Finds or assumes the model preference value to use for API calls.
   * If multi-model loading is supported, we use their agent model selection of the workspace
   * If not supported, we attempt to fallback to the system provider value for the LLM preference
   * and if that fails - we assume a reasonable base model to exist.
   * @returns {string|null} the model preference value to use in API calls
   */
  #fetchModel() {
    // Provider was not explicitly set for workspace, so we are going to run our fallback logic
    // that will set a provider and model for us to use.
    if (!this.provider) {
      const fallback = this.#getFallbackProvider();
      if (!fallback) throw new Error("No valid provider found for the agent.");
      this.provider = fallback.provider; // re-set the provider to the fallback provider so it is not null.
      return fallback.model; // set its defined model based on fallback logic.
    }
    // The provider was explicitly set, so check if the workspace has an agent model set.
    if (this.invocation.workspace.agentModel)
      return this.invocation.workspace.agentModel;
    // Otherwise, we have no model to use - so guess a default model to use via the provider
    // and it's system ENV params and if that fails - we return either a base model or null.
    return this.providerDefault();
  }

  // Resolves provider + model for this invocation, then verifies the
  // provider's ENV configuration. Throws if no provider can be resolved.
  #providerSetupAndCheck() {
    this.provider = this.invocation.workspace.agentProvider ?? null; // set provider to workspace agent provider if it exists
    this.model = this.#fetchModel();
    if (!this.provider)
      throw new Error("No valid provider found for the agent.");
    this.log(`Start ${this.#invocationUUID}::${this.provider}:${this.model}`);
    this.checkSetup();
  }

  // Loads the invocation record for this handler's UUID; throws when the
  // invocation has already been closed.
  async #validInvocation() {
    const invocation = await WorkspaceAgentInvocation.getWithWorkspace({
      uuid: String(this.#invocationUUID),
    });
    if (invocation?.closed)
      throw new Error("This agent invocation is already closed");
    this.invocation = invocation ?? null;
  }

  /**
   * Builds the call options for a plugin from caller args and the plugin's
   * startup config. Missing required params are logged but not fatal.
   * @param {object} args - arguments provided at startup.
   * @param {object} config - plugin startupConfig.params definition map.
   * @param {string} pluginName - plugin name used in log messages.
   * @returns {object} resolved call options.
   */
  parseCallOptions(args, config = {}, pluginName) {
    const callOpts = {};
    for (const [param, definition] of Object.entries(config)) {
      if (
        definition.required &&
        (!Object.prototype.hasOwnProperty.call(args, param) ||
          args[param] === null)
      ) {
        this.log(
          `'${param}' required parameter for '${pluginName}' plugin is missing. Plugin may not function or crash agent.`
        );
        continue;
      }
      callOpts[param] = Object.prototype.hasOwnProperty.call(args, param)
        ? args[param]
        : definition.default || null;
    }
    return callOpts;
  }

  /**
   * Attaches every function in #funcsToLoad to the aibitat cluster.
   * Identifier shapes handled here:
   *  - "parent#child": child of a built-in AgentPlugins entry.
   *  - "@@flow_<uuid>": an agent flow loaded via AgentFlows.
   *  - "@@mcp_<name>": an MCP server whose tools become child plugins.
   *  - "@@<hubId>": an imported community plugin.
   *  - anything else: a single-stage built-in AgentPlugins entry.
   * @param {object} args - startup args forwarded to plugin call options.
   */
  async #attachPlugins(args) {
    for (const name of this.#funcsToLoad) {
      // Load child plugin
      if (name.includes("#")) {
        const [parent, childPluginName] = name.split("#");
        if (!Object.prototype.hasOwnProperty.call(AgentPlugins, parent)) {
          this.log(
            `${parent} is not a valid plugin. Skipping inclusion to agent cluster.`
          );
          continue;
        }
        const childPlugin = AgentPlugins[parent].plugin.find(
          (child) => child.name === childPluginName
        );
        if (!childPlugin) {
          this.log(
            `${parent} does not have child plugin named ${childPluginName}. Skipping inclusion to agent cluster.`
          );
          continue;
        }
        const callOpts = this.parseCallOptions(
          args,
          childPlugin?.startupConfig?.params,
          name
        );
        this.aibitat.use(childPlugin.plugin(callOpts));
        this.log(
          `Attached ${parent}:${childPluginName} plugin to Agent cluster`
        );
        continue;
      }
      // Load flow plugin. This is marked by `@@flow_` in the array of functions to load.
      if (name.startsWith("@@flow_")) {
        const uuid = name.replace("@@flow_", "");
        const plugin = AgentFlows.loadFlowPlugin(uuid, this.aibitat);
        if (!plugin) {
          this.log(
            `Flow ${uuid} not found in flows directory. Skipping inclusion to agent cluster.`
          );
          continue;
        }
        this.aibitat.use(plugin.plugin());
        this.log(
          `Attached flow ${plugin.name} (${plugin.flowName}) plugin to Agent cluster`
        );
        continue;
      }
      // Load MCP plugin. This is marked by `@@mcp_` in the array of functions to load.
      // All sub-tools are loaded here and are denoted by `pluginName:toolName` as their identifier.
      // This will replace the parent MCP server plugin with the sub-tools as child plugins so they
      // can be called directly by the agent when invoked.
      // Since to get to this point, the `activeMCPServers` method has already been called, we can
      // safely assume that the MCP server is running and the tools are available/loaded.
      if (name.startsWith("@@mcp_")) {
        const mcpPluginName = name.replace("@@mcp_", "");
        const plugins =
          await new MCPCompatibilityLayer().convertServerToolsToPlugins(
            mcpPluginName,
            this.aibitat
          );
        if (!plugins) {
          this.log(
            `MCP ${mcpPluginName} not found in MCP server config. Skipping inclusion to agent cluster.`
          );
          continue;
        }
        // Remove the old function from the agent functions directly
        // and push the new ones onto the end of the array so that they are loaded properly.
        this.aibitat.agents.get("@agent").functions = this.aibitat.agents
          .get("@agent")
          .functions.filter((f) => f.name !== name);
        for (const plugin of plugins)
          this.aibitat.agents.get("@agent").functions.push(plugin.name);
        plugins.forEach((plugin) => {
          this.aibitat.use(plugin.plugin());
          this.log(
            `Attached MCP::${plugin.toolName} MCP tool to Agent cluster`
          );
        });
        continue;
      }
      // Load imported plugin. This is marked by `@@` in the array of functions to load.
      // and is the @@hubID of the plugin.
      if (name.startsWith("@@")) {
        const hubId = name.replace("@@", "");
        const valid = ImportedPlugin.validateImportedPluginHandler(hubId);
        if (!valid) {
          this.log(
            `Imported plugin by hubId ${hubId} not found in plugin directory. Skipping inclusion to agent cluster.`
          );
          continue;
        }
        const plugin = ImportedPlugin.loadPluginByHubId(hubId);
        const callOpts = plugin.parseCallOptions();
        this.aibitat.use(plugin.plugin(callOpts));
        this.log(
          `Attached ${plugin.name} (${hubId}) imported plugin to Agent cluster`
        );
        continue;
      }
      // Load single-stage plugin.
      if (!Object.prototype.hasOwnProperty.call(AgentPlugins, name)) {
        this.log(
          `${name} is not a valid plugin. Skipping inclusion to agent cluster.`
        );
        continue;
      }
      const callOpts = this.parseCallOptions(
        args,
        AgentPlugins[name].startupConfig.params
      );
      const AIbitatPlugin = AgentPlugins[name];
      this.aibitat.use(AIbitatPlugin.plugin(callOpts));
      this.log(`Attached ${name} plugin to Agent cluster`);
    }
  }

  // Registers the default user and workspace agents on the cluster and
  // collects their declared functions for later plugin attachment.
  async #loadAgents() {
    // Default User agent and workspace agent
    this.log(`Attaching user and default agent to Agent cluster.`);
    const user = this.invocation.user_id
      ? await User.get({ id: Number(this.invocation.user_id) })
      : null;
    const userAgentDef = await USER_AGENT.getDefinition();
    const workspaceAgentDef = await WORKSPACE_AGENT.getDefinition(
      this.provider,
      this.invocation.workspace,
      user
    );
    this.aibitat.agent(USER_AGENT.name, userAgentDef);
    this.aibitat.agent(WORKSPACE_AGENT.name, workspaceAgentDef);
    this.#funcsToLoad = [
      ...(userAgentDef?.functions || []),
      ...(workspaceAgentDef?.functions || []),
    ];
  }

  // Validates the invocation and resolves provider/model. Must be called
  // before createAIbitat. Returns this for chaining.
  async init() {
    await this.#validInvocation();
    this.#providerSetupAndCheck();
    return this;
  }

  /**
   * Builds the AIbitat cluster: seeds chat history, attaches the websocket
   * and chat-history plugins, loads agents, then attaches their plugins.
   * @param {object} args - startup args; must include `socket` for the websocket plugin.
   */
  async createAIbitat(
    args = {
      socket,
    }
  ) {
    this.aibitat = new AIbitat({
      provider: this.provider ?? "openai",
      model: this.model ?? "gpt-4o",
      chats: await this.#chatHistory(20),
      handlerProps: {
        invocation: this.invocation,
        log: this.log,
      },
    });
    // Attach standard websocket plugin for frontend communication.
    this.log(`Attached ${AgentPlugins.websocket.name} plugin to Agent cluster`);
    this.aibitat.use(
      AgentPlugins.websocket.plugin({
        socket: args.socket,
        muteUserReply: true,
        introspection: true,
      })
    );
    // Attach standard chat-history plugin for message storage.
    this.log(
      `Attached ${AgentPlugins.chatHistory.name} plugin to Agent cluster`
    );
    this.aibitat.use(AgentPlugins.chatHistory.plugin());
    // Load required agents (Default + custom)
    await this.#loadAgents();
    // Attach all required plugins for functions to operate.
    await this.#attachPlugins(args);
  }

  // Kicks off the agent conversation with the invocation's prompt.
  startAgentCluster() {
    return this.aibitat.start({
      from: USER_AGENT.name,
      to: this.channel ?? WORKSPACE_AGENT.name,
      content: this.invocation.prompt,
    });
  }
}
// Export the handler class for websocket/agent endpoints.
module.exports.AgentHandler = AgentHandler;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/imported.js | server/utils/agents/imported.js | const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../http");
const { isWithin, normalizePath } = require("../files");
const { CollectorApi } = require("../collectorApi");
const pluginsPath =
process.env.NODE_ENV === "development"
? path.resolve(__dirname, "../../storage/plugins/agent-skills")
: path.resolve(process.env.STORAGE_DIR, "plugins", "agent-skills");
const sharedWebScraper = new CollectorApi();
class ImportedPlugin {
constructor(config) {
this.config = config;
this.handlerLocation = path.resolve(
pluginsPath,
this.config.hubId,
"handler.js"
);
delete require.cache[require.resolve(this.handlerLocation)];
this.handler = require(this.handlerLocation);
this.name = config.hubId;
this.startupConfig = {
params: {},
};
}
/**
* Gets the imported plugin handler.
* @param {string} hubId - The hub ID of the plugin.
* @returns {ImportedPlugin} - The plugin handler.
*/
static loadPluginByHubId(hubId) {
const configLocation = path.resolve(
pluginsPath,
normalizePath(hubId),
"plugin.json"
);
if (!this.isValidLocation(configLocation)) return;
const config = safeJsonParse(fs.readFileSync(configLocation, "utf8"));
return new ImportedPlugin(config);
}
static isValidLocation(pathToValidate) {
if (!isWithin(pluginsPath, pathToValidate)) return false;
if (!fs.existsSync(pathToValidate)) return false;
return true;
}
/**
* Checks if the plugin folder exists and if it does not, creates the folder.
*/
static checkPluginFolderExists() {
const dir = path.resolve(pluginsPath);
if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
return;
}
/**
* Loads plugins from `plugins` folder in storage that are custom loaded and defined.
* only loads plugins that are active: true.
* @returns {string[]} - array of plugin names to be loaded later.
*/
static activeImportedPlugins() {
const plugins = [];
this.checkPluginFolderExists();
const folders = fs.readdirSync(path.resolve(pluginsPath));
for (const folder of folders) {
const configLocation = path.resolve(
pluginsPath,
normalizePath(folder),
"plugin.json"
);
if (!this.isValidLocation(configLocation)) continue;
const config = safeJsonParse(fs.readFileSync(configLocation, "utf8"));
if (config.active) plugins.push(`@@${config.hubId}`);
}
return plugins;
}
/**
* Lists all imported plugins.
* @returns {Array} - array of plugin configurations (JSON).
*/
static listImportedPlugins() {
const plugins = [];
this.checkPluginFolderExists();
if (!fs.existsSync(pluginsPath)) return plugins;
const folders = fs.readdirSync(path.resolve(pluginsPath));
for (const folder of folders) {
const configLocation = path.resolve(
pluginsPath,
normalizePath(folder),
"plugin.json"
);
if (!this.isValidLocation(configLocation)) continue;
const config = safeJsonParse(fs.readFileSync(configLocation, "utf8"));
plugins.push(config);
}
return plugins;
}
/**
* Updates a plugin configuration.
* @param {string} hubId - The hub ID of the plugin.
* @param {object} config - The configuration to update.
* @returns {object} - The updated configuration.
*/
static updateImportedPlugin(hubId, config) {
const configLocation = path.resolve(
pluginsPath,
normalizePath(hubId),
"plugin.json"
);
if (!this.isValidLocation(configLocation)) return;
const currentConfig = safeJsonParse(
fs.readFileSync(configLocation, "utf8"),
null
);
if (!currentConfig) return;
const updatedConfig = { ...currentConfig, ...config };
fs.writeFileSync(configLocation, JSON.stringify(updatedConfig, null, 2));
return updatedConfig;
}
/**
* Deletes a plugin. Removes the entire folder of the object.
* @param {string} hubId - The hub ID of the plugin.
* @returns {boolean} - True if the plugin was deleted, false otherwise.
*/
static deletePlugin(hubId) {
if (!hubId) throw new Error("No plugin hubID passed.");
const pluginFolder = path.resolve(pluginsPath, normalizePath(hubId));
if (!this.isValidLocation(pluginFolder)) return;
fs.rmSync(pluginFolder, { recursive: true });
return true;
}
/**
/**
* Validates if the handler.js file exists for the given plugin.
* @param {string} hubId - The hub ID of the plugin.
* @returns {boolean} - True if the handler.js file exists, false otherwise.
*/
static validateImportedPluginHandler(hubId) {
const handlerLocation = path.resolve(
pluginsPath,
normalizePath(hubId),
"handler.js"
);
return this.isValidLocation(handlerLocation);
}
parseCallOptions() {
const callOpts = {};
if (!this.config.setup_args || typeof this.config.setup_args !== "object") {
return callOpts;
}
for (const [param, definition] of Object.entries(this.config.setup_args)) {
if (definition.required && !definition?.value) {
console.log(
`'${param}' required value for '${this.name}' plugin is missing. Plugin may not function or crash agent.`
);
continue;
}
callOpts[param] = definition.value || definition.default || null;
}
return callOpts;
}
plugin(runtimeArgs = {}) {
const customFunctions = this.handler.runtime;
return {
runtimeArgs,
name: this.name,
config: this.config,
setup(aibitat) {
aibitat.function({
super: aibitat,
name: this.name,
config: this.config,
runtimeArgs: this.runtimeArgs,
description: this.config.description,
logger: aibitat?.handlerProps?.log || console.log, // Allows plugin to log to the console.
introspect: aibitat?.introspect || console.log, // Allows plugin to display a "thought" the chat window UI.
runtime: "docker",
webScraper: sharedWebScraper,
examples: this.config.examples ?? [],
parameters: {
$schema: "http://json-schema.org/draft-07/schema#",
type: "object",
properties: this.config.entrypoint.params ?? {},
additionalProperties: false,
},
...customFunctions,
});
},
};
}
/**
* Imports a community item from a URL.
* The community item is a zip file that contains a plugin.json file and handler.js file.
* This function will unzip the file and import the plugin into the agent-skills folder
* based on the hubId found in the plugin.json file.
* The zip file will be downloaded to the pluginsPath folder and then unzipped and finally deleted.
* @param {string} url - The signed URL of the community item zip file.
* @param {object} item - The community item.
* @returns {Promise<object>} - The result of the import.
*/
static async importCommunityItemFromUrl(url, item) {
this.checkPluginFolderExists();
const hubId = item.id;
if (!hubId) return { success: false, error: "No hubId passed to import." };
const zipFilePath = path.resolve(pluginsPath, `${item.id}.zip`);
const pluginFile = item.manifest.files.find(
(file) => file.name === "plugin.json"
);
if (!pluginFile)
return {
success: false,
error: "No plugin.json file found in manifest.",
};
const pluginFolder = path.resolve(pluginsPath, normalizePath(hubId));
if (fs.existsSync(pluginFolder))
console.log(
"ImportedPlugin.importCommunityItemFromUrl - plugin folder already exists - will overwrite"
);
try {
const protocol = new URL(url).protocol.replace(":", "");
const httpLib = protocol === "https" ? require("https") : require("http");
const downloadZipFile = new Promise(async (resolve) => {
try {
console.log(
"ImportedPlugin.importCommunityItemFromUrl - downloading asset from ",
new URL(url).origin
);
const zipFile = fs.createWriteStream(zipFilePath);
const request = httpLib.get(url, function (response) {
response.pipe(zipFile);
zipFile.on("finish", () => {
console.log(
"ImportedPlugin.importCommunityItemFromUrl - downloaded zip file"
);
resolve(true);
});
});
request.on("error", (error) => {
console.error(
"ImportedPlugin.importCommunityItemFromUrl - error downloading zip file: ",
error
);
resolve(false);
});
} catch (error) {
console.error(
"ImportedPlugin.importCommunityItemFromUrl - error downloading zip file: ",
error
);
resolve(false);
}
});
const success = await downloadZipFile;
if (!success)
return { success: false, error: "Failed to download zip file." };
// Unzip the file to the plugin folder
// Note: https://github.com/cthackers/adm-zip?tab=readme-ov-file#electron-original-fs
const AdmZip = require("adm-zip");
const zip = new AdmZip(zipFilePath);
zip.extractAllTo(pluginFolder);
// We want to make sure specific keys are set to the proper values for
// plugin.json so we read and overwrite the file with the proper values.
const pluginJsonPath = path.resolve(pluginFolder, "plugin.json");
const pluginJson = safeJsonParse(fs.readFileSync(pluginJsonPath, "utf8"));
pluginJson.active = false;
pluginJson.hubId = hubId;
fs.writeFileSync(pluginJsonPath, JSON.stringify(pluginJson, null, 2));
console.log(
`ImportedPlugin.importCommunityItemFromUrl - successfully imported plugin to agent-skills/${hubId}`
);
return { success: true, error: null };
} catch (error) {
console.error(
"ImportedPlugin.importCommunityItemFromUrl - error: ",
error
);
return { success: false, error: error.message };
} finally {
if (fs.existsSync(zipFilePath)) fs.unlinkSync(zipFilePath);
}
}
}
module.exports = ImportedPlugin;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/ephemeral.js | server/utils/agents/ephemeral.js | const AIbitat = require("./aibitat");
const AgentPlugins = require("./aibitat/plugins");
const ImportedPlugin = require("./imported");
const MCPCompatibilityLayer = require("../MCP");
const { AgentFlows } = require("../agentFlows");
const { httpSocket } = require("./aibitat/plugins/http-socket.js");
const { User } = require("../../models/user");
const { WorkspaceChats } = require("../../models/workspaceChats");
const { safeJsonParse } = require("../http");
const {
USER_AGENT,
WORKSPACE_AGENT,
agentSkillsFromSystemSettings,
} = require("./defaults");
const { AgentHandler } = require(".");
const {
WorkspaceAgentInvocation,
} = require("../../models/workspaceAgentInvocation");
/**
* This is an instance and functional Agent handler, but it does not utilize
* sessions or websocket's and is instead a singular one-off agent run that does
* not persist between invocations
*/
class EphemeralAgentHandler extends AgentHandler {
  /** @type {string|null} the unique identifier for the agent invocation */
  #invocationUUID = null;
  /** @type {import("@prisma/client").workspaces|null} the workspace to use for the agent */
  #workspace = null;
  /** @type {import("@prisma/client").users["id"]|null} the user id to use for the agent */
  #userId = null;
  /** @type {import("@prisma/client").workspace_threads|null} the workspace thread id to use for the agent */
  #threadId = null;
  /** @type {string|null} the session id to use for the agent */
  #sessionId = null;
  /** @type {string|null} the prompt to use for the agent */
  #prompt = null;
  /** @type {string[]} the functions to load into the agent (Aibitat plugins) */
  #funcsToLoad = [];

  /** @type {AIbitat|null} */
  aibitat = null;
  /** @type {string|null} */
  channel = null;
  /** @type {string|null} */
  provider = null;
  /** @type {string|null} the model to use for the agent */
  model = null;

  /**
   * @param {{
   * uuid: string,
   * workspace: import("@prisma/client").workspaces,
   * prompt: string,
   * userId: import("@prisma/client").users["id"]|null,
   * threadId: import("@prisma/client").workspace_threads["id"]|null,
   * sessionId: string|null
   * }} parameters
   */
  constructor({
    uuid,
    workspace,
    prompt,
    userId = null,
    threadId = null,
    sessionId = null,
  }) {
    super({ uuid });
    this.#invocationUUID = uuid;
    this.#workspace = workspace;
    this.#prompt = prompt;

    // Note: userId for ephemeral agent is only available
    // via the workspace-thread chat endpoints for the API
    // since workspaces can belong to multiple users.
    this.#userId = userId;
    this.#threadId = threadId;
    this.#sessionId = sessionId;
  }

  /** Namespaced console logger for this handler. Uses no instance state. */
  log(text, ...args) {
    console.log(`\x1b[36m[EphemeralAgentHandler]\x1b[0m ${text}`, ...args);
  }

  /** Logs the end-of-invocation marker for this run. */
  closeAlert() {
    this.log(`End ${this.#invocationUUID}::${this.provider}:${this.model}`);
  }

  /**
   * Loads the most recent workspace chat records (scoped to the current
   * user/thread/API session) and converts each into a USER->@agent prompt
   * entry plus an @agent->USER response entry in Aibitat chat format.
   * Returns an empty history on any lookup/parse failure.
   * @param {number} limit - maximum number of chat records to load.
   * @returns {Promise<object[]>}
   */
  async #chatHistory(limit = 10) {
    try {
      const rawHistory = (
        await WorkspaceChats.where(
          {
            workspaceId: this.#workspace.id,
            user_id: this.#userId || null,
            thread_id: this.#threadId || null,
            api_session_id: this.#sessionId,
            include: true,
          },
          limit,
          { id: "desc" }
        )
      ).reverse(); // fetched newest-first, replayed oldest-first

      const agentHistory = [];
      rawHistory.forEach((chatLog) => {
        agentHistory.push(
          {
            from: USER_AGENT.name,
            to: WORKSPACE_AGENT.name,
            content: chatLog.prompt,
            state: "success",
          },
          {
            from: WORKSPACE_AGENT.name,
            to: USER_AGENT.name,
            content: safeJsonParse(chatLog.response)?.text || "",
            state: "success",
          }
        );
      });
      return agentHistory;
    } catch (e) {
      this.log("Error loading chat history", e.message);
      return [];
    }
  }

  /**
   * Attempts to find a fallback provider and model to use if the workspace
   * does not have an explicit `agentProvider` and `agentModel` set.
   * 1. Fallback to the workspace `chatProvider` and `chatModel` if they exist.
   * 2. Fallback to the system `LLM_PROVIDER` and try to load the associated default model via ENV params or a base available model.
   * 3. Otherwise, return null - will likely throw an error the user can act on.
   * @returns {object|null} - An object with provider and model keys.
   */
  #getFallbackProvider() {
    // First, fallback to the workspace chat provider and model if they exist
    if (this.#workspace.chatProvider && this.#workspace.chatModel) {
      return {
        provider: this.#workspace.chatProvider,
        model: this.#workspace.chatModel,
      };
    }

    // If workspace does not have chat provider and model fallback
    // to system provider and try to load provider default model
    const systemProvider = process.env.LLM_PROVIDER;
    const systemModel = this.providerDefault(systemProvider);
    if (systemProvider && systemModel) {
      return {
        provider: systemProvider,
        model: systemModel,
      };
    }

    return null;
  }

  /**
   * Finds or assumes the model preference value to use for API calls.
   * If multi-model loading is supported, we use their agent model selection of the workspace
   * If not supported, we attempt to fallback to the system provider value for the LLM preference
   * and if that fails - we assume a reasonable base model to exist.
   * @returns {string|null} the model preference value to use in API calls
   */
  #fetchModel() {
    // Provider was not explicitly set for workspace, so we are going to run our fallback logic
    // that will set a provider and model for us to use.
    if (!this.provider) {
      const fallback = this.#getFallbackProvider();
      if (!fallback) throw new Error("No valid provider found for the agent.");
      this.provider = fallback.provider; // re-set the provider to the fallback provider so it is not null.
      return fallback.model; // set its defined model based on fallback logic.
    }

    // The provider was explicitly set, so check if the workspace has an agent model set.
    if (this.#workspace.agentModel) return this.#workspace.agentModel;

    // Otherwise, we have no model to use - so guess a default model to use via the provider
    // and it's system ENV params and if that fails - we return either a base model or null.
    return this.providerDefault();
  }

  /**
   * Resolves the provider/model pair for this run (honoring workspace and
   * system fallbacks) and validates provider setup via the parent class.
   * @throws {Error} when no usable provider can be resolved.
   */
  #providerSetupAndCheck() {
    this.provider = this.#workspace.agentProvider ?? null;
    this.model = this.#fetchModel();
    if (!this.provider)
      throw new Error("No valid provider found for the agent.");
    this.log(`Start ${this.#invocationUUID}::${this.provider}:${this.model}`);
    this.checkSetup();
  }

  /**
   * Attaches every identifier in #funcsToLoad to the aibitat instance.
   * Identifier shapes handled, in order: `parent#child` (child of a built-in
   * plugin module), `@@flow_<uuid>` (agent flow), `@@mcp_<name>` (MCP server
   * tool expansion), `@@<hubId>` (imported community skill), and plain names
   * (single-stage built-in plugins). Unknown names are logged and skipped.
   * @param {object} args - runtime args forwarded to plugin option parsing.
   */
  async #attachPlugins(args) {
    for (const name of this.#funcsToLoad) {
      // Load child plugin
      if (name.includes("#")) {
        const [parent, childPluginName] = name.split("#");
        if (!AgentPlugins.hasOwnProperty(parent)) {
          this.log(
            `${parent} is not a valid plugin. Skipping inclusion to agent cluster.`
          );
          continue;
        }

        const childPlugin = AgentPlugins[parent].plugin.find(
          (child) => child.name === childPluginName
        );
        if (!childPlugin) {
          this.log(
            `${parent} does not have child plugin named ${childPluginName}. Skipping inclusion to agent cluster.`
          );
          continue;
        }

        const callOpts = this.parseCallOptions(
          args,
          childPlugin?.startupConfig?.params,
          name
        );
        this.aibitat.use(childPlugin.plugin(callOpts));
        this.log(
          `Attached ${parent}:${childPluginName} plugin to Agent cluster`
        );
        continue;
      }

      // Load flow plugin. This is marked by `@@flow_` in the array of functions to load.
      if (name.startsWith("@@flow_")) {
        const uuid = name.replace("@@flow_", "");
        const plugin = AgentFlows.loadFlowPlugin(uuid, this.aibitat);
        if (!plugin) {
          this.log(
            `Flow ${uuid} not found in flows directory. Skipping inclusion to agent cluster.`
          );
          continue;
        }
        this.aibitat.use(plugin.plugin());
        this.log(
          `Attached flow ${plugin.name} (${plugin.flowName}) plugin to Agent cluster`
        );
        continue;
      }

      // Load MCP plugin. This is marked by `@@mcp_` in the array of functions to load.
      // All sub-tools are loaded here and are denoted by `pluginName:toolName` as their identifier.
      // This will replace the parent MCP server plugin with the sub-tools as child plugins so they
      // can be called directly by the agent when invoked.
      // Since to get to this point, the `activeMCPServers` method has already been called, we can
      // safely assume that the MCP server is running and the tools are available/loaded.
      if (name.startsWith("@@mcp_")) {
        const mcpPluginName = name.replace("@@mcp_", "");
        const plugins =
          await new MCPCompatibilityLayer().convertServerToolsToPlugins(
            mcpPluginName,
            this.aibitat
          );
        if (!plugins) {
          this.log(
            `MCP ${mcpPluginName} not found in MCP server config. Skipping inclusion to agent cluster.`
          );
          continue;
        }

        // Remove the old function from the agent functions directly
        // and push the new ones onto the end of the array so that they are loaded properly.
        this.aibitat.agents.get("@agent").functions = this.aibitat.agents
          .get("@agent")
          .functions.filter((f) => f.name !== name);
        for (const plugin of plugins)
          this.aibitat.agents.get("@agent").functions.push(plugin.name);

        plugins.forEach((plugin) => {
          this.aibitat.use(plugin.plugin());
          this.log(
            `Attached MCP::${plugin.toolName} MCP tool to Agent cluster`
          );
        });
        continue;
      }

      // Load imported plugin. This is marked by `@@` in the array of functions to load.
      // and is the @@hubID of the plugin.
      if (name.startsWith("@@")) {
        const hubId = name.replace("@@", "");
        const valid = ImportedPlugin.validateImportedPluginHandler(hubId);
        if (!valid) {
          this.log(
            `Imported plugin by hubId ${hubId} not found in plugin directory. Skipping inclusion to agent cluster.`
          );
          continue;
        }

        const plugin = ImportedPlugin.loadPluginByHubId(hubId);
        const callOpts = plugin.parseCallOptions();
        this.aibitat.use(plugin.plugin(callOpts));
        this.log(
          `Attached ${plugin.name} (${hubId}) imported plugin to Agent cluster`
        );
        continue;
      }

      // Load single-stage plugin.
      if (!AgentPlugins.hasOwnProperty(name)) {
        this.log(
          `${name} is not a valid plugin. Skipping inclusion to agent cluster.`
        );
        continue;
      }

      const callOpts = this.parseCallOptions(
        args,
        AgentPlugins[name].startupConfig.params
      );
      const AIbitatPlugin = AgentPlugins[name];
      this.aibitat.use(AIbitatPlugin.plugin(callOpts));
      this.log(`Attached ${name} plugin to Agent cluster`);
    }
  }

  /**
   * Registers the default USER and workspace (@agent) agents on the aibitat
   * instance and collects the full list of function identifiers to load
   * (system skills, imported plugins, flows, MCP servers) into #funcsToLoad.
   */
  async #loadAgents() {
    // Default User agent and workspace agent
    this.log(`Attaching user and default agent to Agent cluster.`);
    this.aibitat.agent(USER_AGENT.name, USER_AGENT.getDefinition());

    const user = this.#userId
      ? await User.get({ id: Number(this.#userId) })
      : null;
    this.aibitat.agent(
      WORKSPACE_AGENT.name,
      await WORKSPACE_AGENT.getDefinition(this.provider, this.#workspace, user)
    );

    this.#funcsToLoad = [
      ...(await agentSkillsFromSystemSettings()),
      ...ImportedPlugin.activeImportedPlugins(),
      ...AgentFlows.activeFlowPlugins(),
      ...(await new MCPCompatibilityLayer().activeMCPServers()),
    ];
  }

  /**
   * Resolves provider/model and validates setup. Must be called before
   * createAIbitat().
   * @returns {Promise<EphemeralAgentHandler>} this, for chaining.
   */
  async init() {
    this.#providerSetupAndCheck();
    return this;
  }

  /**
   * Constructs the AIbitat instance, attaches the HTTP streaming plugin,
   * loads the default agents, and attaches all configured function plugins.
   * NOTE(review): the default parameter `{ handler, }` references an
   * out-of-scope `handler` identifier — calling createAIbitat() with no
   * argument would throw a ReferenceError. Callers always pass { handler }.
   * @param {{handler: object}} args - carrier for the HTTP/socket handler.
   */
  async createAIbitat(
    args = {
      handler,
    }
  ) {
    this.aibitat = new AIbitat({
      provider: this.provider ?? "openai",
      model: this.model ?? "gpt-4o",
      chats: await this.#chatHistory(20),
      handlerProps: {
        invocation: {
          workspace: this.#workspace,
          workspace_id: this.#workspace.id,
        },
        log: this.log, // safe unbound: log() reads no instance state
      },
    });

    // Attach HTTP response object if defined for chunk streaming.
    this.log(`Attached ${httpSocket.name} plugin to Agent cluster`);
    this.aibitat.use(
      httpSocket.plugin({
        handler: args.handler,
        muteUserReply: true,
        introspection: true,
      })
    );

    // Load required agents (Default + custom)
    await this.#loadAgents();

    // Attach all required plugins for functions to operate.
    await this.#attachPlugins(args);
  }

  /**
   * Kicks off the agent conversation with the stored prompt, from USER to the
   * workspace agent (or an explicit channel if one was set).
   * @returns {Promise<AIbitat>}
   */
  startAgentCluster() {
    return this.aibitat.start({
      from: USER_AGENT.name,
      to: this.channel ?? WORKSPACE_AGENT.name,
      content: this.#prompt,
    });
  }

  /**
   * Determine if the message provided is an agent invocation.
   * @param {{message:string}} parameters
   * @returns {boolean}
   */
  static isAgentInvocation({ message }) {
    const agentHandles = WorkspaceAgentInvocation.parseAgents(message);
    if (agentHandles.length > 0) return true;
    return false;
  }
}
const EventEmitter = require("node:events");
const { writeResponseChunk } = require("../helpers/chat/responses");
/**
* This is a special EventEmitter specifically used in the Aibitat agent handler
* that enables us to use HTTP to relay all .introspect and .send events back to an
* http handler instead of websockets, like we do on the frontend. This interface is meant to
* mock a websocket interface for the methods used and bind them to an HTTP method so that the developer
* API can invoke agent calls.
*/
class EphemeralEventListener extends EventEmitter {
  /** @type {object[]} every event payload received, in arrival order. */
  messages = [];

  constructor() {
    super();
  }

  /**
   * Receives a JSON-serialized event from the agent handler, records it,
   * and re-emits the parsed payload as a "chunk" event.
   * @param {string} jsonData - JSON string of a single agent event.
   */
  send(jsonData) {
    const payload = JSON.parse(jsonData);
    this.messages.push(payload);
    this.emit("chunk", payload);
  }

  /** Signals that the agent run is over and no further chunks will arrive. */
  close() {
    this.emit("closed");
  }

  /**
   * Compacts all messages in class and returns them in a condensed format.
   * Later non-thought messages overwrite earlier ones, so textResponse is the
   * last non-statusResponse content seen (or null if none arrived).
   * @returns {{thoughts: string[], textResponse: string}}
   */
  packMessages() {
    const thoughts = [];
    let textResponse = null;
    for (const { type, content } of this.messages) {
      if (type === "statusResponse") thoughts.push(content);
      else textResponse = content;
    }
    return { thoughts, textResponse };
  }

  /**
   * Waits on the HTTP plugin to emit the 'closed' event from the agentHandler
   * so that we can compact and return all the messages in the current queue.
   * @returns {Promise<{thoughts: string[], textResponse: string}>}
   */
  async waitForClose() {
    return new Promise((resolve) =>
      this.once("closed", () => resolve(this.packMessages()))
    );
  }

  /**
   * Streams the events with `writeResponseChunk` over HTTP chunked encoding
   * and returns on the close event emission.
   * ----------
   * DevNote: Agents do not stream so in here we are simply
   * emitting the thoughts and text response as soon as we get them.
   * @param {import("express").Response} response
   * @param {string} uuid - Unique identifier that is the same across chunks.
   * @returns {Promise<{thoughts: string[], textResponse: string}>}
   */
  async streamAgentEvents(response, uuid) {
    const relayChunk = (data) => {
      const isThought = data.type === "statusResponse";
      return writeResponseChunk(response, {
        id: uuid,
        type: isThought ? "agentThought" : "textResponse",
        ...(isThought
          ? { thought: data.content }
          : { textResponse: data.content }),
        sources: [],
        attachments: [],
        close: !isThought,
        error: null,
        animate: isThought,
      });
    };

    this.on("chunk", relayChunk);
    // Wait for close and after remove chunk listener
    return this.waitForClose().then((finalPayload) => {
      this.removeListener("chunk", relayChunk);
      return finalPayload;
    });
  }
}
module.exports = { EphemeralAgentHandler, EphemeralEventListener };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/defaults.js | server/utils/agents/defaults.js | const AgentPlugins = require("./aibitat/plugins");
const { SystemSettings } = require("../../models/systemSettings");
const { safeJsonParse } = require("../http");
const Provider = require("./aibitat/providers/ai-provider");
const ImportedPlugin = require("./imported");
const { AgentFlows } = require("../agentFlows");
const MCPCompatibilityLayer = require("../MCP");
const { SystemPromptVariables } = require("../../models/systemPromptVariables");
// This is a list of skills that are built-in and default enabled.
// Admins can opt out of individual entries via the "disabled_agent_skills"
// system setting; everything else here is always loaded.
const DEFAULT_SKILLS = [
  AgentPlugins.memory.name,
  AgentPlugins.docSummarizer.name,
  AgentPlugins.webScraping.name,
];
/**
 * Static persona injected as the human participant in every agent session.
 * Its interrupt policy is ALWAYS so control returns to the real user for any
 * question or decision.
 */
const USER_AGENT = {
  name: "USER",
  getDefinition: () => ({
    interrupt: "ALWAYS",
    role: "I am the human monitor and oversee this chat. Any questions on action or decision making should be directed to me.",
  }),
};
/**
 * The workspace-scoped agent persona ("@agent"). Its function list is the
 * union of enabled system skills, imported community plugins, active agent
 * flows, and active MCP servers — recomputed on every getDefinition() call.
 */
const WORKSPACE_AGENT = {
  name: "@agent",
  /**
   * Get the definition for the workspace agent with its role (prompt) and functions in Aibitat format
   * @param {string} provider
   * @param {import("@prisma/client").workspaces | null} workspace
   * @param {import("@prisma/client").users | null} user
   * @returns {Promise<{ role: string, functions: object[] }>}
   */
  getDefinition: async (provider = null, workspace = null, user = null) => {
    return {
      role: await Provider.systemPrompt({ provider, workspace, user }),
      functions: [
        ...(await agentSkillsFromSystemSettings()),
        ...ImportedPlugin.activeImportedPlugins(),
        ...AgentFlows.activeFlowPlugins(),
        ...(await new MCPCompatibilityLayer().activeMCPServers()),
      ],
    };
  },
};
/**
 * Fetches and preloads the names/identifiers for plugins that will be
 * dynamically loaded later.
 * Combines default-enabled built-in skills (minus admin opt-outs) with any
 * explicitly enabled optional skills.
 * @returns {Promise<string[]>}
 */
async function agentSkillsFromSystemSettings() {
  const skills = [];

  // Built-in skills are on by default; honor any explicit opt-outs.
  const disabledSkills = safeJsonParse(
    await SystemSettings.getValueOrFallback(
      { label: "disabled_agent_skills" },
      "[]"
    ),
    []
  );
  for (const skill of DEFAULT_SKILLS) {
    if (disabledSkills.includes(skill)) continue;
    skills.push(AgentPlugins[skill].name);
  }

  // Optional configurable skills the admin explicitly enabled.
  const enabledSkills = safeJsonParse(
    await SystemSettings.getValueOrFallback(
      { label: "default_agent_skills" },
      "[]"
    ),
    []
  );
  for (const skillName of enabledSkills) {
    if (!AgentPlugins.hasOwnProperty(skillName)) continue;
    const definition = AgentPlugins[skillName];

    // A plugin module with many sub-children plugins registers each child
    // via the `${parent}#${child}` naming convention.
    if (Array.isArray(definition.plugin)) {
      for (const subPlugin of definition.plugin) {
        skills.push(`${definition.name}#${subPlugin.name}`);
      }
      continue;
    }

    // This is a normal single-stage plugin.
    skills.push(definition.name);
  }
  return skills;
}
module.exports = {
USER_AGENT,
WORKSPACE_AGENT,
agentSkillsFromSystemSettings,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/aibitat/index.js | server/utils/agents/aibitat/index.js | const { EventEmitter } = require("events");
const { APIError } = require("./error.js");
const Providers = require("./providers/index.js");
const { Telemetry } = require("../../../models/telemetry.js");
const { v4 } = require("uuid");
/**
* AIbitat is a class that manages the conversation between agents.
* It is designed to solve a task with LLM.
*
* Guiding the chat through a graph of agents.
*/
class AIbitat {
emitter = new EventEmitter();
/**
* Temporary flag to skip the handleExecution function
* This is used to return the result of a flow execution directly to the chat
* without going through the handleExecution function (resulting in more LLM processing)
*
* Setting Skip execution to true will prevent any further tool calls from being executed.
* This is useful for flow executions that need to return a result directly to the chat but
* can also prevent tool-call chaining.
*
* @type {boolean}
*/
skipHandleExecution = false;
provider = null;
defaultProvider = null;
defaultInterrupt;
maxRounds;
_chats;
agents = new Map();
channels = new Map();
functions = new Map();
  /**
   * @param {object} [props]
   * @param {object[]} [props.chats] - seed chat history entries.
   * @param {"NEVER"|"ALWAYS"} [props.interrupt] - default interrupt policy for all agents.
   * @param {number} [props.maxRounds] - global cap on conversational rounds.
   * @param {string} [props.provider] - default LLM provider name.
   * @param {object} [props.handlerProps] - props inherited from the invoking handler.
   */
  constructor(props = {}) {
    const {
      chats = [],
      interrupt = "NEVER",
      maxRounds = 100,
      provider = "openai",
      handlerProps = {}, // Inherited props we can spread so aibitat can access.
      ...rest
    } = props;
    this._chats = chats;
    this.defaultInterrupt = interrupt;
    this.maxRounds = maxRounds;
    this.handlerProps = handlerProps;
    // Anything not destructured above (e.g. `model`) rides along as part of
    // the default provider configuration.
    this.defaultProvider = {
      provider,
      ...rest,
    };
    this.provider = this.defaultProvider.provider;
    this.model = this.defaultProvider.model;
  }
  /**
   * Get the chat history between agents and channels.
   * @returns {object[]} the live (mutable) chat log — not a defensive copy.
   */
  get chats() {
    return this._chats;
  }
  /**
   * Install a plugin by invoking its `setup` hook with this instance.
   * @param {{ setup: (aibitat: AIbitat) => void }} plugin
   * @returns {AIbitat} this, for chaining.
   */
  use(plugin) {
    plugin.setup(this);
    return this;
  }
  /**
   * Add a new agent to the AIbitat.
   * Registering the same name twice silently replaces the prior config.
   * @param {string} name - unique agent identifier.
   * @param {object} config - agent configuration (role, interrupt, functions, ...).
   * @returns {AIbitat} this, for chaining.
   */
  agent(name = "", config = {}) {
    this.agents.set(name, config);
    return this;
  }
/**
* Add a new channel to the AIbitat.
*
* @param name
* @param members
* @param config
* @returns
*/
channel(name = "", members = [""], config = {}) {
this.channels.set(name, {
members,
...config,
});
return this;
}
  /**
   * Get the specific agent configuration.
   *
   * @param agent The name of the agent.
   * @throws When the agent configuration is not found.
   * @returns The agent configuration, with a generic assistant role as the
   * default when the registered config does not supply one.
   */
  getAgentConfig(agent = "") {
    const config = this.agents.get(agent);
    if (!config) {
      throw new Error(`Agent configuration "${agent}" not found`);
    }
    return {
      role: "You are a helpful AI assistant.",
      // role: `You are a helpful AI assistant.
      // Solve tasks using your coding and language skills.
      // In the following cases, suggest typescript code (in a typescript coding block) or shell script (in a sh coding block) for the user to execute.
      // 1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
      // 2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly.
      // Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill.
      // When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user.
      // If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user.
      // If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
      // When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
      // Reply "TERMINATE" when everything is done.`,
      ...config,
    };
  }
/**
* Get the specific channel configuration.
*
* @param channel The name of the channel.
* @throws When the channel configuration is not found.
* @returns The channel configuration.
*/
getChannelConfig(channel = "") {
const config = this.channels.get(channel);
if (!config) {
throw new Error(`Channel configuration "${channel}" not found`);
}
return {
maxRounds: 10,
role: "",
...config,
};
}
/**
* Get the members of a group.
* @throws When the group is not defined as an array in the connections.
* @param node The name of the group.
* @returns The members of the group.
*/
getGroupMembers(node = "") {
const group = this.getChannelConfig(node);
return group.members;
}
  /**
   * Triggered when a plugin, socket, or command is aborted.
   * @param {(arg: null, aibitat: AIbitat) => void} listener
   * @returns {AIbitat} this, for chaining.
   */
  onAbort(listener = () => null) {
    this.emitter.on("abort", listener);
    return this;
  }
  /**
   * Abort the running of any plugins that may still be pending (Langchain summarize).
   * Emits "abort" with a null payload and this instance.
   */
  abort() {
    this.emitter.emit("abort", null, this);
  }
  /**
   * Triggered when a chat is terminated. After this, the chat can't be continued.
   * @param {(node: string, aibitat: AIbitat) => void} listener
   * @returns {AIbitat} this, for chaining.
   */
  onTerminate(listener = () => null) {
    this.emitter.on("terminate", listener);
    return this;
  }
  /**
   * Terminate the chat. After this, the chat can't be continued.
   * @param {string} node Last node to chat with.
   */
  terminate(node = "") {
    this.emitter.emit("terminate", node, this);
  }
  /**
   * Triggered when a chat is interrupted by a node.
   * @param {(route: object, aibitat: AIbitat) => void} listener
   * @returns {AIbitat} this, for chaining.
   */
  onInterrupt(listener = () => null) {
    this.emitter.on("interrupt", listener);
    return this;
  }
/**
* Interruption the chat.
*
* @param route The nodes that participated in the interruption.
* @returns
*/
interrupt(route) {
this._chats.push({
...route,
state: "interrupt",
});
this.emitter.emit("interrupt", route, this);
}
  /**
   * Triggered when a message is added to the chat history.
   * This can either be the first message or a reply to a message.
   * @param {(chat: object, aibitat: AIbitat) => void} listener
   * @returns {AIbitat} this, for chaining.
   */
  onMessage(listener = (chat) => null) {
    this.emitter.on("message", listener);
    return this;
  }
/**
* Register a new successful message in the chat history.
* This will trigger the `onMessage` event.
*
* @param message
*/
newMessage(message) {
const chat = {
...message,
state: "success",
};
this._chats.push(chat);
this.emitter.emit("message", chat, this);
}
  /**
   * Triggered when an error occurs during the chat.
   * Listeners receive the raw error plus the error chat entry recorded for it.
   * @returns {AIbitat} this, for chaining.
   */
  onError(
    listener = (
      /**
       * The error that occurred.
       *
       * Native errors are:
       * - `APIError`
       * - `AuthorizationError`
       * - `UnknownError`
       * - `RateLimitError`
       * - `ServerError`
       */
      error = null,
      /**
       * The message when the error occurred.
       */
      {}
    ) => null
  ) {
    this.emitter.on("replyError", listener);
    return this;
  }
/**
* Register an error in the chat history.
* This will trigger the `onError` event.
*
* @param route
* @param error
*/
newError(route, error) {
const chat = {
...route,
content: error instanceof Error ? error.message : String(error),
state: "error",
};
this._chats.push(chat);
this.emitter.emit("replyError", error, chat);
}
  /**
   * Triggered when a chat is started via `start()`.
   * @param {(chat: object, aibitat: AIbitat) => void} listener
   * @returns {AIbitat} this, for chaining.
   */
  onStart(listener = (chat, aibitat) => null) {
    this.emitter.on("start", listener);
    return this;
  }
/**
* Start a new chat.
*
* @param message The message to start the chat.
*/
async start(message) {
// register the message in the chat history
this.newMessage(message);
this.emitter.emit("start", message, this);
// ask the node to reply
await this.chat({
to: message.from,
from: message.to,
});
return this;
}
  /**
   * Recursively chat between two nodes until termination, interruption, or
   * the maximum number of rounds is reached.
   * If `route.from` is a channel, the next speaker is picked from the group;
   * otherwise the recipient replies directly and the roles swap each turn.
   * @param {{from: string, to: string}} route
   * @param {boolean} keepAlive Whether to keep the chat alive by recursing.
   */
  async chat(route, keepAlive = true) {
    // check if the message is for a group
    // if it is, select the next node to chat with from the group
    // and then ask them to reply.
    if (this.channels.get(route.from)) {
      // select a node from the group
      let nextNode;
      try {
        nextNode = await this.selectNext(route.from);
      } catch (error) {
        // provider failures are recorded as chat errors; anything else bubbles up
        if (error instanceof APIError) {
          return this.newError({ from: route.from, to: route.to }, error);
        }
        throw error;
      }

      if (!nextNode) {
        // TODO: should it throw an error or keep the chat alive when there is no node to chat with in the group?
        // maybe it should wrap up the chat and reply to the original node
        // For now, it will terminate the chat
        this.terminate(route.from);
        return;
      }

      const nextChat = {
        from: nextNode,
        to: route.from,
      };

      if (this.shouldAgentInterrupt(nextNode)) {
        this.interrupt(nextChat);
        return;
      }

      // get chats only from the group's nodes
      const history = this.getHistory({ to: route.from });
      const group = this.getGroupMembers(route.from);
      const rounds = history.filter((chat) => group.includes(chat.from)).length;

      const { maxRounds } = this.getChannelConfig(route.from);
      if (rounds >= maxRounds) {
        this.terminate(route.to);
        return;
      }

      await this.chat(nextChat);
      return;
    }

    // If it's a direct message, reply to the message
    let reply = "";
    try {
      reply = await this.reply(route);
    } catch (error) {
      if (error instanceof APIError) {
        return this.newError({ from: route.from, to: route.to }, error);
      }
      throw error;
    }

    if (
      reply === "TERMINATE" ||
      this.hasReachedMaximumRounds(route.from, route.to)
    ) {
      this.terminate(route.to);
      return;
    }

    // swap roles so the other party answers next
    const newChat = { to: route.from, from: route.to };
    if (
      reply === "INTERRUPT" ||
      (this.agents.get(route.to) && this.shouldAgentInterrupt(route.to))
    ) {
      this.interrupt(newChat);
      return;
    }

    if (keepAlive) {
      // keep the chat alive by replying to the other node
      await this.chat(newChat, true);
    }
  }
/**
* Check if the agent should interrupt the chat based on its configuration.
*
* @param agent
* @returns {boolean} Whether the agent should interrupt the chat.
*/
shouldAgentInterrupt(agent = "") {
const config = this.getAgentConfig(agent);
return this.defaultInterrupt === "ALWAYS" || config.interrupt === "ALWAYS";
}
  /**
   * Select the next node to chat with from a group. The node will be selected based on the history of chats.
   * It will select the node that has not reached the maximum number of rounds yet and has not chatted with the channel in the last round.
   * If it could not determine the next node, it will return a random node.
   *
   * @param channel The name of the group.
   * @returns The name of the node to chat with, or undefined when no member is available.
   */
  async selectNext(channel = "") {
    // get all members of the group
    const nodes = this.getGroupMembers(channel);
    const channelConfig = this.getChannelConfig(channel);

    // TODO: move this to when the group is created
    // warn if the group is underpopulated
    if (nodes.length < 3) {
      console.warn(
        `- Group (${channel}) is underpopulated with ${nodes.length} agents. Direct communication would be more efficient.`
      );
    }

    // get the nodes that have not reached the maximum number of rounds
    const availableNodes = nodes.filter(
      (node) => !this.hasReachedMaximumRounds(channel, node)
    );

    // remove the last node that chatted with the channel, so it doesn't chat again
    const lastChat = this._chats.filter((c) => c.to === channel).at(-1);
    if (lastChat) {
      const index = availableNodes.indexOf(lastChat.from);
      if (index > -1) {
        availableNodes.splice(index, 1);
      }
    }

    // TODO: what should it do when there is no node to chat with?
    if (!availableNodes.length) return;

    // get the provider that will be used for the channel
    // if the channel has a provider, use that otherwise
    // use the GPT-4 because it has a better reasoning
    const provider = this.getProviderForConfig({
      // @ts-expect-error
      model: "gpt-4",
      ...this.defaultProvider,
      ...channelConfig,
    });
    provider.attachHandlerProps(this.handlerProps);
    const history = this.getHistory({ to: channel });

    // build the messages to send to the provider
    // NOTE: the template literal below is the literal prompt text sent to the
    // LLM; its embedded line breaks are intentional.
    const messages = [
      {
        role: "system",
        content: channelConfig.role,
      },
      {
        role: "user",
        content: `You are in a role play game. The following roles are available:
${availableNodes
  .map((node) => `@${node}: ${this.getAgentConfig(node).role}`)
  .join("\n")}.
Read the following conversation.
CHAT HISTORY
${history.map((c) => `@${c.from}: ${c.content}`).join("\n")}
Then select the next role from that is going to speak next.
Only return the role.
`,
      },
    ];

    // ask the provider to select the next node to chat with
    // and remove the @ from the response
    const { result } = await provider.complete(messages);
    const name = result?.replace(/^@/g, "");
    if (this.agents.get(name)) return name;

    // if the name is not in the nodes, return a random node
    return availableNodes[Math.floor(Math.random() * availableNodes.length)];
  }
/**
*
* @param {string} pluginName this name of the plugin being called
* @returns string of the plugin to be called compensating for children denoted by # in the string.
* eg: sql-agent:list-database-connections
* or is a custom plugin
* eg: @@custom-plugin-name
*/
#parseFunctionName(pluginName = "") {
if (!pluginName.includes("#") && !pluginName.startsWith("@@"))
return pluginName;
if (pluginName.startsWith("@@")) return pluginName.replace("@@", "");
return pluginName.split("#")[1];
}
/**
* Check if the chat has reached the maximum number of rounds.
*/
hasReachedMaximumRounds(from = "", to = "") {
return this.getHistory({ from, to }).length >= this.maxRounds;
}
  /**
   * Get the chat history between two nodes or all chats to/from a node.
   * Channel destinations get a single synthetic "user" prompt embedding the
   * whole group transcript; direct routes map each stored message onto
   * user/assistant roles relative to the replying node.
   *
   * @param route The chat route ({ from, to }).
   * @returns Provider-ready message array.
   */
  getOrFormatNodeChatHistory(route) {
    if (this.channels.get(route.to)) {
      return [
        {
          role: "user",
          content: `You are in a whatsapp group. Read the following conversation and then reply.
Do not add introduction or conclusion to your reply because this will be a continuous conversation. Don't introduce yourself.
CHAT HISTORY
${this.getHistory({ to: route.to })
  .map((c) => `@${c.from}: ${c.content}`)
  .join("\n")}
@${route.from}:`,
        },
      ];
    }
    // This is normal chat between user<->agent
    return this.getHistory(route).map((c) => ({
      content: c.content,
      // Messages authored by the node being replied to read as "user" input.
      role: c.from === route.to ? "user" : "assistant",
    }));
  }
  /**
   * Ask the for the AI provider to generate a reply to the chat.
   * This will load the functions that the node can call and the chat history.
   * Then before calling the provider, it will check if the provider supports agent streaming.
   * If it does, it will call the provider asynchronously (streaming).
   * Otherwise, it will call the provider synchronously (non-streaming).
   * `.supportsAgentStreaming` is used to determine if the provider supports agent streaming on the respective provider.
   *
   * @param route.to The node that sent the chat.
   * @param route.from The node that will reply to the chat.
   * @returns {Promise<string>} The reply content (also recorded in history).
   */
  async reply(route) {
    const fromConfig = this.getAgentConfig(route.from);
    const chatHistory = this.getOrFormatNodeChatHistory(route);
    // System role prompt first, then the formatted conversation so far.
    const messages = [
      {
        content: fromConfig.role,
        role: "system",
      },
      ...chatHistory,
    ];
    // get the functions that the node can call
    // (unregistered names resolve to undefined and are filtered out)
    const functions = fromConfig.functions
      ?.map((name) => this.functions.get(this.#parseFunctionName(name)))
      .filter((a) => !!a);
    const provider = this.getProviderForConfig({
      ...this.defaultProvider,
      ...fromConfig,
    });
    provider.attachHandlerProps(this.handlerProps);
    let content;
    if (provider.supportsAgentStreaming) {
      this.handlerProps.log?.(
        "[DEBUG] Provider supports agent streaming - will use async execution!"
      );
      content = await this.handleAsyncExecution(
        provider,
        messages,
        functions,
        route.from
      );
    } else {
      this.handlerProps.log?.(
        "[DEBUG] Provider does not support agent streaming - will use synchronous execution!"
      );
      content = await this.handleExecution(
        provider,
        messages,
        functions,
        route.from
      );
    }
    // Record the reply in the chat history before returning it.
    this.newMessage({ ...route, content });
    return content;
  }
  /**
   * Handle the async (streaming) execution of the provider
   * with tool calls. Recurses after each tool call, feeding the tool result
   * back to the provider until it answers with plain text (or a direct-output
   * tool short-circuits the loop).
   *
   * @param provider The AI provider instance to stream from.
   * @param messages The chat messages to send to the provider.
   * @param functions The tool definitions the node may call.
   * @param byAgent Name of the agent making the call (used for attribution).
   *
   * @returns {Promise<string>}
   */
  async handleAsyncExecution(
    provider,
    messages = [],
    functions = [],
    byAgent = null
  ) {
    // Forward provider stream events to the connected websocket (if any).
    const eventHandler = (type, data) => {
      this?.socket?.send(type, data);
    };
    /** @type {{ functionCall: { name: string, arguments: string }, textResponse: string }} */
    const completionStream = await provider.stream(
      messages,
      functions,
      eventHandler
    );
    if (completionStream.functionCall) {
      const { name, arguments: args } = completionStream.functionCall;
      const fn = this.functions.get(name);
      // if provider hallucinated on the function name
      // ask the provider to complete again
      if (!fn) {
        return await this.handleAsyncExecution(
          provider,
          [
            ...messages,
            {
              name,
              role: "function",
              content: `Function "${name}" not found. Try again.`,
              originalFunctionCall: completionStream.functionCall,
            },
          ],
          functions,
          byAgent
        );
      }
      // Execute the function and return the result to the provider
      fn.caller = byAgent || "agent";
      // If provider is verbose, log the tool call to the frontend
      if (provider?.verbose) {
        this?.introspect?.(
          `${fn.caller} is executing \`${name}\` tool ${JSON.stringify(args, null, 2)}`
        );
      }
      // Always log the tool call to the console for debugging purposes
      this.handlerProps?.log?.(
        `[debug]: ${fn.caller} is attempting to call \`${name}\` tool ${JSON.stringify(args, null, 2)}`
      );
      const result = await fn.handler(args);
      Telemetry.sendTelemetry("agent_tool_call", { tool: name }, null, true);
      /**
       * If the tool call has direct output enabled, return the result directly to the chat
       * without any further processing and no further tool calls will be run.
       * For streaming, we need to return the result directly to the chat via the event handler
       * or else no response will be sent to the chat.
       */
      if (this.skipHandleExecution) {
        this.skipHandleExecution = false; // reset the flag to prevent next tool call from being skipped
        this?.introspect?.(
          `The tool call has direct output enabled! The result will be returned directly to the chat without any further processing and no further tool calls will be run.`
        );
        this?.introspect?.(`Tool use completed.`);
        this.handlerProps?.log?.(
          `${fn.caller} tool call resulted in direct output! Returning raw result as string. NO MORE TOOL CALLS WILL BE EXECUTED.`
        );
        // NOTE(review): `v4()` presumably comes from the uuid import at the
        // top of this file — confirm it is in scope here.
        eventHandler?.("reportStreamEvent", {
          type: "fullTextResponse",
          uuid: v4(),
          content: result,
        });
        return result;
      }
      // Feed the tool result back to the provider and continue the loop.
      return await this.handleAsyncExecution(
        provider,
        [
          ...messages,
          {
            name,
            role: "function",
            content: result,
            originalFunctionCall: completionStream.functionCall,
          },
        ],
        functions,
        byAgent
      );
    }
    return completionStream?.textResponse;
  }
  /**
   * Handle the synchronous (non-streaming) execution of the provider
   * with tool calls. Recurses after each tool call, feeding the tool result
   * back to the provider until it answers with plain text (or a direct-output
   * tool short-circuits the loop).
   *
   * @param provider The AI provider instance to complete with.
   * @param messages The chat messages to send to the provider.
   * @param functions The tool definitions the node may call.
   * @param byAgent Name of the agent making the call (used for attribution).
   *
   * @returns {Promise<string>}
   */
  async handleExecution(
    provider,
    messages = [],
    functions = [],
    byAgent = null
  ) {
    // get the chat completion
    const completion = await provider.complete(messages, functions);
    if (completion.functionCall) {
      const { name, arguments: args } = completion.functionCall;
      const fn = this.functions.get(name);
      // if provider hallucinated on the function name
      // ask the provider to complete again
      if (!fn) {
        return await this.handleExecution(
          provider,
          [
            ...messages,
            {
              name,
              role: "function",
              content: `Function "${name}" not found. Try again.`,
              originalFunctionCall: completion.functionCall,
            },
          ],
          functions,
          byAgent
        );
      }
      // Execute the function and return the result to the provider
      fn.caller = byAgent || "agent";
      // If provider is verbose, log the tool call to the frontend
      // NOTE(review): unlike handleAsyncExecution, these log lines omit the
      // serialized tool arguments — confirm whether that asymmetry is
      // intentional before unifying.
      if (provider?.verbose) {
        this?.introspect?.(
          `[debug]: ${fn.caller} is attempting to call \`${name}\` tool`
        );
      }
      // Always log the tool call to the console for debugging purposes
      this.handlerProps?.log?.(
        `[debug]: ${fn.caller} is attempting to call \`${name}\` tool`
      );
      const result = await fn.handler(args);
      Telemetry.sendTelemetry("agent_tool_call", { tool: name }, null, true);
      // If the tool call has direct output enabled, return the result directly to the chat
      // without any further processing and no further tool calls will be run.
      if (this.skipHandleExecution) {
        this.skipHandleExecution = false; // reset the flag to prevent next tool call from being skipped
        this?.introspect?.(
          `The tool call has direct output enabled! The result will be returned directly to the chat without any further processing and no further tool calls will be run.`
        );
        this?.introspect?.(`Tool use completed.`);
        this.handlerProps?.log?.(
          `${fn.caller} tool call resulted in direct output! Returning raw result as string. NO MORE TOOL CALLS WILL BE EXECUTED.`
        );
        return result;
      }
      // Feed the tool result back to the provider and continue the loop.
      return await this.handleExecution(
        provider,
        [
          ...messages,
          {
            name,
            role: "function",
            content: result,
            originalFunctionCall: completion.functionCall,
          },
        ],
        functions,
        byAgent
      );
    }
    return completion?.textResponse;
  }
/**
* Continue the chat from the last interruption.
* If the last chat was not an interruption, it will throw an error.
* Provide a feedback where it was interrupted if you want to.
*
* @param feedback The feedback to the interruption if any.
* @returns
*/
async continue(feedback) {
const lastChat = this._chats.at(-1);
if (!lastChat || lastChat.state !== "interrupt") {
throw new Error("No chat to continue");
}
// remove the last chat's that was interrupted
this._chats.pop();
const { from, to } = lastChat;
if (this.hasReachedMaximumRounds(from, to)) {
throw new Error("Maximum rounds reached");
}
if (feedback) {
const message = {
from,
to,
content: feedback,
};
// register the message in the chat history
this.newMessage(message);
// ask the node to reply
await this.chat({
to: message.from,
from: message.to,
});
} else {
await this.chat({ from, to });
}
return this;
}
/**
* Retry the last chat that threw an error.
* If the last chat was not an error, it will throw an error.
*/
async retry() {
const lastChat = this._chats.at(-1);
if (!lastChat || lastChat.state !== "error") {
throw new Error("No chat to retry");
}
// remove the last chat's that threw an error
const { from, to } = this?._chats?.pop();
await this.chat({ from, to });
return this;
}
/**
* Get the chat history between two nodes or all chats to/from a node.
*/
getHistory({ from, to }) {
return this._chats.filter((chat) => {
const isSuccess = chat.state === "success";
// return all chats to the node
if (!from) {
return isSuccess && chat.to === to;
}
// get all chats from the node
if (!to) {
return isSuccess && chat.from === from;
}
// check if the chat is between the two nodes
const hasSent = chat.from === from && chat.to === to;
const hasReceived = chat.from === to && chat.to === from;
const mutual = hasSent || hasReceived;
return isSuccess && mutual;
});
}
/**
* Get provider based on configurations.
* If the provider is a string, it will return the default provider for that string.
*
* @param config The provider configuration.
* @returns {Providers.OpenAIProvider} The provider instance.
*/
getProviderForConfig(config) {
if (typeof config.provider === "object") return config.provider;
switch (config.provider) {
case "openai":
return new Providers.OpenAIProvider({ model: config.model });
case "anthropic":
return new Providers.AnthropicProvider({ model: config.model });
case "lmstudio":
return new Providers.LMStudioProvider({ model: config.model });
case "ollama":
return new Providers.OllamaProvider({ model: config.model });
case "groq":
return new Providers.GroqProvider({ model: config.model });
case "togetherai":
return new Providers.TogetherAIProvider({ model: config.model });
case "azure":
return new Providers.AzureOpenAiProvider({ model: config.model });
case "koboldcpp":
return new Providers.KoboldCPPProvider({});
case "localai":
return new Providers.LocalAIProvider({ model: config.model });
case "openrouter":
return new Providers.OpenRouterProvider({ model: config.model });
case "mistral":
return new Providers.MistralProvider({ model: config.model });
case "generic-openai":
return new Providers.GenericOpenAiProvider({ model: config.model });
case "perplexity":
return new Providers.PerplexityProvider({ model: config.model });
case "textgenwebui":
return new Providers.TextWebGenUiProvider({});
case "bedrock":
return new Providers.AWSBedrockProvider({});
case "fireworksai":
return new Providers.FireworksAIProvider({ model: config.model });
case "nvidia-nim":
return new Providers.NvidiaNimProvider({ model: config.model });
case "moonshotai":
return new Providers.MoonshotAiProvider({ model: config.model });
case "deepseek":
return new Providers.DeepSeekProvider({ model: config.model });
case "litellm":
return new Providers.LiteLLMProvider({ model: config.model });
case "apipie":
return new Providers.ApiPieProvider({ model: config.model });
case "xai":
return new Providers.XAIProvider({ model: config.model });
case "zai":
return new Providers.ZAIProvider({ model: config.model });
case "novita":
return new Providers.NovitaProvider({ model: config.model });
case "ppio":
return new Providers.PPIOProvider({ model: config.model });
case "gemini":
return new Providers.GeminiProvider({ model: config.model });
case "dpais":
return new Providers.DellProAiStudioProvider({ model: config.model });
case "cometapi":
return new Providers.CometApiProvider({ model: config.model });
case "foundry":
return new Providers.FoundryProvider({ model: config.model });
case "giteeai":
return new Providers.GiteeAIProvider({ model: config.model });
case "cohere":
return new Providers.CohereProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`
);
}
}
/**
* Register a new function to be called by the AIbitat agents.
* You are also required to specify the which node can call the function.
* @param functionConfig The function configuration.
*/
function(functionConfig) {
this.functions.set(functionConfig.name, functionConfig);
return this;
}
}
module.exports = AIbitat;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/aibitat/error.js | server/utils/agents/aibitat/error.js | class AIbitatError extends Error {}
/**
 * Error raised when an AI provider API call fails.
 * The explicit pass-through constructor was redundant — a subclass without a
 * constructor automatically inherits the parent's.
 */
class APIError extends AIbitatError {}
/**
 * The error when the AI provider returns an error that should be treated as something
 * that should be retried.
 */
class RetryError extends APIError {}
// NOTE(review): AIbitatError (the common base) is not exported here — confirm
// no caller needs to catch it directly.
module.exports = {
  APIError,
  RetryError,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/aibitat/utils/summarize.js | server/utils/agents/aibitat/utils/summarize.js | const { loadSummarizationChain } = require("langchain/chains");
const { PromptTemplate } = require("@langchain/core/prompts");
const { RecursiveCharacterTextSplitter } = require("@langchain/textsplitters");
const Provider = require("../providers/ai-provider");
/**
* @typedef {Object} LCSummarizationConfig
* @property {string} provider The LLM to use for summarization (inherited)
* @property {string} model The LLM Model to use for summarization (inherited)
* @property {AbortController['signal']} controllerSignal Abort controller to stop recursive summarization
* @property {string} content The text content of the text to summarize
*/
/**
 * Summarize content using LLM LC-Chain call
 * @param {LCSummarizationConfig} config The summarization configuration (provider, model, abort signal, and content).
 * @returns {Promise<string>} The summarized content.
 */
async function summarizeContent({
  provider = "openai",
  model = null,
  controllerSignal,
  content,
}) {
  const llm = Provider.LangChainChatModel(provider, {
    temperature: 0,
    model: model,
  });
  // Chunk large inputs so each piece fits comfortably in the model context.
  const textSplitter = new RecursiveCharacterTextSplitter({
    separators: ["\n\n", "\n"],
    chunkSize: 10000,
    chunkOverlap: 500,
  });
  const docs = await textSplitter.createDocuments([content]);
  const mapPrompt = `
    Write a detailed summary of the following text for a research purpose:
    "{text}"
    SUMMARY:
    `;
  const mapPromptTemplate = new PromptTemplate({
    template: mapPrompt,
    inputVariables: ["text"],
  });
  // This convenience function creates a document chain prompted to summarize a set of documents.
  const chain = loadSummarizationChain(llm, {
    type: "map_reduce",
    combinePrompt: mapPromptTemplate,
    combineMapPrompt: mapPromptTemplate,
    verbose: process.env.NODE_ENV === "development",
  });
  const res = await chain.call({
    // Allow the recursive summarization to be aborted mid-flight.
    ...(controllerSignal ? { signal: controllerSignal } : {}),
    input_documents: docs,
  });
  return res.text;
}
module.exports = { summarizeContent };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/aibitat/utils/dedupe.js | server/utils/agents/aibitat/utils/dedupe.js | // Some models may attempt to call an expensive or annoying function many times and in that case we will want
// to implement some stateful tracking during that agent session. GPT4 and other more powerful models are smart
// enough to realize this, but models like 3.5 lack this. Open source models suffer greatly from this issue.
// eg: "save something to file..."
// agent -> saves
// agent -> saves
// agent -> saves
// agent -> saves
// ... do random # of times.
// We want to block all the reruns of a plugin, so we can add this to prevent that behavior from
// spamming the user (or other costly function) that have the exact same signatures.
// Track Run/isDuplicate prevents _exact_ data re-runs based on the SHA of their inputs
// StartCooldown/isOnCooldown does prevention of _near-duplicate_ runs based on only the function name that is running.
// isMarkedUnique/markUnique/removeUniqueConstraint prevents one-time functions from re-running. EG: charting.
const crypto = require("crypto");
const DEFAULT_COOLDOWN_MS = 30 * 1000;

class Deduplicator {
  // sha256(run signature) -> epoch-ms the run was tracked.
  #hashes = {};
  // function name -> epoch-ms when its cooldown expires.
  #cooldowns = {};
  // function name -> epoch-ms it was marked one-shot ("unique").
  #uniques = {};

  log(message, ...args) {
    console.log(`\x1b[36m[Deduplicator]\x1b[0m ${message}`, ...args);
  }

  /**
   * Build the stable signature for a (key, params) pair.
   * Shared by trackRun/isDuplicate so the two can never drift apart.
   * @param {string} key - The function name.
   * @param {Object} params - The exact inputs of the run.
   * @returns {string} Hex-encoded sha256 digest.
   */
  #signature(key, params = {}) {
    return crypto
      .createHash("sha256")
      .update(JSON.stringify({ key, params }))
      .digest("hex");
  }

  /**
   * Record a run of `key` with `params` so identical future runs are flagged
   * as duplicates. Optionally start a cooldown and/or mark the key one-shot.
   * @param {string} key - The function name being tracked.
   * @param {Object} params - The exact inputs of the run.
   * @param {{cooldown?: boolean, cooldownInMs?: number, markUnique?: boolean}} options
   */
  trackRun(key, params = {}, options = {}) {
    const {
      cooldown = false,
      cooldownInMs = DEFAULT_COOLDOWN_MS,
      markUnique = false,
    } = options;
    this.#hashes[this.#signature(key, params)] = Date.now();
    if (cooldown) this.startCooldown(key, { cooldownInMs });
    if (markUnique) this.markUnique(key);
  }

  /**
   * Checks if a key and params are:
   * - exactly the same as a previous run.
   * - on cooldown.
   * - marked as unique.
   * @param {string} key - The key to check.
   * @param {Object} params - The parameters to check.
   * @returns {{isDuplicate: boolean, reason: string}} - The result of the check.
   */
  isDuplicate(key, params = {}) {
    if (Object.hasOwn(this.#hashes, this.#signature(key, params)))
      return {
        isDuplicate: true,
        reason: `an exact duplicate of previous run of ${key}`,
      };
    if (this.isOnCooldown(key))
      return {
        isDuplicate: true,
        reason: `the function is on cooldown for ${key}.`,
      };
    if (this.isMarkedUnique(key))
      return {
        isDuplicate: true,
        reason: `the function is marked as unique for ${key}. Can only be called once per agent session.`,
      };
    return { isDuplicate: false, reason: "" };
  }

  /**
   * Resets the object property for this instance of the Deduplicator class
   * @param {('runs'|'cooldowns'|'uniques')} type - The type of prop to reset
   */
  reset(type = "runs") {
    switch (type) {
      case "runs":
        this.#hashes = {};
        break;
      case "cooldowns":
        this.#cooldowns = {};
        break;
      case "uniques":
        this.#uniques = {};
        break;
    }
  }

  /**
   * Starts a cooldown for a key.
   * @param {string} key - The key to start the cooldown for (string key of the function name).
   * @param {Object} parameters - The parameters for the cooldown.
   * @param {number} parameters.cooldownInMs - The cooldown in milliseconds.
   */
  startCooldown(key, parameters = { cooldownInMs: DEFAULT_COOLDOWN_MS }) {
    // `||` (not `??`) intentionally treats a 0ms cooldown as "use default".
    const cooldownDelay = parameters.cooldownInMs || DEFAULT_COOLDOWN_MS;
    this.log(`Starting cooldown for ${key} for ${cooldownDelay}ms`);
    this.#cooldowns[key] = Date.now() + Number(cooldownDelay);
  }

  /**
   * Checks if a key is on cooldown.
   * @param {string} key - The key to check.
   * @returns {boolean} - True if the key is on cooldown, false otherwise.
   */
  isOnCooldown(key) {
    if (!Object.hasOwn(this.#cooldowns, key)) return false;
    return Date.now() <= this.#cooldowns[key];
  }

  /**
   * Checks if a key is marked as unique and currently tracked by the deduplicator.
   * @param {string} key - The key to check.
   * @returns {boolean} - True if the key is marked as unique, false otherwise.
   */
  isMarkedUnique(key) {
    return Object.hasOwn(this.#uniques, key);
  }

  /**
   * Removes the unique constraint for a key.
   * @param {string} key - The key to remove the unique constraint for.
   */
  removeUniqueConstraint(key) {
    delete this.#uniques[key];
  }

  /**
   * Marks a key as unique and currently tracked by the deduplicator.
   * @param {string} key - The key to mark as unique.
   */
  markUnique(key) {
    this.#uniques[key] = Date.now();
  }
}
module.exports.Deduplicator = Deduplicator;
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/aibitat/plugins/websocket.js | server/utils/agents/aibitat/plugins/websocket.js | const chalk = require("chalk");
const { Telemetry } = require("../../../../models/telemetry");
const SOCKET_TIMEOUT_MS = 300 * 1_000; // 5 mins
/**
* Websocket Interface plugin. It prints the messages on the console and asks for feedback
* while the conversation is running in the background.
*/
// export interface AIbitatWebSocket extends ServerWebSocket<unknown> {
// askForFeedback?: any
// awaitResponse?: any
// handleFeedback?: (message: string) => void;
// }
const WEBSOCKET_BAIL_COMMANDS = [
"exit",
"/exit",
"stop",
"/stop",
"halt",
"/halt",
"/reset", // Will not reset but will bail. Powerusers always do this and the LLM responds.
];
const websocket = {
  name: "websocket",
  startupConfig: {
    params: {
      socket: {
        required: true,
      },
      muteUserReply: {
        required: false,
        default: true,
      },
      introspection: {
        required: false,
        default: true,
      },
    },
  },
  plugin: function ({
    socket, // @type AIbitatWebSocket
    muteUserReply = true, // Do not post messages to "USER" back to frontend.
    introspection = false, // when enabled will attach socket to Aibitat object with .introspect method which reports status updates to frontend.
  }) {
    return {
      name: this.name,
      setup(aibitat) {
        // Surface fatal errors to the frontend and end the agent session.
        aibitat.onError(async (error) => {
          let errorMessage =
            error?.message || "An error occurred while running the agent.";
          console.error(chalk.red(` error: ${errorMessage}`), error);
          aibitat.introspect(
            `Error encountered while running: ${errorMessage}`
          );
          socket.send(
            JSON.stringify({ type: "wssFailure", content: errorMessage })
          );
          aibitat.terminate();
        });
        // Push agent "thoughts" (status updates) to the frontend when enabled.
        aibitat.introspect = (messageText) => {
          if (!introspection) return; // Dump thoughts when not wanted.
          socket.send(
            JSON.stringify({
              type: "statusResponse",
              content: messageText,
              animate: true,
            })
          );
        };
        // expose function for sockets across aibitat
        // type param must be set or else msg will not be shown or handled in UI.
        aibitat.socket = {
          send: (type = "__unhandled", content = "") => {
            socket.send(JSON.stringify({ type, content }));
          },
        };
        // aibitat.onStart(() => {
        //   console.log("🚀 starting chat ...");
        // });
        aibitat.onMessage((message) => {
          if (message.from !== "USER")
            Telemetry.sendTelemetry("agent_chat_sent");
          if (message.from === "USER" && muteUserReply) return;
          socket.send(JSON.stringify(message));
        });
        aibitat.onTerminate(() => {
          // console.log("🚀 chat finished");
          socket.close();
        });
        // Pause for user feedback on interrupts; bail commands end the session.
        aibitat.onInterrupt(async (node) => {
          const feedback = await socket.askForFeedback(socket, node);
          if (WEBSOCKET_BAIL_COMMANDS.includes(feedback)) {
            socket.close();
            return;
          }
          await aibitat.continue(feedback);
        });
        /**
         * Socket wait for feedback on socket
         *
         * @param socket The websocket connection to prompt on. // AIbitatWebSocket & { receive: any, echo: any }
         * @param node The chat node // { from: string; to: string }
         * @returns The user's feedback string ("exit" on timeout).
         */
        socket.askForFeedback = (socket, node) => {
          socket.awaitResponse = (question = "waiting...") => {
            socket.send(JSON.stringify({ type: "WAITING_ON_INPUT", question }));
            return new Promise(function (resolve) {
              let socketTimeout = null;
              socket.handleFeedback = (message) => {
                const data = JSON.parse(message);
                if (data.type !== "awaitingFeedback") return;
                delete socket.handleFeedback;
                clearTimeout(socketTimeout);
                resolve(data.feedback);
                return;
              };
              // Dead-man switch: treat an unresponsive client as an "exit".
              socketTimeout = setTimeout(() => {
                console.log(
                  chalk.red(
                    `Client took too long to respond, chat thread is dead after ${SOCKET_TIMEOUT_MS}ms`
                  )
                );
                resolve("exit");
                return;
              }, SOCKET_TIMEOUT_MS);
            });
          };
          return socket.awaitResponse(`Provide feedback to ${chalk.yellow(
            node.to
          )} as ${chalk.yellow(node.from)}.
Press enter to skip and use auto-reply, or type 'exit' to end the conversation: \n`);
        };
        // console.log("🚀 WS plugin is complete.");
      },
    };
  },
};
module.exports = {
websocket,
WEBSOCKET_BAIL_COMMANDS,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/aibitat/plugins/cli.js | server/utils/agents/aibitat/plugins/cli.js | // Plugin CAN ONLY BE USE IN DEVELOPMENT.
const { input } = require("@inquirer/prompts");
const chalk = require("chalk");
const { RetryError } = require("../error");
/**
* Command-line Interface plugin. It prints the messages on the console and asks for feedback
* while the conversation is running in the background.
*/
const cli = {
name: "cli",
startupConfig: {
params: {},
},
plugin: function ({ simulateStream = true } = {}) {
return {
name: this.name,
setup(aibitat) {
let printing = [];
aibitat.onError(async (error) => {
let errorMessage =
error?.message || "An error occurred while running the agent.";
console.error(chalk.red(` error: ${errorMessage}`), error);
});
aibitat.onStart(() => {
console.log();
console.log("π starting chat ...\n");
printing = [Promise.resolve()];
});
aibitat.onMessage(async (message) => {
const next = new Promise(async (resolve) => {
await Promise.all(printing);
await this.print(message, simulateStream);
resolve();
});
printing.push(next);
});
aibitat.onTerminate(async () => {
await Promise.all(printing);
console.log("π chat finished");
});
aibitat.onInterrupt(async (node) => {
await Promise.all(printing);
const feedback = await this.askForFeedback(node);
// Add an extra line after the message
console.log();
if (feedback === "exit") {
console.log("π chat finished");
return process.exit(0);
}
await aibitat.continue(feedback);
});
},
/**
* Print a message on the terminal
*
* @param message
* // message Type { from: string; to: string; content?: string } & {
state: 'loading' | 'error' | 'success' | 'interrupt'
}
* @param simulateStream
*/
print: async function (message = {}, simulateStream = true) {
const replying = chalk.dim(`(to ${message.to})`);
const reference = `${chalk.magenta("β")} ${chalk.bold(
message.from
)} ${replying}:`;
if (!simulateStream) {
console.log(reference);
console.log(message.content);
// Add an extra line after the message
console.log();
return;
}
process.stdout.write(`${reference}\n`);
// Emulate streaming by breaking the cached response into chunks
const chunks = message.content?.split(" ") || [];
const stream = new ReadableStream({
async start(controller) {
for (const chunk of chunks) {
const bytes = new TextEncoder().encode(chunk + " ");
controller.enqueue(bytes);
await new Promise((r) =>
setTimeout(
r,
// get a random number between 10ms and 50ms to simulate a random delay
Math.floor(Math.random() * 40) + 10
)
);
}
controller.close();
},
});
// Stream the response to the chat
for await (const chunk of stream) {
process.stdout.write(new TextDecoder().decode(chunk));
}
// Add an extra line after the message
console.log();
console.log();
},
/**
* Ask for feedback to the user using the terminal
*
* @param node //{ from: string; to: string }
* @returns
*/
askForFeedback: function (node = {}) {
return input({
message: `Provide feedback to ${chalk.yellow(
node.to
)} as ${chalk.yellow(
node.from
)}. Press enter to skip and use auto-reply, or type 'exit' to end the conversation: `,
});
},
};
},
};
module.exports = { cli };
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Mintplex-Labs/anything-llm | https://github.com/Mintplex-Labs/anything-llm/blob/e287fab56089cf8fcea9ba579a3ecdeca0daa313/server/utils/agents/aibitat/plugins/index.js | server/utils/agents/aibitat/plugins/index.js | const { webBrowsing } = require("./web-browsing.js");
const { webScraping } = require("./web-scraping.js");
const { websocket } = require("./websocket.js");
const { docSummarizer } = require("./summarize.js");
const { saveFileInBrowser } = require("./save-file-browser.js");
const { chatHistory } = require("./chat-history.js");
const { memory } = require("./memory.js");
const { rechart } = require("./rechart.js");
const { sqlAgent } = require("./sql-agent/index.js");
module.exports = {
  // Canonical plugin exports, keyed by identifier.
  webScraping,
  webBrowsing,
  websocket,
  docSummarizer,
  saveFileInBrowser,
  chatHistory,
  memory,
  rechart,
  sqlAgent,
  // Plugin name aliases so they can be pulled by slug as well.
  [webScraping.name]: webScraping,
  [webBrowsing.name]: webBrowsing,
  [websocket.name]: websocket,
  [docSummarizer.name]: docSummarizer,
  [saveFileInBrowser.name]: saveFileInBrowser,
  [chatHistory.name]: chatHistory,
  [memory.name]: memory,
  [rechart.name]: rechart,
  [sqlAgent.name]: sqlAgent,
};
| javascript | MIT | e287fab56089cf8fcea9ba579a3ecdeca0daa313 | 2026-01-04T14:57:11.963777Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.