Add logic to reply to comments

This commit is contained in:
Ken Yasue
2025-07-21 15:06:09 +02:00
parent 483d69a8e9
commit efaa49ad4d
8 changed files with 659 additions and 4 deletions

package-lock.json generated

@@ -9,6 +9,7 @@
"version": "1.0.0",
"license": "ISC",
"dependencies": {
"@lmstudio/sdk": "^1.3.0",
"@types/axios": "^0.9.36",
"axios": "^1.10.0",
"dotenv": "^17.2.0",
@@ -64,6 +65,28 @@
"@jridgewell/sourcemap-codec": "^1.4.10"
}
},
"node_modules/@lmstudio/lms-isomorphic": {
"version": "0.4.6",
"resolved": "https://registry.npmjs.org/@lmstudio/lms-isomorphic/-/lms-isomorphic-0.4.6.tgz",
"integrity": "sha512-v0LIjXKnDe3Ff3XZO5eQjlVxTjleUHXaom14MV7QU9bvwaoo3l5p71+xJ3mmSaqZq370CQ6pTKCn1Bb7Jf+VwQ==",
"license": "Apache-2.0",
"dependencies": {
"ws": "^8.16.0"
}
},
"node_modules/@lmstudio/sdk": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/@lmstudio/sdk/-/sdk-1.3.0.tgz",
"integrity": "sha512-ppFnscNbR0WfBaHPA1S1wFSfPHlilPR1+e1uYegKtpo3RO9DJ67nwihNA9xd1ZBP+UH5FAl+lsRKrkMGuiGdtg==",
"license": "Apache-2.0",
"dependencies": {
"@lmstudio/lms-isomorphic": "^0.4.6",
"chalk": "^4.1.2",
"jsonschema": "^1.5.0",
"zod": "^3.22.4",
"zod-to-json-schema": "^3.22.5"
}
},
"node_modules/@tsconfig/node10": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz",
@@ -134,6 +157,21 @@
"node": ">=0.4.0"
}
},
"node_modules/ansi-styles": {
"version": "4.3.0",
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
"integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
"license": "MIT",
"dependencies": {
"color-convert": "^2.0.1"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
"node_modules/arg": {
"version": "4.1.3",
"resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz",
@@ -171,6 +209,40 @@
"node": ">= 0.4"
}
},
"node_modules/chalk": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
"license": "MIT",
"dependencies": {
"ansi-styles": "^4.1.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
"integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
"license": "MIT",
"dependencies": {
"color-name": "~1.1.4"
},
"engines": {
"node": ">=7.0.0"
}
},
"node_modules/color-name": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
"license": "MIT"
},
"node_modules/combined-stream": {
"version": "1.0.8",
"resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
@@ -374,6 +446,15 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/has-symbols": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
@@ -413,6 +494,15 @@
"node": ">= 0.4"
}
},
"node_modules/jsonschema": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/jsonschema/-/jsonschema-1.5.0.tgz",
"integrity": "sha512-K+A9hhqbn0f3pJX17Q/7H6yQfD/5OXgdrR5UE12gMXCiN9D5Xq2o5mddV2QEcX/bjla99ASsAAQUyMCCRWAEhw==",
"license": "MIT",
"engines": {
"node": "*"
}
},
"node_modules/make-error": {
"version": "1.3.6",
"resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz",
@@ -456,6 +546,18 @@
"integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==",
"license": "MIT"
},
"node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"license": "MIT",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/ts-node": {
"version": "10.9.2",
"resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz",
@@ -528,6 +630,27 @@
"dev": true,
"license": "MIT"
},
"node_modules/ws": {
"version": "8.18.3",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz",
"integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==",
"license": "MIT",
"engines": {
"node": ">=10.0.0"
},
"peerDependencies": {
"bufferutil": "^4.0.1",
"utf-8-validate": ">=5.0.2"
},
"peerDependenciesMeta": {
"bufferutil": {
"optional": true
},
"utf-8-validate": {
"optional": true
}
}
},
"node_modules/yn": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz",
@@ -537,6 +660,24 @@
"engines": {
"node": ">=6"
}
},
"node_modules/zod": {
"version": "3.25.76",
"resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz",
"integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/zod-to-json-schema": {
"version": "3.24.6",
"resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.6.tgz",
"integrity": "sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg==",
"license": "ISC",
"peerDependencies": {
"zod": "^3.24.1"
}
}
}
}

package.json

@@ -24,6 +24,7 @@
"typescript": "^4.7.4"
},
"dependencies": {
"@lmstudio/sdk": "^1.3.0",
"@types/axios": "^0.9.36",
"axios": "^1.10.0",
"dotenv": "^17.2.0",


@@ -1,6 +1,6 @@
import { AIdocClient } from './lib/AIdocClient';
- const email = 'clodagh.byrne@100girls.club';
+ const email = 'layla.karam@100girls.club';
async function main() {
console.log(`Starting API test for email: ${email}`);
@@ -42,6 +42,11 @@ async function main() {
console.log('--- Testing Get User Profile ---');
const userProfile = await client.getUser(1); // Test with user ID 1
console.log('User Profile for ID 1:', userProfile);
console.log('--- Testing Get Comments ---');
const commentsResponse = await client.getComments(1);
console.log('Comments:', commentsResponse.comments);
console.log('Pagination:', commentsResponse.pagination);
}
console.log('API test finished successfully.');

src/lib/AIdocClient.ts

@@ -3,7 +3,7 @@ import * as dotenv from 'dotenv';
import fs from 'fs/promises';
import path from 'path';
import FormData from 'form-data';
- import { ApiMethod, Follower, FollowersResponse, FollowingResponse, SignInResponse, User, VerifyResponse } from './interfaces';
+ import { ApiMethod, Comment, CommentsResponse, Follower, FollowersResponse, FollowingResponse, SignInResponse, User, VerifyResponse } from './interfaces';
// Load environment variables from .env file
dotenv.config();
@@ -141,4 +141,17 @@ export class AIdocClient {
const endpoint = `users/${userId}/profile`;
return this._api<User>(ApiMethod.GET, endpoint);
}
public async getComments(page: number, userId?: number): Promise<CommentsResponse> {
let endpoint = `comments?page=${page}`;
if (userId) {
endpoint += `&userId=${userId}`;
}
return this._api<CommentsResponse>(ApiMethod.GET, endpoint);
}
public async replyToComment(commentId: number, text: string): Promise<Comment> {
const endpoint = `comments/${commentId}/replies`;
return this._api<Comment>(ApiMethod.POST, endpoint, { text });
}
}
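
A minimal usage sketch (not part of the commit) showing how the two new methods compose; it assumes authenticate() resolves for the demo email used in the test script above:

import { AIdocClient } from './lib/AIdocClient';

async function demoReplies() {
  const client = new AIdocClient('layla.karam@100girls.club');
  await client.authenticate();
  // Fetch the first page of comments, then reply to the first one (reply text is a placeholder).
  const { comments, pagination } = await client.getComments(1);
  console.log(`Page ${pagination.currentPage} of ${pagination.totalPages}`);
  if (comments.length > 0) {
    const reply = await client.replyToComment(comments[0].id, 'Thanks for the love! 💕');
    console.log('Created reply', reply.id);
  }
}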

src/lib/interfaces.ts

@@ -62,6 +62,25 @@ export interface FollowersResponse {
totalPages: number;
}
export interface Comment {
id: number;
text: string;
created_at: string;
user: User;
post: any; // You might want to define a Post interface as well
parent_comment_id?: number;
}
export interface CommentsResponse {
comments: Comment[];
pagination: {
currentPage: number;
totalPages: number;
pageSize: number;
totalItems: number;
};
}
export interface FollowingResponse {
following: Follower[];
total: number;

src/lib/lmStudioClient.ts Normal file

@@ -0,0 +1,333 @@
import * as https from 'https';
import * as http from 'http';
import * as fs from 'fs';
import * as path from 'path';
import { LMStudioClient as LMStudioClientSDK, Chat, LLM, PredictionResult } from "@lmstudio/sdk";
/**
* Interface for LMStudio API response
*/
interface LMStudioResponse {
response?: string;
choices?: Array<{
message?: {
content?: string;
[key: string]: any;
};
[key: string]: any;
}>;
[key: string]: any; // For any additional fields in the response
}
/**
* Interface for chat completion parameters
*/
interface ChatCompletionParams {
messages: Array<{
role: string;
content: string;
images?: string[];
}>;
temperature?: number;
max_tokens?: number;
model?: string;
stream?: boolean;
[key: string]: any; // For any additional parameters
}
/**
* Interface for file data to be sent to LMStudio
*/
interface FileData {
filename: string;
content: string | Buffer;
contentType?: string;
}
/**
* LMStudioClient class for communicating with LMStudio API
*/
export class LMStudioClient {
private readonly host: string;
private readonly port: number;
private readonly useHttps: boolean;
private readonly apiPath: string;
private readonly client: LMStudioClientSDK;
private model: LLM | null = null;
/**
* Creates a new LMStudioClient
* @param host LMStudio server host
* @param port LMStudio server port
* @param useHttps Whether to use HTTPS for communication
* @param apiPath Base API path
*/
constructor(
host: string = '192.168.1.104',
port: number = 1234,
useHttps: boolean = false,
apiPath: string = '/api/v0/chat/completions'
) {
this.host = host;
this.port = port;
this.useHttps = useHttps;
this.apiPath = apiPath;
this.client = new LMStudioClientSDK({ baseUrl: `ws://${host}:${port}` });
}
public async init(): Promise<void> {
this.model = await this.client.llm.model();
}
/**
* Executes a prompt and returns the response from LMStudio
* @param prompt The prompt to send to LMStudio
* @param options Additional options for the request
* @returns Promise that resolves to the LMStudio response
*/
public async executePrompt(
prompt: string,
options: {
systemPrompt?: string;
temperature?: number;
maxTokens?: number;
model?: string;
} = {}
): Promise<PredictionResult> {
if (!this.model)
await this.init();
if (!this.model)
throw new Error("Model not initialized. Please call init() before executing a prompt.");
const {
systemPrompt = "You are a helpful assistant.",
temperature = 0.7,
maxTokens = 2048,
} = options;
const chat = Chat.from([
{ role: "system", content: systemPrompt },
{ role: "user", content: prompt },
]);
return await this.model.respond(chat, {
temperature: temperature,
maxTokens: maxTokens,
});
}
/**
* Executes a prompt with files and returns the response from LMStudio
* @param prompt The prompt to send to LMStudio
* @param files Array of files to send with the prompt
* @param options Additional options for the request
* @returns Promise that resolves to the LMStudio response
*/
public async executePromptWithFiles(
prompt: string,
files: string[],
options: {
systemPrompt?: string;
temperature?: number;
maxTokens?: number;
model?: string;
instructions?: string;
} = {}
): Promise<PredictionResult> {
if (!this.model)
await this.init();
if (!this.model)
throw new Error("Model not initialized. Please call init() before executing a prompt.");
const {
systemPrompt = "You are a helpful assistant.",
temperature = 0.7,
maxTokens = 2048,
} = options;
// Prepare every provided file as an image attachment.
const images = await Promise.all(files.map((file) => {
return this.client.files.prepareImage(file);
}));
const chat = Chat.from([
{ role: "system", content: systemPrompt },
{ role: "user", content: prompt, images: images },
]);
return await this.model.respond(chat, {
temperature: temperature,
maxTokens: maxTokens,
});
}
/**
* Makes an HTTP/HTTPS request to the LMStudio API
* @param path API endpoint path
* @param data Request data
* @param headers Optional request headers
* @returns Promise that resolves to the LMStudio response
*/
private makeRequest(
path: string,
data: any,
headers: Record<string, string> = {}
): Promise<LMStudioResponse> {
return new Promise((resolve, reject) => {
const options = {
hostname: this.host,
port: this.port,
path,
method: 'POST',
headers: {
'Content-Type': 'application/json',
...headers
}
};
// Determine whether to use HTTP or HTTPS
const requestModule = this.useHttps ? https : http;
const req = requestModule.request(options, (res) => {
let responseData = '';
res.on('data', (chunk) => {
responseData += chunk;
});
res.on('end', () => {
try {
if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
const parsedData = JSON.parse(responseData);
resolve(parsedData);
} else {
reject(new Error(`Request failed with status code ${res.statusCode}: ${responseData}`));
}
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : String(error);
reject(new Error(`Failed to parse response: ${errorMessage}`));
}
});
});
req.on('error', (error: Error) => {
reject(new Error(`Request error: ${error.message}`));
});
// Send the request data
req.write(JSON.stringify(data));
req.end();
});
}
/**
* Gets the content type based on file extension
* @param filename Filename to determine content type for
* @returns Content type string
*/
private getContentType(filename: string): string {
const ext = path.extname(filename).toLowerCase();
switch (ext) {
case '.jpg':
case '.jpeg':
return 'image/jpeg';
case '.png':
return 'image/png';
case '.gif':
return 'image/gif';
case '.pdf':
return 'application/pdf';
case '.txt':
return 'text/plain';
case '.html':
return 'text/html';
case '.json':
return 'application/json';
case '.xml':
return 'application/xml';
case '.js':
return 'application/javascript';
case '.ts':
return 'application/typescript';
case '.css':
return 'text/css';
default:
return 'application/octet-stream';
}
}
}
/**
* Executes a prompt and returns the response from LMStudio
* @param prompt The prompt to send to LMStudio
* @param options Additional options for the request
* @returns Promise that resolves to the LMStudio response
*/
export async function executePrompt(
prompt: string,
options: {
host?: string;
port?: number;
useHttps?: boolean;
systemPrompt?: string;
temperature?: number;
maxTokens?: number;
model?: string;
} = {}
): Promise<string> {
const {
host,
port,
useHttps,
...clientOptions
} = options;
const client = new LMStudioClient(host, port, useHttps);
const prediction = await client.executePrompt(prompt, clientOptions);
return prediction.content;
}
/**
* Executes a prompt with files and returns the response from LMStudio
* @param prompt The prompt to send to LMStudio
* @param files Array of file paths or FileData objects to send with the prompt
* @param options Additional options for the request
* @returns Promise that resolves to the LMStudio response
*/
export async function executePromptWithFiles(
prompt: string,
files: string[],
options: {
host?: string;
port?: number;
useHttps?: boolean;
systemPrompt?: string;
temperature?: number;
maxTokens?: number;
model?: string;
instructions?: string;
} = {}
): Promise<string> {
const {
host,
port,
useHttps,
...clientOptions
} = options;
const client = new LMStudioClient(host, port, useHttps);
const prediction = await client.executePromptWithFiles(prompt, files, clientOptions);
return prediction.content;
}
// Export default object with both functions for easier imports
export default {
executePrompt,
executePromptWithFiles
};
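
A quick sketch of calling the exported convenience wrapper; the host and port values here are assumptions that mirror the env-var defaults used by replyComment.ts below:

import { executePrompt } from './lib/lmStudioClient';

async function ask() {
  // The SDK connects over ws://localhost:1234 under the hood.
  const answer = await executePrompt('Summarize this comment thread in one sentence.', {
    host: 'localhost',
    port: 1234,
    temperature: 0.5,
    maxTokens: 256,
  });
  console.log(answer);
}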

src/replyComment.ts Normal file

@@ -0,0 +1,143 @@
import * as dotenv from 'dotenv';
import { AIdocClient } from './lib/AIdocClient';
import { getAllModels } from './lib/utils';
import { executePrompt } from './lib/lmStudioClient';
import { Comment, ModelDetail } from './lib/interfaces';
dotenv.config();
const LM_STUDIO_HOST = process.env.LM_STUDIO_HOST || 'localhost';
const LM_STUDIO_PORT = parseInt(process.env.LM_STUDIO_PORT || '1234', 10);
interface CommentNode {
comment: Comment;
children: CommentNode[];
}
function buildCommentThreads(comments: Comment[]): CommentNode[] {
const commentMap = new Map<number, CommentNode>();
const roots: CommentNode[] = [];
comments.forEach(comment => {
commentMap.set(comment.id, { comment, children: [] });
});
comments.forEach(comment => {
if (comment.parent_comment_id && commentMap.has(comment.parent_comment_id)) {
commentMap.get(comment.parent_comment_id)!.children.push(commentMap.get(comment.id)!);
} else {
roots.push(commentMap.get(comment.id)!);
}
});
return roots;
}
function getThreadDepth(node: CommentNode): number {
if (node.children.length === 0) {
return 1;
}
return 1 + Math.max(...node.children.map(getThreadDepth));
}
function getLastComment(node: CommentNode): Comment {
if (node.children.length === 0) {
return node.comment;
}
// Assuming the last child is the most recent reply in that branch
return getLastComment(node.children[node.children.length - 1]);
}
async function generateReply(model: ModelDetail, thread: CommentNode): Promise<string> {
const flatThread: Comment[] = [];
function flatten(node: CommentNode) {
flatThread.push(node.comment);
node.children.forEach(flatten);
}
flatten(thread);
const threadContent = flatThread.map(comment => `${comment.user.name}: ${comment.text}`).join('\n');
const prompt = `
Model Profile:
- Name: ${model.name}
- Hobby: ${model.hobby || 'Not specified'}
- Job: ${model.job || 'Not specified'}
- City: ${model.city || 'Not specified'}
- Country: ${model.country || 'Not specified'}
- Birthday: ${model.birthdate || 'Not specified'}
Comment Thread:
${threadContent}
Please generate a short reply to the comment thread.
It should be cute and engaging, 10 words or less. Use at least one emoji.
Return the response in JSON format like this: {"reply": "your generated reply"}.
`;
try {
const response = await executePrompt(prompt, {
host: LM_STUDIO_HOST,
port: LM_STUDIO_PORT,
});
const jsonMatch = response.match(/\{.*\}/s);
if (jsonMatch) {
const jsonResponse = JSON.parse(jsonMatch[0]);
return jsonResponse.reply;
}
} catch (error) {
console.error('Error generating reply:', error);
}
return '';
}
async function main() {
console.log('Starting auto-reply script...');
const models = await getAllModels();
for (const model of models) {
//if (model.id !== "61") continue; // Debug: process only the model with ID 61
if (!model.email) continue;
console.log(`Processing model: ${model.name} (${model.email})`);
const client = new AIdocClient(model.email);
await client.authenticate();
if (!client.user) {
console.error(`Failed to authenticate as ${model.name}`);
continue;
}
const modelUserId = client.user.id;
const commentsResponse = await client.getComments(1); // Get first page of comments on the model's posts
const commentThreads = buildCommentThreads(commentsResponse.comments);
for (const threadRoot of commentThreads) {
const threadDepth = getThreadDepth(threadRoot);
const lastComment = getLastComment(threadRoot);
if (lastComment.user.id !== modelUserId && threadDepth < 5) {
console.log(`Thread needs a reply. Last comment by: ${lastComment.user.name}`);
const replyText = await generateReply(model, threadRoot);
if (replyText.length > 0) {
console.log(`Generated reply: ${replyText}`);
await client.replyToComment(lastComment.id, replyText);
console.log(`Replied to comment ${lastComment.id}`);
}
}
}
}
console.log('Auto-reply script finished.');
}
main().catch(error => {
console.error('An unexpected error occurred:', error);
});
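
To make the thread helpers concrete, here is a toy walkthrough (data invented, cast to Comment[] because only id and parent_comment_id matter to these functions); it assumes the helpers were exported for testing:

const toy = [
  { id: 1, text: 'First!', user: { id: 7, name: 'Fan' } },
  { id: 2, text: 'Welcome 😊', user: { id: 3, name: 'Layla' }, parent_comment_id: 1 },
] as unknown as Comment[];

const roots = buildCommentThreads(toy);   // one root (id 1) with one child (id 2)
console.log(getThreadDepth(roots[0]));    // 2
console.log(getLastComment(roots[0]).id); // 2 (if user 3 is the model, main() skips this thread)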

tsconfig.json

@@ -1,6 +1,6 @@
{
"compilerOptions": {
- "target": "es6",
+ "target": "es2018",
"module": "commonjs",
"outDir": "./dist",
"rootDir": "./src",