Add logic to reply to comments
@ -1,6 +1,6 @@
import { AIdocClient } from './lib/AIdocClient';

const email = 'clodagh.byrne@100girls.club';
const email = 'layla.karam@100girls.club';

async function main() {
console.log(`Starting API test for email: ${email}`);
@ -42,6 +42,11 @@ async function main() {
console.log('--- Testing Get User Profile ---');
const userProfile = await client.getUser(1); // Test with user ID 1
console.log('User Profile for ID 1:', userProfile);

console.log('--- Testing Get Comments ---');
const commentsResponse = await client.getComments(1);
console.log('Comments:', commentsResponse.comments);
console.log('Pagination:', commentsResponse.pagination);
}

console.log('API test finished successfully.');

@ -3,7 +3,7 @@ import * as dotenv from 'dotenv';
import fs from 'fs/promises';
import path from 'path';
import FormData from 'form-data';
import { ApiMethod, Follower, FollowersResponse, FollowingResponse, SignInResponse, User, VerifyResponse } from './interfaces';
import { ApiMethod, Comment, CommentsResponse, Follower, FollowersResponse, FollowingResponse, SignInResponse, User, VerifyResponse } from './interfaces';

// Load environment variables from .env file
dotenv.config();
@ -141,4 +141,17 @@ export class AIdocClient {
    const endpoint = `users/${userId}/profile`;
    return this._api<User>(ApiMethod.GET, endpoint);
  }

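  /** Fetches a page of comments, optionally filtered by user ID. */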
  public async getComments(page: number, userId?: number): Promise<CommentsResponse> {
    let endpoint = `comments?page=${page}`;
    if (userId) {
      endpoint += `&userId=${userId}`;
    }
    return this._api<CommentsResponse>(ApiMethod.GET, endpoint);
  }

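  /** Posts a reply to the comment with the given ID and returns the created comment. */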
  public async replyToComment(commentId: number, text: string): Promise<Comment> {
    const endpoint = `comments/${commentId}/replies`;
    return this._api<Comment>(ApiMethod.POST, endpoint, { text });
  }
}

@ -62,6 +62,25 @@ export interface FollowersResponse {
  totalPages: number;
}

export interface Comment {
  id: number;
  text: string;
  created_at: string;
  user: User;
  post: any; // You might want to define a Post interface as well
  parent_comment_id?: number;
}

export interface CommentsResponse {
  comments: Comment[];
  pagination: {
    currentPage: number;
    totalPages: number;
    pageSize: number;
    totalItems: number;
  };
}

export interface FollowingResponse {
  following: Follower[];
  total: number;

333 src/lib/lmStudioClient.ts Normal file
@ -0,0 +1,333 @@
import * as https from 'https';
import * as http from 'http';
import * as fs from 'fs';
import * as path from 'path';
import { LMStudioClient as LMStudioClientSDK, Chat, LLM, PredictionResult } from "@lmstudio/sdk";

/**
 * Interface for LMStudio API response
 */
interface LMStudioResponse {
  response?: string;
  choices?: Array<{
    message?: {
      content?: string;
      [key: string]: any;
    };
    [key: string]: any;
  }>;
  [key: string]: any; // For any additional fields in the response
}

/**
 * Interface for chat completion parameters
 */
interface ChatCompletionParams {
  messages: Array<{
    role: string;
    content: string;
    images?: string[];
  }>;
  temperature?: number;
  max_tokens?: number;
  model?: string;
  stream?: boolean;
  [key: string]: any; // For any additional parameters
}

/**
 * Interface for file data to be sent to LMStudio
 */
interface FileData {
  filename: string;
  content: string | Buffer;
  contentType?: string;
}

/**
 * LMStudioClient class for communicating with LMStudio API
 */
export class LMStudioClient {
  private readonly host: string;
  private readonly port: number;
  private readonly useHttps: boolean;
  private readonly apiPath: string;
  private readonly client: LMStudioClientSDK;
  private model: LLM | null = null;

  /**
   * Creates a new LMStudioClient
   * @param host LMStudio server host
   * @param port LMStudio server port
   * @param useHttps Whether to use HTTPS for communication
   * @param apiPath Base API path
   */
  constructor(
    host: string = '192.168.1.104',
    port: number = 1234,
    useHttps: boolean = false,
    apiPath: string = '/api/v0/chat/completions'
  ) {
    this.host = host;
    this.port = port;
    this.useHttps = useHttps;
    this.apiPath = apiPath;

    this.client = new LMStudioClientSDK({ baseUrl: `ws://${host}:${port}` });
  }

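  /** Fetches a model handle from the LMStudio server; called lazily before the first prompt. */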
  public async init(): Promise<void> {
    this.model = await this.client.llm.model();
  }

  /**
   * Executes a prompt and returns the response from LMStudio
   * @param prompt The prompt to send to LMStudio
   * @param options Additional options for the request
   * @returns Promise that resolves to the LMStudio response
   */
  public async executePrompt(
    prompt: string,
    options: {
      systemPrompt?: string;
      temperature?: number;
      maxTokens?: number;
      model?: string;
    } = {}
  ): Promise<PredictionResult> {

    if (!this.model)
      await this.init();

    if (!this.model)
      throw new Error("Model not initialized. Please call init() before executing a prompt.");

    const {
      systemPrompt = "You are a helpful assistant.",
      temperature = 0.7,
      maxTokens = 2048,
    } = options;

    const chat = Chat.from([
      { role: "system", content: systemPrompt },
      { role: "user", content: prompt },
    ]);

    return await this.model.respond(chat, {
      temperature: temperature,
      maxTokens: maxTokens,
    });
  }

  /**
   * Executes a prompt with files and returns the response from LMStudio
   * @param prompt The prompt to send to LMStudio
   * @param files Array of files to send with the prompt
   * @param options Additional options for the request
   * @returns Promise that resolves to the LMStudio response
   */
  public async executePromptWithFiles(
    prompt: string,
    files: string[],
    options: {
      systemPrompt?: string;
      temperature?: number;
      maxTokens?: number;
      model?: string;
      instructions?: string;
    } = {}
  ): Promise<PredictionResult> {
    if (!this.model)
      await this.init();

    if (!this.model)
      throw new Error("Model not initialized. Please call init() before executing a prompt.");

    const {
      systemPrompt = "You are a helpful assistant.",
      temperature = 0.7,
      maxTokens = 2048,
    } = options;

    // Prepare each file as an image attachment before adding it to the chat
    const images = await Promise.all(files.map((file) => {
      return this.client.files.prepareImage(file);
    }));

    const chat = Chat.from([
      { role: "system", content: systemPrompt },
      { role: "user", content: prompt, images: images },
    ]);

    return await this.model.respond(chat, {
      temperature: temperature,
      maxTokens: maxTokens,
    });
  }

  /**
   * Makes an HTTP/HTTPS request to the LMStudio API
   * @param path API endpoint path
   * @param data Request data
   * @param headers Optional request headers
   * @returns Promise that resolves to the LMStudio response
   */
  private makeRequest(
    path: string,
    data: any,
    headers: Record<string, string> = {}
  ): Promise<LMStudioResponse> {
    return new Promise((resolve, reject) => {
      const options = {
        hostname: this.host,
        port: this.port,
        path,
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          ...headers
        }
      };

      // Determine whether to use HTTP or HTTPS
      const requestModule = this.useHttps ? https : http;

      const req = requestModule.request(options, (res) => {
        let responseData = '';

        res.on('data', (chunk) => {
          responseData += chunk;
        });

        res.on('end', () => {
          try {
            if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
              const parsedData = JSON.parse(responseData);
              resolve(parsedData);
            } else {
              reject(new Error(`Request failed with status code ${res.statusCode}: ${responseData}`));
            }
          } catch (error: unknown) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            reject(new Error(`Failed to parse response: ${errorMessage}`));
          }
        });
      });

      req.on('error', (error: Error) => {
        reject(new Error(`Request error: ${error.message}`));
      });

      // Send the request data
      req.write(JSON.stringify(data));
      req.end();
    });
  }

  /**
   * Gets the content type based on file extension
   * @param filename Filename to determine content type for
   * @returns Content type string
   */
  private getContentType(filename: string): string {
    const ext = path.extname(filename).toLowerCase();

    switch (ext) {
      case '.jpg':
      case '.jpeg':
        return 'image/jpeg';
      case '.png':
        return 'image/png';
      case '.gif':
        return 'image/gif';
      case '.pdf':
        return 'application/pdf';
      case '.txt':
        return 'text/plain';
      case '.html':
        return 'text/html';
      case '.json':
        return 'application/json';
      case '.xml':
        return 'application/xml';
      case '.js':
        return 'application/javascript';
      case '.ts':
        return 'application/typescript';
      case '.css':
        return 'text/css';
      default:
        return 'application/octet-stream';
    }
  }
}

/**
 * Executes a prompt and returns the response from LMStudio
 * @param prompt The prompt to send to LMStudio
 * @param options Additional options for the request
 * @returns Promise that resolves to the LMStudio response
 */
export async function executePrompt(
  prompt: string,
  options: {
    host?: string;
    port?: number;
    useHttps?: boolean;
    systemPrompt?: string;
    temperature?: number;
    maxTokens?: number;
    model?: string;
  } = {}
): Promise<string> {
  const {
    host,
    port,
    useHttps,
    ...clientOptions
  } = options;

  const client = new LMStudioClient(host, port, useHttps);

  const prediction = await client.executePrompt(prompt, clientOptions);
  return prediction.content;
}

/**
 * Executes a prompt with files and returns the response from LMStudio
 * @param prompt The prompt to send to LMStudio
 * @param files Array of file paths to send with the prompt
 * @param options Additional options for the request
 * @returns Promise that resolves to the LMStudio response
 */
export async function executePromptWithFiles(
  prompt: string,
  files: string[],
  options: {
    host?: string;
    port?: number;
    useHttps?: boolean;
    systemPrompt?: string;
    temperature?: number;
    maxTokens?: number;
    model?: string;
    instructions?: string;
  } = {}
): Promise<string> {
  const {
    host,
    port,
    useHttps,
    ...clientOptions
  } = options;

  const client = new LMStudioClient(host, port, useHttps);

  const prediction = await client.executePromptWithFiles(prompt, files, clientOptions);
  return prediction.content;
}

// Export default object with both functions for easier imports
export default {
  executePrompt,
  executePromptWithFiles
};

143 src/replyComment.ts Normal file
@ -0,0 +1,143 @@
import * as dotenv from 'dotenv';
import { AIdocClient } from './lib/AIdocClient';
import { getAllModels } from './lib/utils';
import { executePrompt } from './lib/lmStudioClient';
import { Comment, ModelDetail } from './lib/interfaces';

dotenv.config();

const LM_STUDIO_HOST = process.env.LM_STUDIO_HOST || 'localhost';
const LM_STUDIO_PORT = parseInt(process.env.LM_STUDIO_PORT || '1234', 10);

interface CommentNode {
  comment: Comment;
  children: CommentNode[];
}

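// Groups a flat list of comments into threads by attaching each comment to its parent.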
function buildCommentThreads(comments: Comment[]): CommentNode[] {
  const commentMap = new Map<number, CommentNode>();
  const roots: CommentNode[] = [];

  comments.forEach(comment => {
    commentMap.set(comment.id, { comment, children: [] });
  });

  comments.forEach(comment => {
    if (comment.parent_comment_id && commentMap.has(comment.parent_comment_id)) {
      commentMap.get(comment.parent_comment_id)!.children.push(commentMap.get(comment.id)!);
    } else {
      roots.push(commentMap.get(comment.id)!);
    }
  });

  return roots;
}

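// Returns the depth of a thread: 1 for a comment with no replies.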
function getThreadDepth(node: CommentNode): number {
  if (node.children.length === 0) {
    return 1;
  }
  return 1 + Math.max(...node.children.map(getThreadDepth));
}

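// Returns the comment reached by following the last reply at each level of the thread.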
function getLastComment(node: CommentNode): Comment {
  if (node.children.length === 0) {
    return node.comment;
  }
  // Assuming the last child is the most recent reply in that branch
  return getLastComment(node.children[node.children.length - 1]);
}

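// Flattens a thread into a transcript, asks the local LLM for a short in-character reply, and parses the JSON answer.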
async function generateReply(model: ModelDetail, thread: CommentNode): Promise<string> {
  const flatThread: Comment[] = [];
  function flatten(node: CommentNode) {
    flatThread.push(node.comment);
    node.children.forEach(flatten);
  }
  flatten(thread);

  const threadContent = flatThread.map(comment => `${comment.user.name}: ${comment.text}`).join('\n');
  const prompt = `
Model Profile:
- Name: ${model.name}
- Hobby: ${model.hobby || 'Not specified'}
- Job: ${model.job || 'Not specified'}
- City: ${model.city || 'Not specified'}
- Country: ${model.country || 'Not specified'}
- Birthday: ${model.birthdate || 'Not specified'}

Comment Thread:
${threadContent}

Please generate a short reply to the comment thread.
It should be a cute and engaging reply in 10 words or less. Use at least one emoji.
Return the response in JSON format like this: {"reply": "your generated reply"}.
`;

  try {
    const response = await executePrompt(prompt, {
      host: LM_STUDIO_HOST,
      port: LM_STUDIO_PORT,
    });

    const jsonMatch = response.match(/\{.*\}/s);
    if (jsonMatch) {
      const jsonResponse = JSON.parse(jsonMatch[0]);
      return jsonResponse.reply;
    }
  } catch (error) {
    console.error('Error generating reply:', error);
  }

  return '';
}

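// Authenticates as each model and replies to threads where the model is not the last commenter and the thread is fewer than 5 levels deep.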
async function main() {
  console.log('Starting auto-reply script...');
  const models = await getAllModels();

  for (const model of models) {

    //if (model.id !== "61") continue; // Uncomment to restrict the run to the model with ID 61
    if (!model.email) continue;

    console.log(`Processing model: ${model.name} (${model.email})`);
    const client = new AIdocClient(model.email);
    await client.authenticate();

    if (!client.user) {
      console.error(`Failed to authenticate as ${model.name}`);
      continue;
    }

    const modelUserId = client.user.id;
    const commentsResponse = await client.getComments(1); // Get first page of comments on the model's posts
    const commentThreads = buildCommentThreads(commentsResponse.comments);

    for (const threadRoot of commentThreads) {
      const threadDepth = getThreadDepth(threadRoot);
      const lastComment = getLastComment(threadRoot);

      if (lastComment.user.id !== modelUserId && threadDepth < 5) {
        console.log(`Thread needs a reply. Last comment by: ${lastComment.user.name}`);

        const replyText = await generateReply(model, threadRoot);

        if (replyText.length > 0) {
          console.log(`Generated reply: ${replyText}`);

          await client.replyToComment(lastComment.id, replyText);
          console.log(`Replied to comment ${lastComment.id}`);
        }
      }
    }
  }

  console.log('Auto-reply script finished.');
}

main().catch(error => {
  console.error('An unexpected error occurred:', error);
});