-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.js
234 lines (200 loc) · 5.95 KB
/
index.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
//Import dependencies
import {
// AutojoinRoomsMixin,
MatrixClient,
SimpleFsStorageProvider,
RichRepliesPreprocessor,
RichReply,
} from "matrix-bot-sdk";
import { readFileSync } from "node:fs";
import { parse } from "yaml";
import axios from "axios";
import crypto from "node:crypto"; // ES6+ module syntax
import { remark } from "remark";
import remarkRehype from "remark-rehype";
import rehypeSanitize from "rehype-sanitize";
import rehypeStringify from "rehype-stringify";
// Parse the YAML configuration file holding homeserver credentials and model settings.
const loginFile = readFileSync("./db/login.yaml", "utf8");
const loginParsed = parse(loginFile);
const homeserver = loginParsed["homeserver-url"];
const accessToken = loginParsed["login-token"];
const model = loginParsed["llama-model"];
// Storage provider for the bot's sync state (carried over from the matrix-bot-sdk
// example; presumably persists the sync token between restarts — TODO confirm).
const storage = new SimpleFsStorageProvider("bot.json");
// Log in to the homeserver with the configured access token.
const client = new MatrixClient(homeserver, accessToken, storage);
// AutojoinRoomsMixin.setupOnClient(client);
// //do not include replied message in message
// client.addPreprocessor(new RichRepliesPreprocessor(false));
// Module-level state shared by the handlers below.
let mxid; // the bot's own user ID, resolved after the client starts
const context = new Map(); // context ID -> array of chat messages (conversation history)
const contextID = new Map(); // room ID -> current context ID (regenerated on "!llama new")
const prompt = new Map(); // room ID -> per-room system prompt override
const timeout = 60 * 60 * 1000; // 1 hour; used for both the HTTP request and the typing indicator
// Default system prompt for the LLM, used when a room has no custom prompt.
const defaultContext = {
  role: "system",
  content: loginParsed["default-prompt"],
};
/**
 * Send a chat-completion request to the local Ollama server.
 *
 * @param {Array<{role: string, content: string}>} messages - Conversation
 *   messages (history plus the new user message) appended after the system prompt.
 * @param {string} roomID - Originating room; selects that room's system prompt,
 *   falling back to the default prompt.
 * @returns {Promise<object|undefined>} Ollama's response body, or undefined if
 *   the request failed (the error is logged, not rethrown).
 */
async function generate(messages, roomID) {
  // Build the chat transcript: system prompt first, then the conversation.
  // Named `transcript` to avoid shadowing the module-level `context` Map.
  const transcript = [prompt.get(roomID) || defaultContext];
  if (messages) transcript.push(...messages);
  // Request body for Ollama's /api/chat endpoint (non-streaming).
  const body = {
    model,
    messages: transcript,
    stream: false,
  };
  try {
    const response = await axios.post("http://localhost:11434/api/chat", body, {
      timeout,
    });
    return response.data;
  } catch (error) {
    // Best effort: log and fall through to an undefined return; the caller
    // treats a missing response as "no reply".
    console.error("Error fetching data:", error);
  }
}
// Sync filter: minimize what the homeserver sends on each /sync.
const filter = {
  // We don't expect presence from matrix.org, and presence is irrelevant to this bot anyway.
  presence: { senders: [] },
  room: {
    // Ephemeral events (typing, receipts) are never used by this bot and are inconsequential.
    ephemeral: { senders: [] },
    // State is fetched manually later, hopefully with better load balancing.
    state: {
      senders: [],
      types: [],
      lazy_load_members: true,
    },
    // Events are fetched manually anyway; this just limits backfill so the bot
    // doesn't respond to events far out of view.
    timeline: {
      limit: 25,
    },
  },
};
// Start syncing with the homeserver.
client
  .start(filter)
  .then(async () => {
    // (Original callback took an unused `filter` param that shadowed the outer one.)
    console.log("Client started!");
    // Resolve our own user ID so the event handler can ignore the bot's own messages.
    // If this fails, mxid stays undefined and the self-message guard is disabled,
    // so log the failure instead of swallowing it silently.
    mxid = await client
      .getUserId()
      .catch((e) => console.error("Unable to resolve own user ID:", e));
  })
  .catch((e) => console.error("Failed to start client:", e));
// Send a single emoji reaction to an event; failures are logged, never thrown.
function react(roomID, eventID, key) {
  client
    .sendEvent(roomID, "m.reaction", {
      "m.relates_to": {
        event_id: eventID,
        key,
        rel_type: "m.annotation",
      },
    })
    .catch((e) => console.error(`unable to react in ${roomID}.`));
}
// When the client receives a room event.
client.on("room.event", async (roomID, event) => {
  // Ignore our own events, the anti-scam policy sender, and anything that
  // isn't a plain text message with a body.
  if (
    event.sender === mxid ||
    event.sender === "@anti-scam:matrix.org" ||
    event.content?.msgtype !== "m.text" ||
    !event.content?.body
  ) {
    return;
  }
  // "!llama new [prompt]" resets the conversation and optionally sets a new system prompt.
  const resetCMD = "!llama new";
  if (event.content.body.startsWith(resetCMD)) {
    prompt.set(roomID, {
      role: "system",
      content:
        event.content.body.substring(resetCMD.length + 1 /*space after cmd*/) ||
        loginParsed["default-prompt"], //default
    });
    // Fresh random context ID so the old history is abandoned.
    contextID.set(roomID, crypto.randomBytes(32).toString("base64"));
    react(roomID, event.event_id, "✅");
    return;
  }
  // Load past messages; the context ID defaults to the room ID until a reset occurs.
  const cID = contextID.get(roomID) || roomID;
  let rc = context.get(cID);
  if (!rc) {
    rc = [];
    context.set(cID, rc);
  }
  // Cap stored history, dropping the oldest message.
  if (rc.length > 30) rc.shift();
  const newUserMessage = { role: "user", content: event.content.body };
  // Mark the message as read — best effort; don't leave the promise floating
  // (the original had no rejection handler here).
  client.sendReadReceipt(roomID, event.event_id).catch(() => {});
  // Show a typing indicator while generating.
  client.setTyping(roomID, true, timeout).catch(() => {});
  console.log(
    `Generating prompt in ${roomID} with message "${event.content.body}" and context ${JSON.stringify(rc)}`,
  );
  const responseJSON = await generate([...rc, newUserMessage], roomID);
  // Stop the typing indicator regardless of outcome.
  client.setTyping(roomID, false).catch(() => {});
  // Bail out on missing, error, or malformed responses.
  if (!responseJSON) return console.error("empty response returned from LLM.");
  if (responseJSON.error) return console.error(responseJSON.error);
  if (!responseJSON.message?.content)
    return console.error("No message returned in response from LLM.");
  // Record the exchange in the conversation history (re-capped after the push).
  rc.push(newUserMessage);
  if (rc.length > 30) rc.shift();
  rc.push(responseJSON.message);
  // An exact "\n\n" output gets a 👍 reaction and "\n\n\n\n" a 👎, instead of a reply.
  if (responseJSON.message.content === "\n\n") {
    react(roomID, event.event_id, "👍");
  } else if (responseJSON.message.content === "\n\n\n\n") {
    react(roomID, event.event_id, "👎");
  } else {
    // The model tends to emit Markdown, but Matrix formatting is HTML — convert
    // and sanitize before sending.
    let parsedResponse;
    try {
      parsedResponse = await remark()
        .use(remarkRehype)
        .use(rehypeSanitize)
        .use(rehypeStringify)
        .process(responseJSON.message.content);
    } catch (e) {
      parsedResponse = `<h3>Unable to parse</h3>\n<code>${e}</code> \n${responseJSON.message.content}`;
    }
    // Reply with both the plain-text body and the formatted HTML body.
    client
      .sendMessage(
        roomID,
        RichReply.createFor(
          roomID,
          event,
          responseJSON.message.content,
          parsedResponse,
        ),
      )
      .catch((e) => console.error(`unable to message in ${roomID}.`));
  }
});