index.js
require('dotenv').config()
const express = require("express")
const path = require('path')
const limit = require("express-rate-limit")
const { v4: uuidv4 } = require("uuid")
const { OpenAI } = require("openai")
const app = express()
const PORT = process.env.PORT || 6006
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
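// System prompt: constrain the model to return raw GLSL fragment shader code with no surrounding text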
const systemMessages = [
  "Generate a new fragment shader or modify the existing one based on the user description.",
  "Output in GLSL only.",
  `Precision: "precision mediump float;"`,
  `Declare these uniforms at the top of the shader ONLY if needed for the given user description:
1. float u_time
2. vec2 u_mouse
3. vec2 u_resolution`,
  "Input: varying vec2 fragCoord",
  "Main function: Provide a custom main function for the fragment shader based on the user description.",
  "Ensure that any functions used in the shader code are fully defined within the code.",
  "Do not add any explanation or text before or after the shader code. Do not include backticks (\`\`\`) in the response.",
]
const getShaderContext = (shader) => `Given the current GLSL fragment shader code: ${shader}`
const createPrompt = (prompt) => `Generate a new fragment shader based on this prompt: "${prompt}".`
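// Strip comments and collapse whitespace so the existing shader uses fewer prompt tokens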
const minifyShaderCode = (shaderCode) =>
  shaderCode.replace(/\/\*[\s\S]*?\*\//g, '')
    .replace(/\/\/.*/g, '')
    .replace(/\s+/g, ' ')
    .replace(/\s*([\(\),\{\}])\s*/g, '$1')
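// Rate limiting: at most 10 requests per minute per IP (enforced by express-rate-limit),
// plus a rolling 24-hour quota of 512 requests per IP tracked in ipQuotas below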
const MAX_REQUESTS_PER_MINUTE = 10
const MAX_REQUESTS_PER_PERIOD = 512
const REQUESTS_PER_MINUTE_MS = 1 * 60 * 1000
const REQUESTS_PER_DAY_MS = 24 * 60 * 60 * 1000
const apiLimiter = limit({
  windowMs: REQUESTS_PER_MINUTE_MS,
  max: MAX_REQUESTS_PER_MINUTE,
  message: "Too many requests (max 10 per minute). Please try again later.",
})
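// Per-IP daily usage map; a timer sweeps out entries whose reset time has passed so the map does not grow unbounded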
const ipQuotas = new Map()
setInterval(() => {
  const now = Date.now()
  for (const [ip, quota] of ipQuotas.entries()) {
    if (quota.resetTime <= now) {
      ipQuotas.delete(ip)
    }
  }
}, REQUESTS_PER_DAY_MS)
app.use(express.static(path.join(__dirname, 'dist')))
app.get('/', (_, res) => {
  res.status(200).sendFile(path.join(__dirname, 'dist/index.html'))
})
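// Middleware for /api/ai: apply the per-minute limiter first, then enforce the per-IP daily quota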
app.use("/api/ai", apiLimiter)
app.use("/api/ai", (req, res, next) => {
const ip = req.headers['x-forwarded-for'] || req.socket.remoteAddress
const now = Date.now()
let quota = ipQuotas.get(ip)
if (!quota) {
quota = { id: uuidv4(), count: 0, resetTime: now + REQUESTS_PER_DAY_MS }
ipQuotas.set(ip, quota)
}
if (quota.resetTime <= now) {
quota.count = 0
quota.resetTime = now + REQUESTS_PER_DAY_MS
}
if (quota.count >= MAX_REQUESTS_PER_PERIOD) {
const message = "Raie limit exceeded. Please try again later."
res.writeHead(200, {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
})
res.write(`event: error\n`)
res.write(`data: ${JSON.stringify({ status: 429, message })}\n\n`)
res.end()
} else {
quota.count += 1
next()
}
})
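// Stream the model's output to the client as server-sent events.
// Illustrative client usage (assumed, not part of this file): a browser would typically open
//   new EventSource(`/api/ai?prompt=${encodeURIComponent(prompt)}&shader=${encodeURIComponent(btoa(shader))}`)
// and listen for "message", "stream-complete", and "error" events.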
app.get("/api/ai", async (req, res) => {
const apiKey = req.query['apiKey']
const prompt = req.query['prompt']
const shader = req.query['shader']
const decodedPrompt = decodeURI(prompt)
const decodedShader = Buffer.from(decodeURIComponent(shader), 'base64').toString('utf-8')
res.writeHead(200, {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
})
try {
const { chat } = !!apiKey ? new OpenAI({ apiKey }) : openai
const completion = await chat.completions.create({
model: "gpt-4",
messages: [
...systemMessages.map((content) => ({
role: "system",
content
})),
{
role: "user",
content: getShaderContext(minifyShaderCode(decodedShader)) + '\n' + createPrompt(decodedPrompt),
}
],
stream: true,
})
req.on('close', () => {
res.end()
})
for await (const data of completion) {
res.write(`data: ${JSON.stringify(data)}\n\n`);
}
res.write('event: stream-complete\ndata: {}\n\n');
res.end();
} catch (error) {
const message = error.status === 429
? "Out of funds for OpenAI requests on current API key. Please use a different key or try again later."
: error.message
res.write(`event: error\n`)
res.write(`data: ${JSON.stringify({ status: error.status, message })}\n\n`)
res.end()
}
})
app.use('*', (_, res) => res.sendStatus(404))
app.listen(PORT, () => {
  console.log(`listening on ${PORT}`)
})
module.exports = app