handle memory issues

tdurieux
2026-05-07 21:01:07 +03:00
parent 369fd8edb2
commit 3eeed23609
6 changed files with 60 additions and 10 deletions
+1 -1
@@ -36,4 +36,4 @@ COPY --from=build /app/public ./public
 COPY package.json ./package.json
 COPY healthcheck.js ./healthcheck.js
 
-CMD ["node", "./build/server/index.js"]
+CMD ["node", "--max-old-space-size=2048", "./build/server/index.js"]
+8 -1
@@ -5,6 +5,10 @@ services:
     image: tdurieux/anonymous_github:v2
     ports:
       - $EXPOSED_PORT:5000
+    deploy:
+      resources:
+        limits:
+          memory: 3G
     env_file:
       - ./.env
     volumes:
@@ -39,7 +43,10 @@ services:
       mode: replicated
       replicas: 4
       endpoint_mode: dnsrr
-    entrypoint: ["node", "./build/streamer/index.js"]
+      resources:
+        limits:
+          memory: 768M
+    entrypoint: ["node", "--max-old-space-size=512", "./build/streamer/index.js"]
     env_file:
       - ./.env
     volumes:
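The compose limits leave deliberate headroom: the streamer's V8 heap is capped at 512 MiB against a 768M container limit, because Buffers, thread stacks, and zlib state are allocated outside the V8 heap and still count toward RSS. A minimal watchdog sketch (all names illustrative, not part of this commit) that warns when a replica nears its cgroup limit:

// rss-watchdog.ts (hypothetical helper). LIMIT_BYTES mirrors the streamer's
// compose memory limit; adjust per service.
const LIMIT_BYTES = 768 * 1024 * 1024;

const timer = setInterval(() => {
  const { rss, heapUsed, external } = process.memoryUsage();
  if (rss > 0.9 * LIMIT_BYTES) {
    console.warn(
      `rss ${(rss / 2 ** 20).toFixed(0)} MiB nearing container limit ` +
        `(heapUsed ${(heapUsed / 2 ** 20).toFixed(0)} MiB, ` +
        `external ${(external / 2 ** 20).toFixed(0)} MiB)`
    );
  }
}, 10_000);
timer.unref(); // do not keep the process alive just for the watchdog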
+19 -2
@@ -14,10 +14,27 @@ import {
 const urlRegex =
   /<?\b((https?|ftp|file):\/\/)[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]\b\/?>?/g;
 
-export function streamToString(stream: Readable): Promise<string> {
+export function streamToString(
+  stream: Readable,
+  maxBytes = 2 * 1024 * 1024
+): Promise<string> {
   const chunks: Buffer[] = [];
+  let totalBytes = 0;
   return new Promise((resolve, reject) => {
-    stream.on("data", (chunk) => chunks.push(Buffer.from(chunk)));
+    stream.on("data", (chunk) => {
+      const buf = Buffer.from(chunk);
+      totalBytes += buf.length;
+      if (totalBytes > maxBytes) {
+        stream.destroy();
+        reject(
+          new Error(
+            `Stream exceeded ${maxBytes} bytes, refusing to buffer into memory`
+          )
+        );
+        return;
+      }
+      chunks.push(buf);
+    });
     stream.on("error", (err) => reject(err));
     stream.on("end", () => resolve(Buffer.concat(chunks).toString("utf8")));
   });
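Callers that hit the new cap get a rejected promise instead of unbounded buffering. A usage sketch (the import path and the 3 MiB payload are assumptions for illustration):

import { Readable } from "stream";
// streamToString is the function patched above; the import path is assumed.
import { streamToString } from "./utils";

// A 3 MiB stream trips the default 2 MiB cap: the promise rejects instead of
// buffering the whole payload on the heap.
const oversized = Readable.from([Buffer.alloc(3 * 1024 * 1024)]);

streamToString(oversized)
  .then((text) => console.log(`buffered ${text.length} chars`))
  .catch((err) => console.error(err.message));
// -> Stream exceeded 2097152 bytes, refusing to buffer into memory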
+13 -1
@@ -93,7 +93,19 @@ export default async function start() {
   const app = express();
   app.use(express.json());
-  app.use(compression());
+  app.use(
+    compression({
+      filter: (req, res) => {
+        // Skip compression for streamed file content: these responses are
+        // piped from the streamer and can be very large. Compressing them
+        // forces the middleware to hold per-response zlib buffers that pile
+        // up under concurrent load and contribute to heap exhaustion.
+        // Binary files (images, archives) barely compress anyway.
+        if (req.path.match(/^\/api\/repo\/.+\/file\//)) return false;
+        return compression.filter(req, res);
+      },
+    })
+  );
   app.set("etag", "strong");
   // handle session and connection
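One way to spot-check that the filter behaves as the comment claims (a sketch, assuming Node 18+ global fetch and a locally running instance; the port follows the compose file, and the repo, file, and route names are made up):

// File-content route: the filter returns false, so no Content-Encoding header.
const file = await fetch("http://localhost:5000/api/repo/example/file/logo.png", {
  headers: { "accept-encoding": "gzip" },
});
console.log(file.headers.get("content-encoding")); // expect null

// An ordinary JSON route falls through to compression.filter, so gzip applies
// once the response clears the middleware's size threshold.
const json = await fetch("http://localhost:5000/api/user", {
  headers: { "accept-encoding": "gzip" },
});
console.log(json.headers.get("content-encoding")); // expect "gzip"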
+8 -4
@@ -149,10 +149,14 @@ async function webView(req: express.Request, res: express.Response) {
     });
   }
   if (f.extension() == "md") {
-    const content = await streamToString(await f.anonymizedContent());
-    const body = sanitizeHtml(marked.marked(content, { headerIds: false, mangle: false }), sanitizeOptions);
-    const html = `<!DOCTYPE html><html><head><title>Content</title></head><link rel="stylesheet" href="/css/all.min.css" /><body><div class="container p-3 file-content markdown-body">${body}</div></body></html>`;
-    res.contentType("text/html").send(html);
+    try {
+      const content = await streamToString(await f.anonymizedContent());
+      const body = sanitizeHtml(marked.marked(content, { headerIds: false, mangle: false }), sanitizeOptions);
+      const html = `<!DOCTYPE html><html><head><title>Content</title></head><link rel="stylesheet" href="/css/all.min.css" /><body><div class="container p-3 file-content markdown-body">${body}</div></body></html>`;
+      res.contentType("text/html").send(html);
+    } catch {
+      f.send(res);
+    }
   } else {
     f.send(res);
   }
+11 -1
@@ -15,7 +15,17 @@ const logger = createLogger("streamer");
 const app = express();
 app.use(express.json());
-app.use(compression());
+app.use(
+  compression({
+    filter: (req, res) => {
+      // The streamer serves file blobs that are often binary (images,
+      // archives) and can be very large. Compressing them holds zlib
+      // buffers per response that pile up under concurrent load.
+      if (req.path === "/api" && req.method === "POST") return false;
+      return compression.filter(req, res);
+    },
+  })
+);
 app.use("/api", router);