Added zig source as well

Why not, it was due for a long time. If you manage to find
exploits somehow, please report them rather than using them maliciously.
This commit is contained in:
Pranshu Sharma 2025-05-14 23:02:20 +10:00
parent a95a61d005
commit a2426dee1c
2 changed files with 315 additions and 0 deletions

18
genserve/build.zig Normal file
View file

@@ -0,0 +1,18 @@
const std = @import("std");
// Dependency fetched with:
//   zig fetch --save git+https://github.com/karlseguin/http.zig#master
pub fn build(b: *std.Build) void {
    // Build for the host machine only; no cross-compile/optimize options
    // are exposed on the command line.
    const host = b.graph.host;
    const httpz_dep = b.dependency("httpz", .{
        .target = host,
        // .optimize = .standardOptimizeOption
    });
    const exe = b.addExecutable(.{
        .name = "genserve",
        .root_source_file = b.path("src/main.zig"),
        .target = host,
    });
    exe.root_module.addImport("httpz", httpz_dep.module("httpz"));
    b.installArtifact(exe);
}

297
genserve/src/main.zig Normal file
View file

@@ -0,0 +1,297 @@
const std = @import("std");
const httpz = @import("httpz");
const expect = std.testing.expect;
const log = std.log.info;
const Allocator = std.mem.Allocator;
// Schema of the site configuration JSON (".genorg.json" by default),
// decoded in main() via std.json.parseFromTokenSource. Field names must
// match the JSON keys exactly.
const IdStruct = struct {
    // Path of the blog index page (served at "<blog_dir>/").
    blog_file: []const u8,
    // Path of the landing page (served at "/").
    landing: []const u8,
    // URL prefix under which the blog routes are grouped.
    blog_dir: []const u8,
    // Route of the main RSS feed.
    main_rss_url: []const u8,
    // Route of the per-category RSS feeds; presumably contains a ":cat"
    // parameter, since its handler reads req.param("cat") — verify
    // against the generator that writes this JSON.
    rss_dir: []const u8,
    // Path of the main RSS feed file.
    main_rss: []const u8,
    // One entry per article: article id, category name, backing file path.
    files: []struct {
        art: []const u8,
        cat: []const u8,
        file: []const u8,
    },
    // One entry per category: index page file, RSS file, category name.
    cpath: []struct {
        file: []const u8,
        rss: []const u8,
        cat: []const u8
    },
};
// Open flags used for every content file: read-only with a shared lock.
pub var fo_flags: std.fs.File.OpenFlags = .{ .mode = .read_only, .lock = .shared };
// category name -> open handle of that category's index page.
pub var cat_path_hash: std.StringHashMap(std.fs.File) = undefined;
// category name -> open handle of that category's RSS feed.
pub var cat_rss_hash: std.StringHashMap(std.fs.File) = undefined;
// category name -> (article id -> open handle of the article file).
pub var url_path_hash: std.StringHashMap(std.StringHashMap(std.fs.File)) = undefined;
// Directory all content paths are resolved against (cwd, or argv[1]).
pub var cwd: std.fs.Dir = undefined;
// Upper bound passed to readToEndAlloc for any served file (~1 GB).
pub const max_buf_size = 1000000000;
// Handles opened once in main() and kept open for the process lifetime;
// request handlers read them and seek back to 0.
pub var blog_file_h: std.fs.File = undefined;
pub var main_rss_h: std.fs.File = undefined;
pub var land_h: std.fs.File = undefined;
// The single global server instance; also touched by the SIGINT handler.
pub var server: httpz.Server(void) = undefined;
// Cleared by interrupt() so main()'s deferred cleanup does not call
// server.stop() a second time.
pub var server_on: bool = true;
// SIGINT handler: stops the server so main()'s listen() returns and the
// deferred cleanup runs. Statement order matters — stop() first, then
// clear server_on so the defer in main() knows stop() already happened.
fn interrupt(_: i32) callconv(.C) void {
    server.stop();
    server_on = false;
}
/// Entry point: loads the JSON site config, opens every configured file
/// up front, wires the httpz routes, and serves on port 9669 until SIGINT.
///
/// CLI: genserve [serve-directory] [config-json-filename]
pub fn main() !void {
    // Install a SIGINT handler so Ctrl-C shuts the server down cleanly.
    var sa: std.posix.Sigaction = .{
        .handler = .{ .handler = interrupt },
        .mask = std.posix.empty_sigset,
        .flags = std.posix.SA.RESTART,
    };
    std.posix.sigaction(std.posix.SIG.INT, &sa, null);
    cwd = std.fs.cwd();
    defer {
        cwd.close();
    }
    var json_file: []const u8 = ".genorg.json";
    var args = std.process.args();
    defer {
        args.deinit();
    }
    _ = args.skip(); // skip argv[0]
    // Optional first arg: directory to serve from.
    // Optional second arg: config file name within that directory.
    if (args.next()) |val| {
        cwd = cwd.openDir(val, .{}) catch {
            log("Could not find directory {s}", .{val});
            return;
        };
        if (args.next()) |v| {
            json_file = v;
        }
    }
    log("Using file {s}", .{json_file});
    // GPA backs the JSON parse and the server; its deinit doubles as a
    // leak check at shutdown.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const alctr = gpa.allocator();
    defer {
        const deinit_status = gpa.deinit();
        if (deinit_status == .leak) expect(false) catch @panic("TEST FAIL");
    }
    const file =
        cwd.openFile(json_file, fo_flags) catch {
        log("Could not find file {s}", .{json_file});
        return;
    };
    defer file.close();
    // Stream-parse the config JSON into IdStruct.
    var reader = std.json.reader(alctr, file.reader());
    defer reader.deinit();
    const dom =
        try std.json.parseFromTokenSource(IdStruct, alctr, &reader, .{});
    defer dom.deinit();
    log("{s}", .{dom.value.blog_file});
    // We use arena allocator to help mitigate memory fragmentation; the
    // hash maps below live until shutdown and are freed all at once.
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const alctr2 = arena.allocator();
    url_path_hash = std.StringHashMap(std.StringHashMap(std.fs.File)).init(alctr2);
    defer {
        // Close every article handle, then the inner maps, then the outer.
        var it = url_path_hash.valueIterator();
        while (it.next()) |key| {
            var it2 = key.valueIterator();
            while (it2.next()) |val| {
                val.close();
            }
            key.deinit();
        }
        url_path_hash.deinit();
    }
    // Open every article file now; request handlers only read and rewind.
    for (dom.value.files) |fc| {
        const res = url_path_hash.getPtr(fc.cat);
        log("[{s}]->[{s}] = {s}", .{ fc.cat, fc.art, fc.file });
        const fh = try cwd.openFile(fc.file, fo_flags);
        if (res) |p| {
            try p.*.put(fc.art, fh);
        } else {
            var newmap = std.StringHashMap(std.fs.File).init(alctr2);
            try newmap.put(fc.art, fh);
            try url_path_hash.put(fc.cat, newmap);
        }
    }
    // Per-category index pages and RSS feeds, keyed by category name.
    cat_path_hash = std.StringHashMap(std.fs.File).init(alctr2);
    cat_rss_hash = std.StringHashMap(std.fs.File).init(alctr2);
    defer cat_path_hash.deinit();
    for (dom.value.cpath) |s| {
        const fh = try cwd.openFile(s.file, fo_flags);
        log("File {s}", .{s.file});
        try cat_path_hash.put(s.cat, fh);
        const rfh = try cwd.openFile(s.rss, fo_flags);
        try cat_rss_hash.put(s.cat, rfh);
    }
    defer {
        var it = cat_path_hash.valueIterator();
        while (it.next()) |val| {
            val.close();
        }
        it = cat_rss_hash.valueIterator();
        while (it.next()) |val| {
            val.close();
        }
    }
    // Now that we have done all the json stuff, we need to init web server now.
    server = try httpz.Server(void).init(alctr, .{ .port = 9669 }, {});
    defer {
        // interrupt() may already have called stop(); avoid a double stop.
        if (server_on) {
            server.stop();
        }
        server.deinit();
    }
    var router = try server.router(.{});
    // We open the main file and the blog file
    blog_file_h = try cwd.openFile(dom.value.blog_file, fo_flags);
    defer blog_file_h.close();
    main_rss_h = try cwd.openFile(dom.value.main_rss, fo_flags);
    defer main_rss_h.close();
    log("Landing file set to {s}", .{dom.value.landing});
    land_h = try cwd.openFile(dom.value.landing, fo_flags);
    defer land_h.close();
    // This is a special value; it is made special in the perl generator file.
    log("rss_dir is {s}", .{dom.value.rss_dir});
    log("main rss url is {s}", .{dom.value.main_rss_url});
    // NOTE(review): every handler reads from a shared global File handle
    // and then seekTo(0)s it; if httpz dispatches handlers concurrently,
    // that read/seek pair races — confirm the server's threading model.
    // Main RSS feed.
    router.get(dom.value.main_rss_url, struct {
        fn f(_: *httpz.Request, res: *httpz.Response) !void {
            res.body = try main_rss_h.readToEndAlloc(res.arena, max_buf_size);
            try main_rss_h.seekTo(0);
        }
    }.f, .{});
    // Per-category RSS feed, looked up by the route's "cat" parameter.
    router.get(dom.value.rss_dir, struct {
        fn f(req: *httpz.Request, res: *httpz.Response) !void {
            const fh = cat_rss_hash.get(req.param("cat").?);
            if (fh) |ff| {
                res.body = try ff.readToEndAlloc(res.arena, max_buf_size);
                try ff.seekTo(0);
            } else {
                res.body = "Not found";
            }
        }
    }.f, .{});
    // Landing page.
    router.get("/", struct {
        fn f(_: *httpz.Request, res: *httpz.Response) !void {
            res.body = try land_h.readToEndAlloc(res.arena, max_buf_size);
            try land_h.seekTo(0);
        }
    }.f, .{});
    // Blog routes: index page, category index, single article.
    var blog_routes = router.group(dom.value.blog_dir, .{});
    blog_routes.get("/", struct {
        fn f(_: *httpz.Request, res: *httpz.Response) !void {
            res.body = try blog_file_h.readToEndAlloc(res.arena, max_buf_size);
            try blog_file_h.seekTo(0);
        }
    }.f, .{});
    blog_routes.get("/:cc", getCat, .{});
    blog_routes.get("/:cat/:id", getArt, .{});
    // Everything else falls through to the static-file handler.
    router.all("/*", getFile, .{});
    log("Starting http server", .{});
    try server.listen();
    log("Shutting down", .{});
}
// Converts one ASCII hex digit to its numeric value (0-15), or null for
// anything that is not a hex digit. Used by getFile to percent-decode
// request paths.
//
// Fixes over the original:
//  - lowercase 'a'-'f' are accepted (percent-encoding is case-insensitive
//    per RFC 3986; the old `c > 'F'` check rejected them);
//  - bytes between '9' and 'A' (':', ';', ..., '@') now return null
//    instead of decoding to wrong values;
//  - bytes below '0' now return null instead of underflowing `c - 48`
//    (a safe-mode panic on input like "%!!").
fn char_to_correct_number(c: u8) ?u8 {
    return switch (c) {
        '0'...'9' => c - '0',
        'A'...'F' => c - 'A' + 10,
        'a'...'f' => c - 'a' + 10,
        else => null,
    };
}
// Catch-all static file handler ("/*"): percent-decodes the request path
// and serves the matching file relative to `cwd`.
//
// Security fixes (this handler sees untrusted input):
//  - '%' escapes are bounds-checked, so a path ending in "%" or "%A" can
//    no longer index past the end of the buffer (remotely triggerable
//    panic in the original);
//  - any ".." in the decoded path is rejected, closing the directory
//    traversal via encoded dots (e.g. "/%2e%2e/secret" decoded to
//    "../secret" and escaped the serving directory).
fn getFile(req: *httpz.Request, res: *httpz.Response) !void {
    const strlen = req.url.path.len - 1;
    if (strlen <= 1) {
        return;
    }
    const path = req.url.path[1..];
    var ci: usize = 0;
    // req.url.path starts with '/'; a second leading '/' would make the
    // opened path absolute.
    if (path[0] == req.url.path[0]) {
        res.body = "Nice try, not tolerating it";
        return;
    }
    const output = try res.arena.alloc(u8, strlen);
    {
        var pi: usize = 0;
        while (pi < strlen) : (ci += 1) {
            const char = path[pi];
            if (char == '%') {
                // A '%' escape needs two more characters; reject truncated
                // escapes instead of reading past the end of `path`.
                if (pi + 2 >= strlen) {
                    res.body = "Nice try";
                    return;
                }
                // Convert the two hex digits to one decoded byte.
                const hi = char_to_correct_number(path[pi + 1]) orelse {
                    res.body = "Nice try";
                    return;
                };
                const lo = char_to_correct_number(path[pi + 2]) orelse {
                    res.body = "Nice try";
                    return;
                };
                output[ci] = (hi * 16) + lo;
                pi += 3;
            } else {
                output[ci] = char;
                pi += 1;
            }
        }
    }
    const decoded = output[0..ci];
    // Block directory traversal after decoding ("../", "%2e%2e", ...).
    if (std.mem.indexOf(u8, decoded, "..") != null) {
        res.body = "Nice try";
        return;
    }
    const file = cwd.openFile(decoded, fo_flags) catch {
        res.body = "Not found";
        return;
    };
    defer file.close();
    res.body = try file.readToEndAlloc(res.arena, max_buf_size);
}
// Serves a category's index page (route "<blog_dir>/:cc"); replies
// "Not found" for unknown categories.
fn getCat(req: *httpz.Request, res: *httpz.Response) !void {
    const category = req.param("cc").?;
    if (cat_path_hash.get(category)) |page| {
        res.body = try page.readToEndAlloc(res.arena, max_buf_size);
        try page.seekTo(0);
    } else {
        res.body = "Not found";
    }
}
// Serves one article (route "<blog_dir>/:cat/:id") via the two-level
// lookup: category -> article id -> open file handle. Unknown category
// or article id both reply "Not found".
fn getArt(req: *httpz.Request, res: *httpz.Response) !void {
    res.status = 200;
    const articles = url_path_hash.get(req.param("cat").?) orelse {
        res.body = "Not found";
        return;
    };
    const article = articles.get(req.param("id").?) orelse {
        res.body = "Not found";
        return;
    };
    res.body = try article.readToEndAlloc(res.arena, max_buf_size);
    try article.seekTo(0);
}