TCP Server:基础网络编程
const std = @import("std");
const net = std.net;
/// Entry point: binds 0.0.0.0:8080 and serves connections one at a time.
/// (Removed the GeneralPurposeAllocator that was created and immediately
/// discarded with `_ = allocator;` — nothing in this loop allocates.)
pub fn main() !void {
    // Bind 0.0.0.0:8080
    const address = try net.Address.parseIp4("0.0.0.0", 8080);
    var server = try address.listen(.{
        // Allow quick restarts without "address already in use" errors.
        .reuse_address = true,
    });
    defer server.deinit();
    std.debug.print("Server listening on :8080\n", .{});
    while (true) {
        const conn = try server.accept();
        std.debug.print("New connection from {}\n", .{conn.address});
        // Single-threaded handling: a per-connection failure is logged
        // instead of propagating and killing the accept loop.
        handleConnection(conn.stream) catch |err| {
            std.debug.print("Error: {}\n", .{err});
        };
    }
}
/// Reads a single request from the socket, logs it, and replies with a
/// fixed "Hello" response. Always closes the stream before returning.
fn handleConnection(stream: net.Stream) !void {
    defer stream.close();
    var request_buf: [4096]u8 = undefined;
    const bytes_read = try stream.read(&request_buf);
    // Peer closed the connection without sending anything.
    if (bytes_read == 0) return;
    std.debug.print("Request:\n{s}\n", .{request_buf[0..bytes_read]});
    try stream.writeAll("HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nHello");
}
HTTP 请求解析
/// A minimally parsed HTTP request. All slices alias the raw input buffer,
/// so the caller must keep that buffer alive while the Request is in use.
const Request = struct {
    method: []const u8,
    path: []const u8,
    version: []const u8,
    body: []const u8,

    /// Parses the request line and locates the body; headers are skipped.
    /// Returns error.InvalidRequest when the request line is malformed.
    pub fn parse(raw: []const u8) !Request {
        // The request line is everything before the first CRLF.
        var line_iter = std.mem.splitSequence(u8, raw, "\r\n");
        const first_line = line_iter.next() orelse return error.InvalidRequest;

        // "<METHOD> <PATH> <VERSION>", separated by single spaces.
        var token_iter = std.mem.splitScalar(u8, first_line, ' ');
        const method = token_iter.next() orelse return error.InvalidRequest;
        const path = token_iter.next() orelse return error.InvalidRequest;
        const version = token_iter.next() orelse return error.InvalidRequest;

        // The body starts right after the blank line ending the headers.
        const body: []const u8 = if (std.mem.indexOf(u8, raw, "\r\n\r\n")) |idx|
            raw[idx + 4 ..]
        else
            "";

        return .{
            .method = method,
            .path = path,
            .version = version,
            .body = body,
        };
    }
};
路由设计
// Route handler signature; `anyerror` because each handler may fail with
// its own error set.
const Handler = *const fn (req: Request, writer: anytype) anyerror!void;

// One routing-table entry: exact-match method + path mapped to a handler.
const Route = struct {
    method: []const u8,
    path: []const u8,
    handler: Handler,
};

// Static routing table, scanned linearly by dispatch().
const routes = [_]Route{
    .{ .method = "GET", .path = "/", .handler = indexHandler },
    .{ .method = "GET", .path = "/ping", .handler = pingHandler },
    .{ .method = "POST", .path = "/echo", .handler = echoHandler },
};
/// Scans the static routing table for an exact method + path match and
/// runs the registered handler; writes a plain-text 404 when nothing matches.
fn dispatch(req: Request, writer: anytype) !void {
    for (routes) |entry| {
        const method_matches = std.mem.eql(u8, entry.method, req.method);
        const path_matches = std.mem.eql(u8, entry.path, req.path);
        if (method_matches and path_matches) {
            return entry.handler(req, writer);
        }
    }
    try writeResponse(writer, 404, "Not Found");
}
// GET / — static welcome message; the request content is unused.
fn indexHandler(req: Request, writer: anytype) !void {
    _ = req;
    try writeResponse(writer, 200, "Welcome to Zig HTTP Server!");
}
// GET /ping — liveness probe; always answers "pong".
fn pingHandler(req: Request, writer: anytype) !void {
    _ = req; // request content is irrelevant for a ping
    try writeResponse(writer, 200, "pong");
}
// POST /echo — reflects the request body back to the client.
fn echoHandler(req: Request, writer: anytype) !void {
    try writeResponse(writer, 200, req.body);
}
/// Writes a minimal plain-text HTTP/1.1 response: status line,
/// Content-Type/Content-Length headers, then the body.
fn writeResponse(writer: anytype, status: u16, body: []const u8) !void {
    // Use the real reason phrase for the status code; the original
    // hardcoded "OK" for every status (e.g. it emitted "HTTP/1.1 404 OK").
    const reason: []const u8 = switch (status) {
        200 => "OK",
        201 => "Created",
        204 => "No Content",
        400 => "Bad Request",
        401 => "Unauthorized",
        404 => "Not Found",
        500 => "Internal Server Error",
        else => "Unknown",
    };
    try writer.print(
        "HTTP/1.1 {d} {s}\r\nContent-Type: text/plain\r\nContent-Length: {d}\r\n\r\n{s}",
        .{ status, reason, body.len, body },
    );
}
多线程并发处理
// Per-connection data handed to a worker thread (copied by value at spawn,
// so it outlives the accept-loop iteration that created it).
const ConnContext = struct {
    stream: net.Stream,
    allocator: std.mem.Allocator,
};
/// Per-connection worker: reads one request, parses it, and dispatches to
/// the routing table. All errors are contained here so a single bad
/// connection can never crash the thread or the accept loop.
/// (Removed the ArenaAllocator that was initialized but never used, and
/// the read error is now logged instead of silently dropped.)
fn workerThread(ctx: ConnContext) void {
    defer ctx.stream.close();
    var buf: [8192]u8 = undefined;
    const n = ctx.stream.read(&buf) catch |err| {
        std.debug.print("Read error: {}\n", .{err});
        return;
    };
    if (n == 0) return; // peer closed before sending anything
    const req = Request.parse(buf[0..n]) catch {
        // Best-effort 400; a failed write here is deliberately ignored.
        ctx.stream.writeAll("HTTP/1.1 400 Bad Request\r\n\r\n") catch {};
        return;
    };
    var writer = ctx.stream.writer();
    dispatch(req, &writer) catch |err| {
        std.debug.print("Handler error: {}\n", .{err});
    };
}
/// Entry point for the thread-per-connection server on 0.0.0.0:8080.
pub fn main() !void {
    // GPA lives for the whole process; deinit reports leaks at shutdown.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    const address = try net.Address.parseIp4("0.0.0.0", 8080);
    var server = try address.listen(.{ .reuse_address = true });
    defer server.deinit();
    std.debug.print("Multithreaded server on :8080\n", .{});
    while (true) {
        const conn = try server.accept();
        // ctx is copied by value into the spawned thread's argument tuple,
        // so it stays valid after this loop iteration ends.
        const ctx = ConnContext{ .stream = conn.stream, .allocator = allocator };
        // One thread per connection.
        const thread = try std.Thread.spawn(.{}, workerThread, .{ctx});
        thread.detach(); // detached: the thread finishes on its own, no join needed
    }
}
使用 httpz 库
// src/main.zig(使用 httpz 库,比手写更完整)
const httpz = @import("httpz");
/// Entry point using the httpz library: registers three routes and blocks
/// in listen(). httpz handles the thread pool and HTTP/1.1 details.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    // Match the other entry points in this file: check for leaks on exit.
    // (The original created the GPA but never deinitialized it.)
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    var server = try httpz.Server().init(allocator, .{ .port = 8080 });
    defer server.deinit();
    var router = server.router();
    router.get("/", index);
    router.get("/user/:id", getUser);
    router.post("/user", createUser);
    std.debug.print("httpz server on :8080\n", .{});
    try server.listen(); // blocks until the server shuts down
}
// GET / — JSON greeting; the request is unused.
fn index(req: *httpz.Request, res: *httpz.Response) !void {
    _ = req;
    res.status(200);
    try res.json(.{ .message = "Hello from Zig!" }, .{});
}
// GET /user/:id — echoes the :id path parameter back as JSON,
// falling back to "unknown" if the parameter is absent.
fn getUser(req: *httpz.Request, res: *httpz.Response) !void {
    const id = req.param("id") orelse "unknown";
    try res.json(.{ .id = id, .name = "Alice" }, .{});
}
// POST /user — logs the raw request body and replies 201 with a JSON ack.
fn createUser(req: *httpz.Request, res: *httpz.Response) !void {
    const body = try req.body();
    std.debug.print("Create user: {s}\n", .{body orelse ""});
    res.status(201);
    try res.json(.{ .created = true }, .{});
}
性能测试
# 编译 release 版本
zig build -Doptimize=ReleaseFast
# 使用 wrk 进行基准测试
wrk -t4 -c100 -d30s http://localhost:8080/ping
# 使用 hey 进行简单测试
hey -n 10000 -c 50 http://localhost:8080/
# 使用 ab (Apache Bench)
ab -n 10000 -c 100 http://localhost:8080/ping
Zig 服务器性能提示
- 使用 ArenaAllocator 处理每个请求,一次性释放,避免频繁 malloc/free
- 使用线程池而非每请求一线程,避免线程创建开销
- 考虑使用 io_uring(Linux)或 kqueue(macOS)实现事件驱动
- httpz 库内置了线程池和高效 I/O,生产环境推荐直接使用
- Release 模式(-Doptimize=ReleaseFast)性能比 Debug 快 10-50 倍
JSON 处理
std.json:标准库 JSON 支持
Zig 标准库内置了完整的 JSON 解析和序列化功能,不需要第三方库。
const std = @import("std");
// Example payload type for JSON round-tripping: std.json maps each field
// name directly to a JSON object key.
const User = struct {
    id: u64,
    name: []const u8,
    email: []const u8,
    active: bool,
};
/// Demonstrates struct -> JSON serialization with std.json.stringifyAlloc:
/// first the compact form, then pretty-printed with 4-space indentation.
/// Both returned strings are owned by the caller and freed here.
pub fn serializeExample(allocator: std.mem.Allocator) !void {
    const sample = User{
        .id = 1,
        .name = "Alice",
        .email = "alice@example.com",
        .active = true,
    };

    // Compact output, e.g.
    // {"id":1,"name":"Alice","email":"alice@example.com","active":true}
    const compact = try std.json.stringifyAlloc(allocator, sample, .{});
    defer allocator.free(compact);
    std.debug.print("{s}\n", .{compact});

    // Same data, indented for readability.
    const indented = try std.json.stringifyAlloc(allocator, sample, .{
        .whitespace = .indent_4,
    });
    defer allocator.free(indented);
    std.debug.print("{s}\n", .{indented});
}
/// Demonstrates type-safe JSON -> struct parsing with std.json.parseFromSlice.
pub fn parseExample(allocator: std.mem.Allocator) !void {
    const raw =
        \\{"id":42,"name":"Bob","email":"bob@example.com","active":false}
    ;
    const result = try std.json.parseFromSlice(User, allocator, raw, .{});
    // parseFromSlice owns all parsed memory; one deinit frees everything.
    defer result.deinit();
    const parsed_user = result.value;
    std.debug.print("id={d}, name={s}\n", .{ parsed_user.id, parsed_user.name });
}
/// Parses JSON into std.json.Value for cases where the shape is unknown at
/// compile time, then reads fields dynamically.
pub fn parseDynamic(allocator: std.mem.Allocator) !void {
    // The multiline string literal must sit on its own lines: the original
    // wrote `const json_input = \\{...};` on one line, which put the
    // trailing `;` INSIDE the string — a syntax error and invalid JSON.
    const json_input =
        \\{"key":"value","num":123,"arr":[1,2,3]}
    ;
    const parsed = try std.json.parseFromSlice(
        std.json.Value,
        allocator,
        json_input,
        .{},
    );
    defer parsed.deinit();
    // Dynamic field access through the object map.
    const root = parsed.value.object;
    if (root.get("key")) |val| {
        std.debug.print("key={s}\n", .{val.string});
    }
    if (root.get("num")) |val| {
        std.debug.print("num={d}\n", .{val.integer});
    }
}
线程池实现
简单固定大小线程池
每请求一线程的模型在高并发下会因为线程创建开销和大量上下文切换导致性能下降。线程池预先创建固定数量的工作线程,用任务队列分发工作。
const std = @import("std");
// A unit of work for the pool: a type-erased function pointer plus the
// context argument it will be called with.
const Task = struct {
    func: *const fn (ctx: *anyopaque) void,
    ctx: *anyopaque,
};
/// Fixed-size thread pool with a mutex/condition-protected FIFO task queue.
///
/// Bug fixed: the original spawned workers with `&pool`, where `pool` was a
/// local that init() then returned BY VALUE — every worker kept a dangling
/// pointer to dead stack memory. The queue/mutex/condition/shutdown state
/// now lives in a heap-allocated `Shared` struct whose address is stable
/// for the pool's whole lifetime. Public API (init/deinit/submit) unchanged.
pub const ThreadPool = struct {
    /// Worker-visible state; heap-allocated so its address never moves.
    const Shared = struct {
        queue: std.ArrayList(Task),
        mutex: std.Thread.Mutex,
        cond: std.Thread.Condition,
        shutdown: bool,
    };

    threads: []std.Thread,
    shared: *Shared,
    allocator: std.mem.Allocator,

    /// Allocates shared state and starts `thread_count` workers.
    /// On partial failure, already-started workers are shut down and joined.
    pub fn init(allocator: std.mem.Allocator, thread_count: usize) !ThreadPool {
        const shared = try allocator.create(Shared);
        errdefer allocator.destroy(shared);
        shared.* = .{
            .queue = std.ArrayList(Task).init(allocator),
            .mutex = .{},
            .cond = .{},
            .shutdown = false,
        };
        errdefer shared.queue.deinit();

        const threads = try allocator.alloc(std.Thread, thread_count);
        errdefer allocator.free(threads);

        var started: usize = 0;
        errdefer {
            // A spawn failed: tell running workers to exit, then join them.
            shared.mutex.lock();
            shared.shutdown = true;
            shared.cond.broadcast();
            shared.mutex.unlock();
            for (threads[0..started]) |t| t.join();
        }
        while (started < thread_count) : (started += 1) {
            threads[started] = try std.Thread.spawn(.{}, workerLoop, .{shared});
        }

        return .{ .threads = threads, .shared = shared, .allocator = allocator };
    }

    /// Signals shutdown; workers drain the remaining queue, then exit.
    pub fn deinit(self: *ThreadPool) void {
        self.shared.mutex.lock();
        self.shared.shutdown = true;
        self.shared.cond.broadcast();
        self.shared.mutex.unlock();
        for (self.threads) |t| t.join();
        self.shared.queue.deinit();
        self.allocator.free(self.threads);
        self.allocator.destroy(self.shared);
    }

    /// Enqueues a task and wakes one idle worker.
    pub fn submit(self: *ThreadPool, task: Task) !void {
        self.shared.mutex.lock();
        defer self.shared.mutex.unlock();
        try self.shared.queue.append(task);
        self.shared.cond.signal();
    }

    fn workerLoop(shared: *Shared) void {
        while (true) {
            shared.mutex.lock();
            // Sleep until there is work or we are told to shut down.
            while (shared.queue.items.len == 0 and !shared.shutdown) {
                shared.cond.wait(&shared.mutex); // atomically unlocks + waits
            }
            if (shared.shutdown and shared.queue.items.len == 0) {
                shared.mutex.unlock();
                return;
            }
            // Pop the queue head (FIFO; O(n) shift, fine for a tutorial).
            const task = shared.queue.orderedRemove(0);
            shared.mutex.unlock();
            // Run the task without holding the lock.
            task.func(task.ctx);
        }
    }
};
中间件模式
请求处理链
中间件(Middleware)是在请求到达实际处理函数之前/之后执行的通用逻辑,如日志记录、认证、限流等。在 Zig 中可以用函数指针链实现。
// Handler signature shared by the middleware chain below.
const HandlerFn = fn (req: *Request, res: *Response) anyerror!void;

/// Mutable response being built by the middleware chain; send() flushes it.
const Response = struct {
    status: u16 = 200,
    body: []const u8 = "",
    stream: *std.net.Stream,

    /// Writes the status line, Content-Length header and body to the stream.
    /// `allocator` is only used for the transient header buffer.
    pub fn send(self: *Response, allocator: std.mem.Allocator) !void {
        // Use the correct reason phrase; the original hardcoded "OK" for
        // every status (e.g. it emitted "HTTP/1.1 401 OK").
        const reason: []const u8 = switch (self.status) {
            200 => "OK",
            201 => "Created",
            204 => "No Content",
            400 => "Bad Request",
            401 => "Unauthorized",
            404 => "Not Found",
            500 => "Internal Server Error",
            else => "Unknown",
        };
        const headers = try std.fmt.allocPrint(
            allocator,
            "HTTP/1.1 {d} {s}\r\nContent-Length: {d}\r\n\r\n",
            .{ self.status, reason, self.body.len },
        );
        defer allocator.free(headers);
        try self.stream.writeAll(headers);
        try self.stream.writeAll(self.body);
    }
};
// Logging middleware: prints method/path before dispatch, then the final
// status and elapsed wall-clock time after the rest of the chain ran.
fn loggingMiddleware(req: *Request, res: *Response, next: *const HandlerFn) !void {
    const started_ms = std.time.milliTimestamp();
    std.debug.print("{s} {s}\n", .{ req.method, req.path });
    // Hand off to the next handler in the chain.
    try next(req, res);
    const elapsed_ms = std.time.milliTimestamp() - started_ms;
    std.debug.print("--> {d} ({d}ms)\n", .{ res.status, elapsed_ms });
}
// Auth middleware: public paths pass straight through; everything else
// needs a matching bearer token in the Authorization header, or gets 401.
fn authMiddleware(req: *Request, res: *Response, next: *const HandlerFn) !void {
    const is_public = std.mem.eql(u8, req.path, "/") or
        std.mem.eql(u8, req.path, "/health");
    if (is_public) return next(req, res);

    // Simplified token check (demo only).
    const authorized = blk: {
        const header = req.headers.get("Authorization") orelse break :blk false;
        break :blk std.mem.startsWith(u8, header, "Bearer valid-token");
    };
    if (authorized) return next(req, res);

    res.status = 401;
    res.body = "Unauthorized";
}
错误处理最佳实践
服务器中的错误恢复
HTTP 服务器不应该因为单个请求的错误而崩溃。正确的做法是捕获每个连接的错误,记录日志,并向客户端发送 500 响应。
/// Top-level per-connection entry point: returns void, so no error from a
/// single request can crash the worker thread that runs it.
fn safeHandleConnection(stream: std.net.Stream, allocator: std.mem.Allocator) void {
    defer stream.close();
    // Arena allocator: everything allocated while serving this request is
    // released in one shot when the function returns.
    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();
    const req_allocator = arena.allocator();
    // Contain all errors here so the worker keeps running.
    handleConnectionInner(stream, req_allocator) catch |err| {
        std.log.err("Connection error: {s}", .{@errorName(err)});
        // Best-effort 500 response. `_ =` cannot discard an error union in
        // Zig (the original line was a compile error); use `catch {}` to
        // explicitly ignore a failed write.
        stream.writeAll("HTTP/1.1 500 Internal Server Error\r\nContent-Length: 21\r\n\r\nInternal Server Error") catch {};
    };
}
/// Reads, parses and dispatches a single request. Errors propagate to
/// safeHandleConnection, which converts them into a 500 response.
fn handleConnectionInner(stream: std.net.Stream, allocator: std.mem.Allocator) !void {
    // Reserved for handlers needing per-request allocation; unused for now.
    _ = allocator;
    var buf: [8192]u8 = undefined;
    const n = try stream.read(&buf);
    if (n == 0) return; // client closed without sending data
    const req = try Request.parse(buf[0..n]);
    var writer = stream.writer();
    try dispatch(req, &writer);
    // Log via std.log. (The original claimed "-> 200" unconditionally,
    // even when dispatch had written a 404.)
    std.log.info("{s} {s} dispatched", .{ req.method, req.path });
}
// std.log 接口:支持不同级别
// std.log.debug("...") — 仅 Debug 模式输出
// std.log.info("...") — 普通信息
// std.log.warn("...") — 警告
// std.log.err("...") — 错误(始终输出)
Arena 分配器在服务器中的作用
每个 HTTP 请求通常需要分配大量临时内存(解析字符串、构建响应体等)。使用 ArenaAllocator 包装 GeneralPurposeAllocator 后,请求处理期间的所有分配都在一个 Arena 中,处理完成后一次性 deinit() 释放——比逐个 free() 快得多,也避免了忘记 free 导致的内存泄漏。httpz 库内部就使用了这种模式。
项目完整结构
# 完整 HTTP 服务器项目结构
http_server/
├── build.zig # 构建脚本
├── build.zig.zon # 依赖声明(可选:httpz)
└── src/
├── main.zig # 入口:初始化、监听循环
├── server.zig # 服务器核心:accept、线程池
├── request.zig # HTTP 请求解析
├── response.zig # HTTP 响应构建
├── router.zig # 路由表
├── middleware.zig # 中间件(日志、认证、限流)
└── handlers/
├── user.zig # /user 路由的业务逻辑
└── health.zig # /health 健康检查端点
本章小结与课程总结
本章核心要点
- 从 TCP 到 HTTP 的路径:std.net.Address.listen() → accept() → read/write Stream;HTTP 不是黑魔法,就是 TCP 上特定格式的文本协议
- ArenaAllocator 是服务器的最佳伴侣:每个请求一个 Arena,处理完成后整体 deinit(),比逐个 free 更快更安全,避免内存泄漏
- std.json 开箱即用:parseFromSlice 将 JSON 解析为类型安全的 Zig 结构体;stringifyAlloc 将结构体序列化为 JSON 字符串;两者都是零依赖
- 线程池避免连接爆炸:固定数量工作线程 + Mutex/Condition 任务队列;线程创建有开销,高并发下每请求一线程会导致数千线程同时运行
- 错误隔离防止服务崩溃:worker 函数用 catch 捕获所有错误并记录,不让单个请求的错误传播导致整个 server 崩溃
- 生产环境用 httpz:自己实现 HTTP 服务器是学习,生产环境直接用 httpz —— 它处理了 HTTP/1.1 的所有细节、线程池、keepalive、slow client 攻击防御
Zig 系统编程课程总结
完成本课程后,你掌握了 Zig 的核心:无隐藏控制流的设计哲学、显式内存管理与 Allocator 接口、错误联合类型的传播模式、comptime 泛型与编译期计算、以及与 C 的无缝互操作。Zig 的设计目标始终是"在你看到代码时,就能理解它做了什么"——没有隐藏的分配、没有隐藏的控制流、没有隐藏的约定。这种简洁性是学习 Zig 最大的收获,也是它在嵌入式、系统工具、高性能服务器等领域持续获得认可的根本原因。