Python SDK 概览
命名惯例:snake_case
Pulumi Python SDK 遵循 Python 惯例,所有属性使用 snake_case(下划线命名),而 TypeScript SDK 使用 camelCase(驼峰命名)。
| TypeScript | Python |
|---|---|
| instanceType | instance_type |
| subnetId | subnet_id |
| cidrBlock | cidr_block |
| enableDnsHostnames | enable_dns_hostnames |
| vpcSecurityGroupIds | vpc_security_group_ids |
安装与配置
# 创建 Python + AWS 项目
pulumi new aws-python
# 激活虚拟环境
source venv/bin/activate
# 手动安装 SDK(requirements.txt 中已有,一般直接 pip install -r)
pip install pulumi pulumi-aws
# 添加更多 Provider
pip install pulumi-kubernetes pulumi-gcp pulumi-azure-native
Python 中的 Output.apply()
import pulumi
import pulumi_aws as aws

# Basic apply: transform a single Output once its value resolves.
bucket = aws.s3.BucketV2("my-bucket")
url = bucket.id.apply(lambda name: f"https://{name}.s3.amazonaws.com")

# Combining multiple Outputs: Output.all()
db = aws.rds.Instance(
    "db",
    engine="postgres",
    instance_class="db.t3.micro",
    allocated_storage=20,
    username="admin",
    # NOTE(review): hard-coded secret for demo purposes only —
    # prefer config.require_secret("dbPassword") in real stacks.
    password="mypassword",
    skip_final_snapshot=True,
)
# Merge several Outputs; with positional args the callback receives a list.
# Use db_name — the bare `name` attribute is a deprecated alias in pulumi-aws.
conn_str = pulumi.Output.all(db.address, db.port, db.db_name).apply(
    lambda args: f"postgresql://admin@{args[0]}:{args[1]}/{args[2]}"
)
# Clearer alternative: keyword arguments — the callback receives a dict
# keyed by the argument names instead of a positional list.
conn_str2 = pulumi.Output.all(
    host=db.address, port=db.port, name=db.db_name
).apply(lambda args:
    f"postgresql://admin@{args['host']}:{args['port']}/{args['name']}"
)
pulumi.export("db_connection", conn_str2)
for 循环动态创建资源
列表推导创建多个资源
import pulumi
import pulumi_aws as aws

# Create one private subnet in each availability zone.
availability_zones = ["us-east-1a", "us-east-1b", "us-east-1c"]
vpc = aws.ec2.Vpc("main", cidr_block="10.0.0.0/16")

private_subnets = []
for idx, az in enumerate(availability_zones):
    subnet = aws.ec2.Subnet(
        f"private-{az}",
        vpc_id=vpc.id,
        cidr_block=f"10.0.{idx + 10}.0/24",
        availability_zone=az,
        tags={"Name": f"private-{az}", "Type": "private"},
    )
    private_subnets.append(subnet)

# Export the IDs of every subnet we created.
subnet_ids = [subnet.id for subnet in private_subnets]
pulumi.export("private_subnet_ids", subnet_ids)
字典驱动的配置模式
# Describe each service's configuration in a dict, then create resources in a loop.
import json

services = {
    "api": {"cpu": 256, "memory": 512, "port": 8080, "count": 2},
    "worker": {"cpu": 512, "memory": 1024, "port": 0, "count": 3},
    "cron": {"cpu": 128, "memory": 256, "port": 0, "count": 1},
}
# Read configuration (from pulumi config)
config = pulumi.Config()
env = config.get("environment") or "dev"
ecs_services = {}
for name, svc in services.items():
    task_def = aws.ecs.TaskDefinition(
        f"{name}-task",
        family=f"{name}-{env}",
        cpu=str(svc["cpu"]),
        memory=str(svc["memory"]),
        network_mode="awsvpc",
        requires_compatibilities=["FARGATE"],
        # Everything in the definition is a plain Python value (no Outputs),
        # so serialize it directly with json.dumps. The previous
        # `.apply(import("json").dumps)` was a SyntaxError — `import` is a
        # statement keyword, not a callable.
        container_definitions=json.dumps([{
            "name": name,
            "image": f"myapp/{name}:latest",
            # A port of 0 means "no port mapping" (worker/cron services).
            "portMappings": [{"containerPort": svc["port"]}] if svc["port"] else [],
        }]),
    )
    ecs_services[name] = task_def
实战:Python 创建 EKS Kubernetes 集群
架构设计
EKS 集群架构
┌──────────────────────────────────────────────────────┐
│ EKS Control Plane (AWS 托管) │
│ │
│ ┌──────────────┐ ┌──────────────┐ │
│ │ Node Group │ │ Node Group │ │
│ │ (系统节点) │ │ (应用节点) │ │
│ │ t3.medium × 2│ │ t3.large × 3│ │
│ └──────────────┘ └──────────────┘ │
│ │
│ ┌─────────────────────────────────────────────┐ │
│ │ IAM Roles:ClusterRole + NodeRole + OIDC │ │
│ └─────────────────────────────────────────────┘ │
└──────────────────────────────────────────────────────┘
# eks_cluster.py — build a complete EKS cluster with Python
import json
import pulumi
import pulumi_aws as aws

config = pulumi.Config()
cluster_name = config.get("clusterName") or "my-eks-cluster"
k8s_version = config.get("k8sVersion") or "1.29"

# ── IAM roles ──────────────────────────────────
# 1. EKS cluster role — assumed by the EKS control plane service.
eks_assume_role_policy = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "eks.amazonaws.com"},
        "Action": "sts:AssumeRole",
    }]
})
cluster_role = aws.iam.Role(
    "eks-cluster-role",
    assume_role_policy=eks_assume_role_policy,
)
aws.iam.RolePolicyAttachment(
    "eks-cluster-policy",
    role=cluster_role.name,
    policy_arn="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
)
# 2. Node group role — assumed by the EC2 worker instances.
ec2_assume_role_policy = json.dumps({
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "ec2.amazonaws.com"},
        "Action": "sts:AssumeRole",
    }]
})
node_role = aws.iam.Role(
    "eks-node-role",
    assume_role_policy=ec2_assume_role_policy,
)

# Attach the managed policies every EKS worker node requires.
node_policies = [
    "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
    "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
    "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
]
for idx, managed_arn in enumerate(node_policies):
    aws.iam.RolePolicyAttachment(
        f"node-policy-{idx}",
        role=node_role.name,
        policy_arn=managed_arn,
    )
# ── Networking ──────────────────────────────────
vpc = aws.ec2.Vpc(
    "eks-vpc",
    cidr_block="10.1.0.0/16",
    enable_dns_hostnames=True,
    enable_dns_support=True,
    tags={"Name": "eks-vpc", f"kubernetes.io/cluster/{cluster_name}": "shared"},
)

azs = ["us-east-1a", "us-east-1b", "us-east-1c"]
private_subnets = []
for idx, az in enumerate(azs):
    # The kubernetes.io tags let EKS/Kubernetes discover these subnets
    # (internal-elb marks them as targets for internal load balancers).
    subnet_tags = {
        "Name": f"private-{az}",
        f"kubernetes.io/cluster/{cluster_name}": "shared",
        "kubernetes.io/role/internal-elb": "1",
    }
    private_subnets.append(
        aws.ec2.Subnet(
            f"private-{az}",
            vpc_id=vpc.id,
            cidr_block=f"10.1.{idx}.0/24",
            availability_zone=az,
            tags=subnet_tags,
        )
    )
# ── EKS cluster ──────────────────────────────────
# Control-plane ENIs live in the private subnets; both the public and the
# private API endpoints are enabled.
cluster_vpc_config = aws.eks.ClusterVpcConfigArgs(
    subnet_ids=[subnet.id for subnet in private_subnets],
    endpoint_public_access=True,
    endpoint_private_access=True,
)
cluster = aws.eks.Cluster(
    cluster_name,
    role_arn=cluster_role.arn,
    version=k8s_version,
    vpc_config=cluster_vpc_config,
    tags={"Name": cluster_name},
)
# ── Node group configuration (dict-driven) ──────────
node_group_configs = {
    "system": {
        "instance_types": ["t3.medium"],
        "min_size": 1, "max_size": 3, "desired_size": 2,
        "labels": {"role": "system"},
    },
    "app": {
        "instance_types": ["t3.large"],
        "min_size": 2, "max_size": 10, "desired_size": 3,
        "labels": {"role": "app"},
    },
}

# Create one managed node group per entry in the config dict.
node_groups = {}
for group_name, spec in node_group_configs.items():
    node_groups[group_name] = aws.eks.NodeGroup(
        f"{cluster_name}-{group_name}",
        cluster_name=cluster.name,
        node_role_arn=node_role.arn,
        subnet_ids=[subnet.id for subnet in private_subnets],
        instance_types=spec["instance_types"],
        scaling_config=aws.eks.NodeGroupScalingConfigArgs(
            min_size=spec["min_size"],
            max_size=spec["max_size"],
            desired_size=spec["desired_size"],
        ),
        labels=spec["labels"],
        tags={"Name": f"{cluster_name}-{group_name}"},
    )

# Stack outputs
pulumi.export("cluster_name", cluster.name)
pulumi.export("cluster_endpoint", cluster.endpoint)
pulumi.export("kubeconfig_certificate", cluster.certificate_authority.data)
生成 kubeconfig 连接集群
EKS 集群创建后,用以下命令生成 kubeconfig:aws eks update-kubeconfig --name my-eks-cluster --region us-east-1,然后即可使用 kubectl get nodes 连接集群。也可以在 Pulumi 程序中通过 @pulumi/eks(TypeScript)或 pulumi_eks(Python)高层组件自动生成 kubeconfig 输出。
本章小结
本章核心要点
- snake_case 命名:Python SDK 所有属性使用下划线,与 TypeScript 的驼峰命名对应。
- 列表推导创建资源:
  [Resource(f"name-{i}", ...) for i, az in enumerate(azs)] 比 HCL 的 for_each 更直观。
- 字典驱动配置:用 Python 字典描述多个服务/资源的差异化配置,循环创建,避免代码重复。
- Output.all() 关键字参数:Python 版本支持关键字参数(
  Output.all(host=db.address, port=db.port)),回调中用字典访问,比位置参数更清晰。
- 复杂资源编排:EKS 集群需要 IAM 角色、VPC、子网等多个前置资源,Pulumi 通过 Output 引用自动推断创建顺序。