add nextcloud + fmt

Florian Maury 2025-01-26 17:56:16 +01:00 committed by Florian
parent 1b9eeb1288
commit ca1c3d8347
44 changed files with 3335 additions and 1897 deletions

main.tf
View file

@@ -1,73 +1,73 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = ">= 0.70.0"
    }
  }
  required_version = ">=1.6.2"
}

provider "proxmox" {
  endpoint  = var.pve_api_base_url
  api_token = var.pve_api_token
}

# module "netboot_server" {
#   source                                 = "./modules/netboot_server"
#   hostname                               = "netboot_server"
#   prod_network_name                      = var.admin_network_name
#   dhcp_iface                             = "ens18"
#   dhcp_server_ip_addr                    = cidrhost(var.admin_network_prefix, 2)
#   dhcp_gateway                           = cidrhost(var.admin_network_prefix, 1)
#   dhcp_range                             = var.admin_network_prefix
#   ssh_public_key_opentofu_netboot_server = var.ssh_public_key_opentofu_netboot_server
#   pve_api_base_url = var.pve_api_base_url
#   pve_api_token    = var.pve_api_token
#   pve_node_name    = var.pve_node_name
#   pve_storage_id   = var.pve_storage_id
#   pve_vm_id        = 108
# }

# module "poc" {
#   depends_on = [module.netboot_server]
#   source     = "./modules/poc"
#   pve_vm_id      = 110
#   pve_storage_id = "local"
#   pve_node_name  = "ns3152888"
#   pve_ssh_user   = var.pve_ssh_user
#   pve_ssh_host   = var.pve_ssh_host
#   netboot_server_ip_address = cidrhost(var.admin_network_prefix, 2)
#   admin_network = {
#     name        = var.admin_network_name
#     prefix      = var.admin_network_prefix
#     mac_address = "1c:69:7a:ff:ff:01"
#   }
#   prod_network = {
#     name        = var.prod_network_name
#     prefix      = var.prod_network_prefix
#     mac_address = "1c:69:7a:ef:ff:01"
#   }
#   monitoring_network = {
#     name        = var.monit_network_name
#     prefix      = var.monit_network_prefix
#     mac_address = "1c:69:7a:df:ff:01"
#   }
#   admin_ssh_public_key = var.ssh_public_key_admin_netboot_server
# }

locals {
  castopod_domain        = "pod.broken-by-design.fr"
  castopod_upstream_port = 8000
}

module "castopod_config" {
  source                 = "./modules/castopod"
  base_url               = "https://pod.broken-by-design.fr/"
  castopod_domain        = local.castopod_domain
  castopod_upstream_port = local.castopod_upstream_port
  ssh_authorized_keys = [
    file("/var/home/fmaury/.ssh/fma_ovh_rise2.pub")
@@ -83,10 +83,26 @@ module "caddy_config" {
      upstreams = [
        "10.109.0.13:${local.castopod_upstream_port}"
      ]
    },
    {
      domain = "nextcloud.broken-by-design.fr"
      upstreams = [
        "10.109.0.16:8443"
      ]
    }
  ]
  ssh_authorized_keys = var.ssh_authorized_keys
}

module "nextcloud_config" {
  source                    = "./modules/nextcloud"
  ssh_authorized_keys       = var.ssh_authorized_keys
  nextcloud_domain          = "nextcloud.broken-by-design.fr"
  reverse_proxy_ip_address  = "10.109.0.14"
  nextcloud_trusted_domains = [
    "nextcloud.broken-by-design.fr",
  ]
  luks_passphrase = var.nextcloud_luks_passphrase
  luks_use_tpm2   = false
}
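
Note: the modules above reference var.ssh_authorized_keys and var.nextcloud_luks_passphrase, whose declarations are not part of this diff. A minimal sketch of what they presumably look like, with names inferred from usage here (the sensitive flag is an assumption, not from the repo):

# Sketch (not in this commit): assumed root-module variable declarations.
variable "ssh_authorized_keys" {
  type     = list(string)
  nullable = false
}

variable "nextcloud_luks_passphrase" {
  type      = string
  nullable  = false
  sensitive = true # hypothetical: keeps the passphrase out of CLI output
}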

View file

@@ -1,11 +1,11 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "~>0.56.1"
    }
    ignition = {
      source  = "community-terraform-providers/ignition"
      version = "2.3.4"
    }
  }
@@ -13,48 +13,48 @@ terraform {
}

data "ignition_disk" "data" {
  device = "/dev/disk/by-path/0000:00:0b.0"
  partition {
    label     = "caddy_config"
    number    = 0
    sizemib   = 100
    startmib  = 0
    type_guid = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
  }
  partition {
    label     = "caddy_data"
    number    = 0
    sizemib   = 1000
    startmib  = 0
    type_guid = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
  }
}

data "ignition_filesystem" "caddy_config" {
  device          = "/dev/disk/by-label/caddy_config"
  format          = "btrfs"
  wipe_filesystem = true
  label           = "caddy_config"
  path            = "/caddy/config"
  mount_options   = ["nodev", "noexec", "nosuid"]
}

data "ignition_filesystem" "caddy_data" {
  device          = "/dev/disk/by-label/caddy_data"
  format          = "btrfs"
  wipe_filesystem = true
  label           = "caddy_data"
  path            = "/caddy/data"
  mount_options   = ["nodev", "noexec", "nosuid"]
}

data "ignition_config" "acme_server" {
  disks = [
    data.ignition_disk.data.rendered,
  ]
  filesystems = [
    data.ignition_filesystem.caddy_config.rendered,
    data.ignition_filesystem.caddy_data.rendered,
  ]
}
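
The rendered document can be consumed anywhere a raw Ignition JSON string is accepted. A minimal sketch, assuming the hashicorp/local provider already used elsewhere in this repository (the filename is hypothetical):

# Sketch: persist the rendered Ignition config to disk for later delivery.
resource "local_file" "acme_server_ignition" {
  content         = data.ignition_config.acme_server.rendered
  filename        = "${path.module}/acme_server.ign" # hypothetical path
  file_permission = "0644"
}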

View file

@@ -1,3 +1,3 @@
output "test" {
  value = data.ignition_config.acme_server.rendered
}

View file

@@ -1,5 +1,5 @@
variable "fcos_base_vm_id" {
  type     = number
  nullable = false
}

View file

@@ -1,151 +1,151 @@
locals {
  data_device_path = "/dev/vdb"

  caddy_version = "2.8.4-alpine"

  caddy_config_dir_path  = "/opt/caddy_config"
  caddy_data_volume_name = "caddy_data"
  caddy_network_name     = "caddy_net"

  data_disk = {
    device    = local.data_device_path
    wipeTable = true
    partitions = [
      {
        label              = local.caddy_data_volume_name
        number             = 1
        sizeMiB            = 512
        wipePartitionEntry = true
        shouldExist        = true
        resize             = true
      },
    ]
  }

  caddy_data_filesystem = {
    device = "${local.data_device_path}1"
    format = "ext4"
    label  = local.caddy_data_volume_name
  }

  caddy_data_volume_file = {
    path  = "/etc/containers/systemd/${local.caddy_data_volume_name}.volume"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/caddy_data.volume.tftpl",
            {
              caddy_data_volume_name = local.caddy_data_volume_name
            }
          )
        )
      )
    }
  }

  caddy_config_directory = {
    path  = local.caddy_config_dir_path
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
  }

  caddyfile_file = {
    path  = "${local.caddy_config_dir_path}/Caddyfile"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/Caddyfile.tftpl",
            {
              vhosts = var.vhosts
            }
          )
        )
      )
    }
  }

  caddy_network_file = {
    path  = "/etc/containers/systemd/${local.caddy_network_name}.network"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/caddy.network.tftpl",
            {
              caddy_network_name = local.caddy_network_name
            }
          )
        )
      )
    }
  }

  caddy_container_file = {
    path  = "/etc/containers/systemd/caddy.container"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/caddy.container.tftpl",
            {
              caddy_version          = local.caddy_version
              caddy_data_volume_name = local.caddy_data_volume_name
              caddy_config_file_path = "${local.caddy_config_dir_path}/Caddyfile"
              caddy_network_name     = local.caddy_network_name
            }
          )
        )
      )
    }
  }

  ignition_config = jsonencode({
    ignition = {
      version = "3.4.0"
    }
    storage = {
      disks = [
        local.data_disk,
      ]
      filesystems = [
        local.caddy_data_filesystem,
      ]
      files = [
        local.caddy_data_volume_file,
        local.caddyfile_file,
        local.caddy_network_file,
        local.caddy_container_file,
      ]
      directories = [
        local.caddy_config_directory,
      ]
    }
    passwd = {
      users = [
        {
          name              = "core"
          sshAuthorizedKeys = var.ssh_authorized_keys
        }
      ]
    }
  })
}
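
Every inline file above is shipped as an RFC 2397 data URL, the form Ignition expects in storage.files[].contents.source. A self-contained illustration of the encoding pattern (the "hello" payload is only an example, not from the repo):

# Sketch: the data-URL pattern used by all *_file locals in this module.
output "data_url_example" {
  # format + base64encode yield "data:text/plain;base64,aGVsbG8=" for "hello".
  value = format("data:text/plain;base64,%s", base64encode("hello"))
}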

View file

@@ -1,3 +1,3 @@
output "config" {
  value = local.ignition_config
}

View file

@@ -1,16 +1,16 @@
variable "vhosts" {
  type = list(object({
    domain    = string
    upstreams = list(string)
    headers_down = optional(list(object({
      modifier = optional(string, "")
      name     = string
      value    = string
    })), [])
  }))
}

variable "ssh_authorized_keys" {
  type     = list(string)
  nullable = false
}
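
For reference, a call satisfying the vhosts type, modeled on the caddy_config usage in the root main.tf; the module path and the headers_down entry are illustrative assumptions, not taken from the repo:

# Sketch: one valid way to populate var.vhosts.
module "caddy_config_example" {
  source = "./modules/caddy" # hypothetical path
  vhosts = [
    {
      domain    = "pod.broken-by-design.fr"
      upstreams = ["10.109.0.13:8000"]
      headers_down = [
        # hypothetical response header; modifier defaults to ""
        { modifier = "", name = "Strict-Transport-Security", value = "max-age=31536000" }
      ]
    }
  ]
  ssh_authorized_keys = ["ssh-ed25519 AAAA... example"] # placeholder key
}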

View file

@@ -1,452 +1,452 @@
locals {
  caddy_frontend_network_name = "caddy-frontend"
  caddy_container_name        = "caddy"
  caddy_version               = "2.9.1-alpine"
  caddy_config_dir            = "/var/opt/caddy"

  castopod_frontend_network_name = "castopod-frontend"
  castopod_backend_network_name  = "castopod-backend"
  castopod_media_volume_name     = "castopod-media"
  castopod_container_name        = "castopod"

  castopod_db_name  = "castopod"
  castopod_db_user  = "castopod"
  castopod_base_url = var.base_url

  valkey_container_name    = "valkey"
  valkey_cache_volume_name = "castopod-cache"

  mariadb_container_name   = "mariadb"
  mariadb_data_volume_name = "castopod-db"
  mariadb_version          = "11.5"

  secrets_part_name    = "secrets"
  secrets_path         = "/var/opt/secrets"
  secrets_path_escaped = "var-opt-secrets"

  data_device_path = "/dev/vdb"

  data_disk = {
    device    = local.data_device_path
    wipeTable = true
    partitions = [
      {
        label              = local.secrets_part_name
        number             = 1
        sizeMiB            = 1024
        wipePartitionEntry = true
        shouldExist        = true
        resize             = true
      },
      {
        label              = local.castopod_media_volume_name
        number             = 2
        sizeMiB            = 20 * 1024
        wipePartitionEntry = true
        shouldExist        = true
        resize             = true
      },
      {
        label              = local.mariadb_data_volume_name
        number             = 3
        sizeMiB            = 5 * 1024
        wipePartitionEntry = true
        shouldExist        = true
        resize             = true
      },
      {
        label              = local.valkey_cache_volume_name
        number             = 4
        sizeMiB            = 1024
        wipePartitionEntry = true
        shouldExist        = true
        resize             = true
      },
    ]
  }

  caddy_config_directory = {
    path  = local.caddy_config_dir
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 448 # 0700
  }

  caddy_config_file = {
    path  = "${local.caddy_config_dir}/Caddyfile"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/Caddyfile.tftpl",
            {
              castopod_domain         = var.castopod_domain
              castopod_container_name = local.castopod_container_name
            }
          )
        )
      )
    }
  }

  caddy_frontend_network_file = {
    path  = "/etc/containers/systemd/caddy-frontend.network"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/caddy-frontend.network.tftpl",
            {
              caddy_frontend_network_name = local.caddy_frontend_network_name
            }
          )
        )
      )
    }
  }

  caddy_container_file = {
    path  = "/etc/containers/systemd/caddy.container"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/caddy.container.tftpl",
            {
              caddy_container_name           = local.caddy_container_name
              caddy_version                  = local.caddy_version
              caddy_config_dir               = local.caddy_config_dir
              caddy_frontend_network_name    = local.caddy_frontend_network_name
              castopod_frontend_network_name = local.castopod_frontend_network_name
              castopod_upstream_port         = var.castopod_upstream_port
              castopod_media_volume_name     = local.castopod_media_volume_name
            }
          )
        )
      )
    }
  }

  castopod_secrets_filesystem = {
    device = "${local.data_device_path}1"
    format = "ext4"
    label  = local.secrets_part_name
  }

  castopod_secrets_directory = {
    path  = local.secrets_path
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 448 # 0700
  }

  castopod_secrets_mount_unit = {
    name    = "${local.secrets_path_escaped}.mount"
    enabled = true
    contents = templatefile(
      "${path.module}/files/secrets.mount.tftpl",
      {
        secrets_part_name = local.secrets_part_name
        secrets_path      = local.secrets_path
      }
    )
  }

  castopod_generate_secrets_script_file = {
    path  = "/var/opt/generate_secrets.sh"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 448 # 0700
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/generate_secrets.sh.tftpl",
            {
              secrets_path = local.secrets_path
            }
          )
        )
      )
    }
  }

  castopod_generate_secrets_service_unit = {
    name    = "generate_secrets.service"
    enabled = true
    contents = templatefile(
      "${path.module}/files/generate_secrets.service.tftpl",
      {
        secrets_path         = local.secrets_path
        secrets_path_escaped = local.secrets_path_escaped
      }
    )
  }

  castopod_frontend_network_file = {
    path  = "/etc/containers/systemd/${local.castopod_frontend_network_name}.network"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/castopod-frontend.network.tftpl",
            {
              castopod_frontend_network_name = local.castopod_frontend_network_name
            }
          )
        )
      )
    }
  }

  castopod_backend_network_file = {
    path  = "/etc/containers/systemd/${local.castopod_backend_network_name}.network"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/castopod-backend.network.tftpl",
            {
              castopod_backend_network_name = local.castopod_backend_network_name
            }
          )
        )
      )
    }
  }

  castopod_media_volume_filesystem = {
    device = "${local.data_device_path}2"
    format = "ext4"
    label  = local.castopod_media_volume_name
    options = [
      "-E", "root_owner=33:33",
    ]
  }

  castopod_media_volume_file = {
    path  = "/etc/containers/systemd/${local.castopod_media_volume_name}.volume"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/castopod-media.volume.tftpl",
            {
              castopod_media_volume_name = local.castopod_media_volume_name
            }
          )
        )
      )
    }
  }

  mariadb_data_volume_filesystem = {
    device = "${local.data_device_path}3"
    format = "ext4"
    label  = local.mariadb_data_volume_name
    options = [
      "-E", "root_owner=999:999",
    ]
  }

  mariadb_data_volume_file = {
    path  = "/etc/containers/systemd/${local.mariadb_data_volume_name}.volume"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/mariadb-data.volume.tftpl",
            {
              mariadb_data_volume_name = local.mariadb_data_volume_name
            }
          )
        )
      )
    }
  }

  mariadb_container_file = {
    path  = "/etc/containers/systemd/${local.mariadb_container_name}.container"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/mariadb.container.tftpl",
            {
              mariadb_container_name        = local.mariadb_container_name
              mariadb_version               = local.mariadb_version
              mariadb_data_volume_name      = local.mariadb_data_volume_name
              castopod_backend_network_name = local.castopod_backend_network_name
              castopod_db_name              = local.castopod_db_name
              castopod_db_user              = local.castopod_db_user
              secrets_path                  = local.secrets_path
            }
          )
        )
      )
    }
  }

  valkey_cache_volume_filesystem = {
    device = "${local.data_device_path}4"
    format = "ext4"
    label  = local.valkey_cache_volume_name
    options = [
      "-E", "root_owner=999:999",
    ]
  }

  valkey_cache_volume_file = {
    path  = "/etc/containers/systemd/${local.valkey_cache_volume_name}.volume"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/valkey.volume.tftpl",
            {
              valkey_cache_volume_name = local.valkey_cache_volume_name
            }
          )
        )
      )
    }
  }

  valkey_container_file = {
    path  = "/etc/containers/systemd/${local.valkey_container_name}.container"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile(
            "${path.module}/files/valkey.container.tftpl",
            {
              valkey_container_name         = local.valkey_container_name
              valkey_version                = "8.0-alpine"
              valkey_cache_volume_name      = local.valkey_cache_volume_name
              castopod_backend_network_name = local.castopod_backend_network_name
              secrets_path                  = local.secrets_path
            }
          )
        )
      )
    }
  }

  castopod_container_file = {
    path  = "/etc/containers/systemd/${local.castopod_container_name}.container"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(
          templatefile("${path.module}/files/castopod.container.tftpl", {
            castopod_version               = "1.13.2",
            castopod_container_name        = local.castopod_container_name
            castopod_frontend_network_name = local.castopod_frontend_network_name
            castopod_backend_network_name  = local.castopod_backend_network_name
            castopod_media_volume_name     = local.castopod_media_volume_name
            castopod_db_name               = local.castopod_db_name
            castopod_db_user               = local.castopod_db_user
            castopod_base_url              = var.base_url
            mariadb_container_name         = local.mariadb_container_name
            valkey_container_name          = local.valkey_container_name
            secrets_path                   = local.secrets_path
          })
        )
      )
    }
  }

  ignition_config = jsonencode({
    ignition = {
      version = "3.4.0"
    }
    storage = {
      disks = [
        local.data_disk,
      ]
      filesystems = [
        local.castopod_secrets_filesystem,
        local.castopod_media_volume_filesystem,
        local.mariadb_data_volume_filesystem,
        local.valkey_cache_volume_filesystem,
      ]
      files = [
        local.caddy_config_file,
        local.caddy_frontend_network_file,
        local.caddy_container_file,
        local.castopod_generate_secrets_script_file,
        local.castopod_frontend_network_file,
        local.castopod_backend_network_file,
        local.castopod_media_volume_file,
        local.mariadb_data_volume_file,
        local.mariadb_container_file,
        local.valkey_cache_volume_file,
        local.valkey_container_file,
        local.castopod_container_file,
      ]
      directories = [
        local.caddy_config_directory,
        local.castopod_secrets_directory,
      ]
    }
    systemd = {
      units = [
        local.castopod_secrets_mount_unit,
        local.castopod_generate_secrets_service_unit,
      ]
    }
    passwd = {
      users = [
        {
          name              = "core"
          sshAuthorizedKeys = var.ssh_authorized_keys
        }
      ]
    }
  })
}

View file

@@ -1,3 +1,3 @@
output "config" {
  value = local.ignition_config
}

View file

@@ -1,19 +1,19 @@
variable "ssh_authorized_keys" {
  type     = list(string)
  nullable = false
}

variable "base_url" {
  type     = string
  nullable = false
}

variable "castopod_domain" {
  type     = string
  nullable = false
}

variable "castopod_upstream_port" {
  type     = number
  nullable = false
}

View file

@@ -1,7 +1,7 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox"
      version = "~>0.56.1"
    }
  }
@@ -10,159 +10,159 @@ terraform {
locals {
  core_user = {
    name          = "core"
    password_hash = "$6$vDMAZf/yOO6mEbcs$6VE7WD8T9/PeotszMFxatOQxB/rFmLDWsNajg4sI0O47OikSuVpqPjkxRbzcueiXn6rBUY1ubCHlp0nnoZ1VI1"
  }

  hostname_file = {
    path  = "/etc/hostname"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(var.instance_name)
      )
    }
  }

  ignition_configuration = jsonencode({
    ignition = {
      version = "3.4.0"
    }
    storage = {
      files = [
        {
          path  = "/etc/hostname"
          user  = { id = 0 }
          group = { id = 0 }
          mode  = 420 # 0644
          contents = {
            source = format(
              "data:text/plain;base64,%s",
              base64encode(var.instance_name)
            )
          }
        },
      ]
    }
    passwd = {
      users = [
        local.core_user,
      ]
    }
  })
}

resource "random_pet" "config_name" {
  length = 4
}

locals {
  generated_ignition_config_file = "${path.module}/dns_resolver_ignition_config_${random_pet.config_name.id}.ign"
}

resource "local_file" "sftp_script_for_ignition_file" {
  content         = <<EOT
cd writable
-rm ${var.pve_vm_id}.ign
put ${local.generated_ignition_config_file} ${var.pve_vm_id}.ign
EOT
  filename        = "${path.module}/dns_resolver_sftp_script_for_ignition_config_${random_pet.config_name.id}"
  file_permission = "0644"
}

resource "local_file" "dns_resolver_ignition_config" {
  content         = local.ignition_configuration
  filename        = local.generated_ignition_config_file
  file_permission = "0644"

  provisioner "local-exec" {
    command = <<EOT
sftp -P ${var.netboot_server_sftp_port} \
  -o ProxyJump=${var.pve_ssh_user}@${var.pve_ssh_host} \
  -b "${path.module}/dns_resolver_sftp_script_for_ignition_config_${random_pet.config_name.id}" \
  terraform_ignition@${var.netboot_server_ip_address}
EOT
  }

  lifecycle {
    replace_triggered_by = [local_file.sftp_script_for_ignition_file]
  }
}

resource "local_file" "sftp_script_for_dhcp_config" {
  content         = <<EOT
cd writable
-rm ${var.pve_vm_id}.conf
put ${path.module}/dns_resolver_dhcp_config_${random_pet.config_name.id}.conf ${var.pve_vm_id}.conf
EOT
  filename        = "${path.module}/dns_resolver_sftp_script_for_dhcp_config_${random_pet.config_name.id}"
  file_permission = "0644"
}

resource "local_file" "dhcp_config" {
  depends_on = [local_file.sftp_script_for_dhcp_config]
  content = templatefile(
    "${path.module}/files/dhcp_config.conf.tftpl",
    {
      vm_id       = var.pve_vm_id
      host_ip     = cidrhost(var.prod_network.prefix, var.pve_vm_id)
      mac_address = var.prod_network.mac_address
    }
  )
  filename        = "${path.module}/dns_resolver_dhcp_config_${random_pet.config_name.id}.conf"
  file_permission = "0644"

  provisioner "local-exec" {
    command = <<EOT
sftp -P ${var.netboot_server_sftp_port} \
  -o ProxyJump=${var.pve_ssh_user}@${var.pve_ssh_host} \
  -b "${path.module}/dns_resolver_sftp_script_for_dhcp_config_${random_pet.config_name.id}" \
  terraform_dhcp@${var.netboot_server_ip_address}
EOT
  }

  lifecycle {
    replace_triggered_by = [local_file.sftp_script_for_dhcp_config]
  }
}
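
The host_ip handed to the DHCP template above is derived from the VM ID with cidrhost(). As an illustration, assuming the /24 prefix suggested by defaults elsewhere in this repo:

# Sketch: cidrhost() maps a VM ID to a stable lease address.
locals {
  # cidrhost("10.109.0.0/24", 110) evaluates to "10.109.0.110"
  example_host_ip = cidrhost("10.109.0.0/24", 110) # prefix is an assumption
}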
resource "proxmox_virtual_environment_vm" "netboot_server" {
name = var.instance_name
node_name = var.pve_node_name
vm_id = var.pve_vm_id
cpu {
architecture = "x86_64"
type = "host"
sockets = 1
cores = 4
}
name = var.instance_name
node_name = var.pve_node_name
vm_id = var.pve_vm_id
memory {
dedicated = 4096
}
cpu {
architecture = "x86_64"
type = "host"
sockets = 1
cores = 4
}
disk {
datastore_id = var.pve_storage_id
interface = "virtio0"
size = 10
}
memory {
dedicated = 4096
}
network_device {
bridge = "prod"
model = "virtio"
mac_address = var.prod_network.mac_address
}
disk {
datastore_id = var.pve_storage_id
interface = "virtio0"
size = 10
}
boot_order = ["net0"]
network_device {
bridge = "prod"
model = "virtio"
mac_address = var.prod_network.mac_address
}
operating_system {
type = "l26"
}
boot_order = ["net0"]
vga {}
serial_device{}
operating_system {
type = "l26"
}
vga {}
serial_device {}
}

View file

@@ -1,70 +1,70 @@
variable "pve_node_name" {
  type     = string
  nullable = false
}

variable "pve_storage_id" {
  type     = string
  nullable = false
}

variable "pve_vm_id" {
  type     = number
  nullable = false
}

variable "pve_ssh_user" {
  type     = string
  nullable = false
  default  = "root"
}

variable "pve_ssh_host" {
  type     = string
  nullable = false
  default  = "proxmox.broken-by-design.fr"
}

variable "netboot_server_ip_address" {
  type     = string
  nullable = false
  default  = "10.109.0.2"
}

variable "netboot_server_sftp_port" {
  type     = number
  nullable = false
  default  = 2222
}

variable "instance_name" {
  type    = string
  default = "knot-resolver"
}

variable "admin_network" {
  type = object({
    name        = string
    prefix      = string
    mac_address = string
  })
  nullable = false
}

variable "prod_network" {
  type = object({
    name        = string
    prefix      = string
    mac_address = string
  })
  nullable = false
}

variable "monitoring_network" {
  type = object({
    name        = string
    prefix      = string
    mac_address = string
  })
  nullable = false
}

View file

@@ -1,149 +1,149 @@
locals {
  caddy_data_filesystem = {
    device = "${local.data_device_path}-part1"
    format = "ext4"
    label  = "caddy_data"
  }

  caddy_data_volume_file = {
    path  = "/etc/containers/systemd/caddy_data.volume"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/caddy/caddy_data.volume"))
      )
    }
  }

  fcos_images_filesystem = {
    device = "${local.data_device_path}-part4"
    format = "ext4"
    label  = "fcos_images"
  }

  fcos_images_volume_file = {
    path  = "/etc/containers/systemd/fcos_images.volume"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/caddy/fcos_images.volume"))
      )
    }
  }

  image_downloader_image_file = {
    path  = "/etc/containers/systemd/image_downloader.image"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/caddy/image_downloader.image"))
      )
    }
  }

  image_downloader_container_file = {
    path  = "/etc/containers/systemd/image_downloader.container"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/caddy/image_downloader.container"))
      )
    }
  }

  caddy_builddir_dir = {
    path  = "/root/caddy"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 448 # 0700
  }

  caddyfile_file = {
    path  = "/root/caddy/Caddyfile"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 384 # 0600
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/caddy/Caddyfile"))
      )
    }
  }

  ipxe_script_file = {
    path  = "/root/caddy/ipxe.script"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 384 # 0600
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/caddy/ipxe.script"))
      )
    }
  }

  caddy_containerfile_file = {
    path  = "/root/caddy/Containerfile"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 384 # 0600
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/caddy/caddy.Containerfile"))
      )
    }
  }

  caddy_container_file = {
    path  = "/etc/containers/systemd/caddy.container"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/caddy/caddy.container"))
      )
    }
  }

  caddy_filesystems = [
    local.caddy_data_filesystem,
    local.fcos_images_filesystem,
  ]

  caddy_directories = [
    local.caddy_builddir_dir,
  ]

  caddy_files = [
    local.caddy_data_volume_file,
    local.fcos_images_volume_file,
    local.image_downloader_image_file,
    local.image_downloader_container_file,
    local.caddyfile_file,
    local.ipxe_script_file,
    local.caddy_containerfile_file,
    local.caddy_container_file,
  ]

  caddy_systemd_units = []
}

View file

@@ -1,126 +1,126 @@
locals {
  dhcp_config_path_systemd_unit = {
    name    = "dhcp_config.path"
    enabled = true
    contents = templatefile(
      "${path.module}/files/dhcp/dhcp_config.path.tftpl",
      {
        path = "/var/lib/containers/storage/volumes/dhcp_config/_data/writable/"
      }
    )
  }

  dhcp_config_service_systemd_unit = {
    name     = "dhcp_config.service"
    enabled  = false
    contents = file("${path.module}/files/dhcp/dhcp_config.service")
  }

  dhcp_data_filesystem = {
    device = "${local.data_device_path}-part3"
    format = "ext4"
    label  = "dhcp_data"
  }

  dhcp_data_volume_file = {
    path  = "/etc/containers/systemd/dhcp_data.volume"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/dhcp/dhcp_data.volume"))
      )
    }
  }

  dhcp_builddir_dir = {
    path  = "/root/dhcp"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 448 # 0700
  }

  dnsmasq_base_config_file = {
    path  = "/root/dhcp/dnsmasq.conf"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(templatefile(
          "${path.module}/files/dhcp/dnsmasq.conf.tftpl",
          {
            dhcp_server_ip_addr  = var.dhcp_server_ip_addr
            dhcp_range           = split("/", var.dhcp_range)[0]
            dhcp_range_netmask   = cidrnetmask(var.dhcp_range)
            dhcp_router          = var.dhcp_gateway
            config_extension_dir = "/etc/dnsmasq.d/writable/"
          }
        ))
      )
    }
  }

  generate_dhcp_options_script_file = {
    path  = "/var/roothome/generate_dhcp_options.sh"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 448 # 0700
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/dhcp/generate_dhcp_options.sh"))
      )
    }
  }

  dhcp_containerfile_file = {
    path  = "/root/dhcp/Containerfile"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/dhcp/dnsmasq.Containerfile"))
      )
    }
  }

  dhcp_container_file = {
    path  = "/etc/containers/systemd/dnsmasq_container.container"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/dhcp/dnsmasq_container.container"))
      )
    }
  }

  dhcp_filesystems = [
    local.dhcp_data_filesystem,
  ]

  dhcp_directories = [
    local.dhcp_builddir_dir,
  ]

  dhcp_files = [
    local.dhcp_data_volume_file,
    local.dnsmasq_base_config_file,
    local.generate_dhcp_options_script_file,
    local.dhcp_containerfile_file,
    local.dhcp_container_file,
  ]

  dhcp_systemd_units = [
    local.dhcp_config_path_systemd_unit,
    local.dhcp_config_service_systemd_unit,
  ]
}
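
Both dnsmasq inputs above are derived from the single CIDR in var.dhcp_range. As an illustration, assuming a /24 range:

# Sketch: deriving the dnsmasq template inputs from a CIDR such as "10.109.0.0/24".
locals {
  example_dhcp_range   = split("/", "10.109.0.0/24")[0] # "10.109.0.0"
  example_dhcp_netmask = cidrnetmask("10.109.0.0/24")   # "255.255.255.0"
}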

View file

@@ -1,263 +1,263 @@
terraform {
  required_providers {
    proxmox = {
      source  = "bpg/proxmox",
      version = "~>0.56.1"
    }
    random = {
      source = "hashicorp/random"
    }
    local = {
      source = "hashicorp/local"
    }
  }
  required_version = ">=1.6.2"
}

module "sshd" {
  source         = "../sshd"
  address_family = "inet"
}

locals {
  data_device_path = "/dev/disk/by-path/pci-0000:00:0a.0"

  data_disk = {
    device = local.data_device_path
    partitions = [
      {
        label    = "caddy_data"
        number   = 1
        startMiB = 0
        sizeMiB  = 100
        typeGuid = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
        resize   = true
      },
      {
        label    = "dhcp_config"
        number   = 2
        startMiB = 0
        sizeMiB  = 10
        typeGuid = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
        resize   = true
      },
      {
        label    = "dhcp_data"
        number   = 3
        startMiB = 0
        sizeMiB  = 10
        typeGuid = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
        resize   = true
      },
      {
        label    = "fcos_images"
        number   = 4
        startMiB = 0
        sizeMiB  = 8192
        typeGuid = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
        resize   = true
      },
      {
        label    = "ign_files"
        number   = 5
        startMiB = 0
        sizeMiB  = 512
        typeGuid = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
        resize   = true
      },
      {
        label    = "ssh_keys"
        number   = 6
        startMiB = 0
        sizeMiB  = 10
        typeGuid = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
        resize   = true
      }
    ]
  }

  hostname_file = {
    path  = "/etc/hostname"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # 0644
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(var.hostname),
      )
    }
  }

  network_config_file = {
    path  = "/etc/NetworkManager/system-connections/${var.dhcp_iface}.nmconnection"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 384 # 0600
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(templatefile(
          "${path.module}/files/dhcp_nmconnection.tftpl",
          {
            iface      = var.dhcp_iface
            ip_address = var.dhcp_server_ip_addr
            netmask    = split("/", var.dhcp_range)[1]
            gateway    = var.dhcp_gateway
            dns_server = var.dhcp_gateway
          }
        ))
      )
    }
  }

  core_user = {
    name         = "core"
    passwordHash = "$6$vDMAZf/yOO6mEbcs$6VE7WD8T9/PeotszMFxatOQxB/rFmLDWsNajg4sI0O47OikSuVpqPjkxRbzcueiXn6rBUY1ubCHlp0nnoZ1VI1" # password is "tititoto"; only there for debug; please remove in prod
    sshAuthorizedKeys = [
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFQnLSYLGzUVmDMMGgEKCNgfAOkIuqhOMGGuvgskACum fmaury@2a01cb00142b3d00ee15f742996f2775.ipv6.abo.wanadoo.fr"
    ]
  }

  ignition_config = jsonencode({
    ignition = {
      version = "3.4.0"
    }
    storage = {
      disks = [
        local.data_disk,
      ]
      filesystems = concat(
        local.dhcp_filesystems,
        local.caddy_filesystems,
        local.sftp_filesystems,
      )
      directories = concat(
        local.dhcp_directories,
        local.caddy_directories,
        local.sftp_directories,
      )
      files = concat(
        [
          local.hostname_file,
          local.network_config_file,
        ],
        module.sshd.files,
        local.dhcp_files,
        local.caddy_files,
        local.sftp_files,
      )
    }
    systemd = {
      units = concat(
        local.dhcp_systemd_units,
        local.caddy_systemd_units,
        module.sshd.systemd_units,
      )
    }
    passwd = {
      users = concat(
        [
          local.core_user
        ],
        module.sshd.users,
      )
      groups = module.sshd.groups
    }
  })
}
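
Each service file (dhcp, caddy, sftp) exposes its own *_files, *_filesystems, and *_directories lists, and the Ignition document above merges them with concat(). A minimal illustration of the composition pattern (names are hypothetical):

# Sketch: concat() merges per-service lists into one Ignition section.
locals {
  svc_a_files = [{ path = "/etc/a.conf" }] # hypothetical
  svc_b_files = [{ path = "/etc/b.conf" }] # hypothetical
  all_files   = concat(local.svc_a_files, local.svc_b_files)
}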
resource "random_pet" "config_name" {
length = 4
length = 4
}
locals {
  generated_ignition_config_file = "netboot_server_ignition_config_${random_pet.config_name.id}.ign"
}
resource "local_file" "api_token" {
content = "Authorization: PVEAPIToken=${var.pve_api_token}"
filename = "pve_api_token"
file_permission = "0600"
content = "Authorization: PVEAPIToken=${var.pve_api_token}"
filename = "pve_api_token"
file_permission = "0600"
}
resource "local_file" "netboot_server_ignition_config" {
depends_on = [ local_file.api_token ]
content = local.ignition_config
filename = format("${path.module}/%s", local.generated_ignition_config_file)
file_permission = "0644"
depends_on = [local_file.api_token]
content = local.ignition_config
filename = format("${path.module}/%s", local.generated_ignition_config_file)
file_permission = "0644"
# Download ISO to customize
provisioner "local-exec" {
command = <<EOT
# Download ISO to customize
provisioner "local-exec" {
command = <<EOT
podman run --security-opt label=disable --pull=always --rm -v ${path.cwd}/${path.module}:/data -w /data \
quay.io/coreos/coreos-installer:release download -f iso
EOT
}
}
# Customize ISO
provisioner "local-exec" {
environment = {
KERNEL_ARG = "--live-karg-append=coreos.liveiso.fromram"
IGNITION_ARG = "--live-ignition=./${local.generated_ignition_config_file}"
}
command = <<EOT
# Customize ISO
provisioner "local-exec" {
environment = {
KERNEL_ARG = "--live-karg-append=coreos.liveiso.fromram"
IGNITION_ARG = "--live-ignition=./${local.generated_ignition_config_file}"
}
command = <<EOT
rm -f ${path.module}/customized-${random_pet.config_name.id}.iso && \
podman run --security-opt label=disable --pull=always --rm -v ${path.cwd}/${path.module}:/data -w /data \
quay.io/coreos/coreos-installer:release \
iso customize $KERNEL_ARG $IGNITION_ARG \
-o customized-${random_pet.config_name.id}.iso $(basename $(ls -1 ${path.module}/fedora-coreos-*-live.x86_64.iso))
EOT
  }
  provisioner "local-exec" {
    command = <<EOT
curl \
-F "content=iso" \
-F "filename=@${path.module}/customized-${random_pet.config_name.id}.iso;type=application/vnd.efi.iso;filename=fcos-netboot-server-${random_pet.config_name.id}.iso" \
-H "@${local_file.api_token.filename}" \
"${var.pve_api_base_url}api2/json/nodes/${var.pve_node_name}/storage/${var.pve_storage_id}/upload"
EOT
  }
}
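
# Added note (assumes pvesh is available on the PVE node): the ISO upload above
# can be verified by listing the datastore's ISO content, e.g.:
#   pvesh get /nodes/<node>/storage/<storage>/content --content iso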
resource "proxmox_virtual_environment_vm" "netboot_server" {
  name      = "netboot-server"
  node_name = var.pve_node_name
  vm_id     = var.pve_vm_id

  cpu {
    architecture = "x86_64"
    type         = "host"
    sockets      = 1
    cores        = 4
  }

  memory {
    dedicated = 4096
  }

  cdrom {
    enabled = true
    file_id = "${var.pve_storage_id}:iso/fcos-netboot-server-${random_pet.config_name.id}.iso"
  }

  disk {
    datastore_id = var.pve_storage_id
    interface    = "virtio0"
    size         = 10
  }

  network_device {
    bridge = var.prod_network_name
    model  = "virtio"
  }

  operating_system {
    type = "l26"
  }

  keyboard_layout = "fr"
  vga {}
  serial_device {}
}
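
# Added note: the serial_device block exposes a serial console, so first-boot
# Ignition problems can be inspected from the PVE node with "qm terminal <vmid>".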


@ -1,191 +1,191 @@
module "sftp" {
source = "../sshd"
base_config_dir = "/var/roothome/sftp"
use_socket_activation = true
address_family = "inet"
listen_port = 22
sftp_only = true
chrooted_users = local.chrooted_users
host_keys = ["/data/ssh_keys/ssh_host_ed25519_key"]
}
locals {
sftp_keys_filesystem = {
device = "${local.data_device_path}-part6"
format = "ext4"
label = "ssh_keys"
}
chrooted_users = [
{
username = "terraform_dhcp"
chroot = "/data/dhcp_config"
ssh_public_key = var.ssh_public_key_opentofu_netboot_server
},
{
username = "terraform_ignition"
chroot = "/data/ign_files"
ssh_public_key = var.ssh_public_key_opentofu_netboot_server
}
]
sftp_build_dir = {
path = "/var/roothome/sftp"
user = { id = 0 }
group = { id = 0 }
mode = 448 # 0700
}
sftp_containerfile_file = {
path = "/var/roothome/sftp/Containerfile"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(templatefile(
"${path.module}/files/sftp/Containerfile.tftpl",
{
chrooted_users = local.chrooted_users
}
))
)
}
}
sftp_keys_volume_file = {
path = "/etc/containers/systemd/ssh_keys.volume"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(file("${path.module}/files/sftp/ssh_keys.volume"))
)
}
}
sftp_init_keys_container_file = {
path = "/etc/containers/systemd/sftp_init_keys.container"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(file("${path.module}/files/sftp/sftp_init_keys.container"))
)
}
}
sftp_container_file = {
path = "/etc/containers/systemd/sftp.container"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(templatefile(
"${path.module}/files/sftp/sftp.container.tftpl",
{
internal_port = 22
external_port = 2222
}
))
)
}
}
dhcp_config_filesystem = {
device = "${local.data_device_path}-part2"
format = "ext4"
label = "dhcp_config"
}
sftp_dhcp_config_init_container = {
path = "/etc/containers/systemd/dhcp_config_init.container"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(file("${path.module}/files/sftp/dhcp_config_init.container"))
)
}
}
sftp_dhcp_config_volume_file = {
path = "/etc/containers/systemd/dhcp_config.volume"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(file("${path.module}/files/sftp/dhcp_config.volume"))
)
}
}
ignition_files_filesystem = {
device = "${local.data_device_path}-part5"
format = "ext4"
label = "ign_files"
}
sftp_ignition_files_init_container = {
path = "/etc/containers/systemd/ign_files_init.container"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(file("${path.module}/files/sftp/ign_files_init.container"))
)
}
}
sftp_ignition_files_volume_file = {
path = "/etc/containers/systemd/ign_files.volume"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(file("${path.module}/files/sftp/ign_files.volume"))
)
}
}
sftp_filesystems = [
local.sftp_keys_filesystem,
local.dhcp_config_filesystem,
local.ignition_files_filesystem,
]
sftp_directories = [
local.sftp_build_dir,
]
sftp_files = concat(
[
local.sftp_keys_volume_file,
local.sftp_init_keys_container_file,
local.sftp_container_file,
local.sftp_containerfile_file,
local.sftp_dhcp_config_init_container,
local.sftp_dhcp_config_volume_file,
local.sftp_ignition_files_init_container,
local.sftp_ignition_files_volume_file,
],
module.sftp.files
)
  # We can safely ignore the module's systemd units and users: all of them live
  # inside the container and are already handled by the Containerfile.
}
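
# Example (added for illustration, mirroring how the poc module later in this
# commit consumes the service): files are pushed through the published port 2222
# as one of the chrooted users declared above, e.g.:
#   sftp -P 2222 terraform_dhcp@<netboot-server-ip>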


@ -1,78 +1,78 @@
variable "pve_api_base_url" {
type = string
nullable = false
}
variable "pve_api_token" {
type = string
nullable = false
sensitive = true
}
variable "pve_node_name" {
type = string
nullable = false
}
variable "pve_storage_id" {
type = string
nullable = false
}
variable "pve_vm_id" {
type = number
nullable = false
}
variable "prod_network_name" {
type = string
nullable = false
}
variable "dhcp_server_ip_addr" {
type = string
nullable = false
validation {
condition = can(cidrnetmask("${var.dhcp_server_ip_addr}/32"))
error_message = "Invalid DHCP server address."
}
}
variable "dhcp_iface" {
type = string
nullable = false
}
variable "dhcp_gateway" {
type = string
nullable = false
validation {
condition = can(cidrnetmask("${var.dhcp_gateway}/32"))
    error_message = "Invalid gateway address."
}
}
variable "dhcp_range" {
type = string
nullable = false
validation {
condition = can(cidrnetmask(var.dhcp_range))
error_message = "Invalid DHCP range."
}
}
variable "ssh_public_key_opentofu_netboot_server" {
type = string
nullable = false
}
variable "fcos_image_version" {
type = string
nullable = false
default = "40.20240504.3.0"
}
variable "hostname" {
type = string
nullable = false
}


@ -0,0 +1,102 @@
http://${nextcloud_domain}:80 {
redir https://${nextcloud_domain}{uri} permanent
}
${nextcloud_domain}:443 {
root * /var/www/html
encode gzip zstd
rewrite /ocm-provider/ /index.php
rewrite /ocs-provider/ /ocs-provider/index.php
rewrite /remote /remote.php
rewrite /remote/* /remote.php?{query}
redir /.well-known/caldav /remote.php/dav 301
redir /.well-known/carddav /remote.php/dav 301
redir /.well-known/webfinger /index.php/.well-known/webfinger 301
redir /.well-known/nodeinfo /index.php/.well-known/nodeinfo 301
# .htaccess / data / config / ... shouldn't be accessible from outside
@forbidden {
path /.htaccess
path /.xml
path /console.php
path /cron.php
path /3rdparty/*
path /README
path /autotest/*
path /build/*
path /config/*
path /console/*
path /data/*
path /db_*/*
path /db_structure
path /indie/*
path /issue/*
path /lib/*
path /occ
path /occ/*
path /templates/*
path /tests/*
}
respond @forbidden 404
@static {
method GET HEAD
not path /index.php*
not path /apps/theming/composer/*
not path /apps/theming/lib/*
not path /apps/theming/templates/*
not path /apps/theming/theme/*
not path /js/core/merged-template-prepend.js
path *.css
path *.css.map
path *.gif
path *.ico
path *.jpg
path *.js
path *.js.map
path *.json
path *.mjs
path *.otf
path *.png
path *.svg
path *.tflite
path *.wasm
path *.webp
path *.woff2
}
handle @static {
header Cache-Control "max-age=604800"
file_server
}
handle * {
@index_files file {
try_files {path} {path}/index.php /index.php{path}
split_path .php
}
rewrite @index_files {file_match.relative}
php_fastcgi nextcloud:9000 {
env PATH /bin
env modHeadersAvailable true
env front_controller_active true
dial_timeout 60s
read_timeout 3600s
write_timeout 300s
}
}
header Strict-Transport-Security "max-age=15768000;"
header X-Content-Type-Options "nosniff"
header X-XSS-Protection "1; mode=block"
header X-Robots-Tag "noindex, nofollow"
header X-Download-Options "noopen"
header X-Permitted-Cross-Domain-Policies "none"
header Referrer-Policy "no-referrer"
header X-Frame-Options "SAMEORIGIN"
log
tls internal
}
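# Added note: "tls internal" serves a certificate from Caddy's locally managed
# CA, so any client or front proxy reaching this vhost must either trust that CA
# or skip certificate verification for this hop.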


@ -0,0 +1,6 @@
[Unit]
Description = Caddy Backend Network
[Network]
NetworkName = caddy-backend
Internal = true


@ -0,0 +1,11 @@
[Unit]
Description = Caddy Data Volume
[Volume]
VolumeName = ${caddy_data_volume_name}
Device=/dev/disk/by-label/${caddy_data_volume_name}
Options=nodev,noexec,nosuid,rootcontext=system_u:object_r:container_file_t:s0
Type=ext4
[Install]
WantedBy=default.target


@ -0,0 +1,5 @@
[Unit]
Description = Caddy Frontend Network
[Network]
NetworkName = caddy-frontend


@ -0,0 +1,23 @@
[Unit]
Description = Caddy Reverse Proxy
[Container]
ContainerName = ${caddy_container_name}
Image = ${caddy_image_name}:${caddy_image_tag}
Network = caddy-frontend.network
Network = caddy-backend.network
Volume = caddy-data.volume:/data:z
Volume = ${caddyfile_file_path}:/etc/caddy/Caddyfile:ro,z
Volume = nextcloud-data.volume:/var/www/html:ro,z
PublishPort = 8080:80
PublishPort = 8443:443
[Service]
ExecStartPre=/usr/bin/chcon -t container_file_t ${caddyfile_file_path}
Restart=on-failure
[Install]
WantedBy=multi-user.target


@ -0,0 +1,10 @@
[Unit]
Description = Generate Secrets
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/usr/bin/bash -c "podman secret exists ${postgres_password_secret_name} || head -c 16 /dev/urandom | base64 | podman secret create ${postgres_password_secret_name} -"
[Install]
WantedBy=multi-user.target
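# Added note: the resulting secret can be checked on the host with, e.g.:
#   podman secret ls
#   podman secret inspect <secret-name>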


@ -0,0 +1,6 @@
[Unit]
Description = Nextcloud Backend Network
[Network]
NetworkName = nextcloud-backend
Internal = true


@ -0,0 +1,11 @@
[Unit]
Description = Nextcloud Data Volume
[Volume]
VolumeName = ${nextcloud_data_volume_name}
Device=/dev/disk/by-label/${nextcloud_data_volume_name}
Options=nodev,noexec,nosuid,rootcontext=system_u:object_r:container_file_t:s0
Type=ext4
[Install]
WantedBy=default.target


@ -0,0 +1,5 @@
[Unit]
Description = Nextcloud Internet Network
[Network]
NetworkName = nextcloud-internet


@ -0,0 +1,31 @@
[Unit]
Description = "Nextcloud"
Wants=generate-secrets.service
After=generate-secrets.service
[Container]
ContainerName = ${nextcloud_container_name}
Image = ${nextcloud_image_name}:${nextcloud_image_tag}
Network=caddy-backend.network
Network=nextcloud-backend.network
Network=nextcloud-internet.network
Volume = nextcloud-data.volume:/var/www/html:z
Volume = ${php_fpm_config_file_path}:/usr/local/etc/php-fpm.d/www.conf:ro,z
EnvironmentFile=${postgres_env_file_path}
Environment=POSTGRES_HOST=${postgres_container_name}
Secret=${postgres_password_secret_name}
Environment=REDIS_HOST=${valkey_container_name}
Environment=NEXTCLOUD_TRUSTED_DOMAINS=${nextcloud_trusted_domains}
Environment=PHP_UPLOAD_LIMIT=${php_upload_limit}
Environment=TRUSTED_PROXIES=${nextcloud_trusted_proxies}
[Service]
Restart = on-failure
ExecStartPre = /usr/bin/chcon -t container_file_t ${php_fpm_config_file_path}
[Install]
WantedBy = multi-user.target


@ -0,0 +1,490 @@
; Start a new pool named 'www'.
; the variable $pool can be used in any directive and will be replaced by the
; pool name ('www' here)
[www]
; Per pool prefix
; It only applies on the following directives:
; - 'access.log'
; - 'slowlog'
; - 'listen' (unixsocket)
; - 'chroot'
; - 'chdir'
; - 'php_values'
; - 'php_admin_values'
; When not set, the global prefix (or NONE) applies instead.
; Note: This directive can also be relative to the global prefix.
; Default Value: none
;prefix = /path/to/pools/$pool
; Unix user/group of the child processes. This can be used only if the master
; process running user is root. It is set after the child process is created.
; The user and group can be specified either by their name or by their numeric
; IDs.
; Note: If the user is root, the executable needs to be started with
; --allow-to-run-as-root option to work.
; Default Values: The user is set to master process running user by default.
; If the group is not set, the user's group is used.
user = www-data
group = www-data
; The address on which to accept FastCGI requests.
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Note: This value is mandatory.
listen = 9000
; Set listen(2) backlog.
; Default Value: 511 (-1 on Linux, FreeBSD and OpenBSD)
;listen.backlog = 511
; Set permissions for unix socket, if one is used. In Linux, read/write
; permissions must be set in order to allow connections from a web server. Many
; BSD-derived systems allow connections regardless of permissions. The owner
; and group can be specified either by name or by their numeric IDs.
; Default Values: Owner is set to the master process running user. If the group
; is not set, the owner's group is used. Mode is set to 0660.
;listen.owner = www-data
;listen.group = www-data
;listen.mode = 0660
; When POSIX Access Control Lists are supported you can set them using
; these options, value is a comma separated list of user/group names.
; When set, listen.owner and listen.group are ignored
;listen.acl_users =
;listen.acl_groups =
; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
; must be separated by a comma. If this value is left blank, connections will be
; accepted from any ip address.
; Default Value: any
;listen.allowed_clients = 127.0.0.1
; Set the associated route table (FIB). FreeBSD only
; Default Value: -1
;listen.setfib = 1
; Specify the nice(2) priority to apply to the pool processes (only if set)
; The value can vary from -19 (highest priority) to 20 (lower priority)
; Note: - It will only work if the FPM master process is launched as root
; - The pool processes will inherit the master process priority
; unless it specified otherwise
; Default Value: no set
; process.priority = -19
; Set the process dumpable flag (PR_SET_DUMPABLE prctl for Linux or
; PROC_TRACE_CTL procctl for FreeBSD) even if the process user
; or group is different than the master process user. It allows to create process
; core dump and ptrace the process for the pool user.
; Default Value: no
; process.dumpable = yes
; Choose how the process manager will control the number of child processes.
; Possible Values:
; static - a fixed number (pm.max_children) of child processes;
; dynamic - the number of child processes are set dynamically based on the
; following directives. With this process management, there will be
; always at least 1 children.
; pm.max_children - the maximum number of children that can
; be alive at the same time.
; pm.start_servers - the number of children created on startup.
; pm.min_spare_servers - the minimum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is less than this
; number then some children will be created.
; pm.max_spare_servers - the maximum number of children in 'idle'
; state (waiting to process). If the number
; of 'idle' processes is greater than this
; number then some children will be killed.
; pm.max_spawn_rate - the maximum number of rate to spawn child
; processes at once.
; ondemand - no children are created at startup. Children will be forked when
; new requests will connect. The following parameter are used:
; pm.max_children - the maximum number of children that
; can be alive at the same time.
; pm.process_idle_timeout - The number of seconds after which
; an idle process will be killed.
; Note: This value is mandatory.
pm = dynamic
; The number of child processes to be created when pm is set to 'static' and the
; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
; This value sets the limit on the number of simultaneous requests that will be
; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
; CGI. The below defaults are based on a server without much resources. Don't
; forget to tweak pm.* to fit your needs.
; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
; Note: This value is mandatory.
pm.max_children = 120
; The number of child processes created on startup.
; Note: Used only when pm is set to 'dynamic'
; Default Value: (min_spare_servers + max_spare_servers) / 2
pm.start_servers = 12
; The desired minimum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.min_spare_servers = 6
; The desired maximum number of idle server processes.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
pm.max_spare_servers = 18
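; Added sizing note (rule of thumb, not from the upstream file): peak FPM memory
; use is roughly pm.max_children x the average worker RSS; at ~50 MiB per
; Nextcloud worker, 120 children can reach ~6 GiB, so scale this down on
; smaller hosts.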
; The maximum number of child processes to spawn at once.
; Note: Used only when pm is set to 'dynamic'
; Note: Mandatory when pm is set to 'dynamic'
; Default Value: 32
;pm.max_spawn_rate = 32
; The number of seconds after which an idle process will be killed.
; Note: Used only when pm is set to 'ondemand'
; Default Value: 10s
;pm.process_idle_timeout = 10s;
; The number of requests each child process should execute before respawning.
; This can be useful to work around memory leaks in 3rd party libraries. For
; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
; Default Value: 0
;pm.max_requests = 500
; The URI to view the FPM status page. If this value is not set, no URI will be
; recognized as a status page. It shows the following information:
; pool - the name of the pool;
; process manager - static, dynamic or ondemand;
; start time - the date and time FPM has started;
; start since - number of seconds since FPM has started;
; accepted conn - the number of request accepted by the pool;
; listen queue - the number of request in the queue of pending
; connections (see backlog in listen(2));
; max listen queue - the maximum number of requests in the queue
; of pending connections since FPM has started;
; listen queue len - the size of the socket queue of pending connections;
; idle processes - the number of idle processes;
; active processes - the number of active processes;
; total processes - the number of idle + active processes;
; max active processes - the maximum number of active processes since FPM
; has started;
; max children reached - number of times, the process limit has been reached,
; when pm tries to start more children (works only for
; pm 'dynamic' and 'ondemand');
; Value are updated in real time.
; Example output:
; pool: www
; process manager: static
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 62636
; accepted conn: 190460
; listen queue: 0
; max listen queue: 1
; listen queue len: 42
; idle processes: 4
; active processes: 11
; total processes: 15
; max active processes: 12
; max children reached: 0
;
; By default the status page output is formatted as text/plain. Passing either
; 'html', 'xml' or 'json' in the query string will return the corresponding
; output syntax. Example:
; http://www.foo.bar/status
; http://www.foo.bar/status?json
; http://www.foo.bar/status?html
; http://www.foo.bar/status?xml
;
; By default the status page only outputs short status. Passing 'full' in the
; query string will also return status for each pool process.
; Example:
; http://www.foo.bar/status?full
; http://www.foo.bar/status?json&full
; http://www.foo.bar/status?html&full
; http://www.foo.bar/status?xml&full
; The Full status returns for each process:
; pid - the PID of the process;
; state - the state of the process (Idle, Running, ...);
; start time - the date and time the process has started;
; start since - the number of seconds since the process has started;
; requests - the number of requests the process has served;
; request duration - the duration in µs of the requests;
; request method - the request method (GET, POST, ...);
; request URI - the request URI with the query string;
; content length - the content length of the request (only with POST);
; user - the user (PHP_AUTH_USER) (or '-' if not set);
; script - the main script called (or '-' if not set);
; last request cpu - the %cpu the last request consumed
; it's always 0 if the process is not in Idle state
; because CPU calculation is done when the request
; processing has terminated;
; last request memory - the max amount of memory the last request consumed
; it's always 0 if the process is not in Idle state
; because memory calculation is done when the request
; processing has terminated;
; If the process is in Idle state, then the information relates to the
; last request the process has served. Otherwise it relates to
; the current request being served.
; Example output:
; ************************
; pid: 31330
; state: Running
; start time: 01/Jul/2011:17:53:49 +0200
; start since: 63087
; requests: 12808
; request duration: 1250261
; request method: GET
; request URI: /test_mem.php?N=10000
; content length: 0
; user: -
; script: /home/fat/web/docs/php/test_mem.php
; last request cpu: 0.00
; last request memory: 0
;
; Note: There is a real-time FPM status monitoring sample web page available
; It's available in: /usr/local/share/php/fpm/status.html
;
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;pm.status_path = /status
; The address on which to accept FastCGI status request. This creates a new
; invisible pool that can handle requests independently. This is useful
; if the main pool is busy with long running requests because it is still possible
; to get the status before finishing the long running requests.
;
; Valid syntaxes are:
; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
; a specific port;
; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
; a specific port;
; 'port' - to listen on a TCP socket to all addresses
; (IPv6 and IPv4-mapped) on a specific port;
; '/path/to/unix/socket' - to listen on a unix socket.
; Default Value: value of the listen option
;pm.status_listen = 127.0.0.1:9001
; The ping URI to call the monitoring page of FPM. If this value is not set, no
; URI will be recognized as a ping page. This could be used to test from outside
; that FPM is alive and responding, or to
; - create a graph of FPM availability (rrd or such);
; - remove a server from a group if it is not responding (load balancing);
; - trigger alerts for the operating team (24/7).
; Note: The value must start with a leading slash (/). The value can be
; anything, but it may not be a good idea to use the .php extension or it
; may conflict with a real PHP file.
; Default Value: not set
;ping.path = /ping
; This directive may be used to customize the response of a ping request. The
; response is formatted as text/plain with a 200 response code.
; Default Value: pong
;ping.response = pong
; The access log file
; Default: not set
;access.log = log/$pool.access.log
; The access log format.
; The following syntax is allowed
; %%: the '%' character
; %C: %CPU used by the request
; it can accept the following format:
; - %{user}C for user CPU only
; - %{system}C for system CPU only
; - %{total}C for user + system CPU (default)
; %d: time taken to serve the request
; it can accept the following format:
; - %{seconds}d (default)
; - %{milliseconds}d
; - %{milli}d
; - %{microseconds}d
; - %{micro}d
; %e: an environment variable (same as $_ENV or $_SERVER)
; it must be wrapped in braces to specify the name of the env
; variable. Some examples:
; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
; %f: script filename
; %l: content-length of the request (for POST request only)
; %m: request method
; %M: peak of memory allocated by PHP
; it can accept the following format:
; - %{bytes}M (default)
; - %{kilobytes}M
; - %{kilo}M
; - %{megabytes}M
; - %{mega}M
; %n: pool name
; %o: output header
; it must be wrapped in braces to specify the name of the header:
; - %{Content-Type}o
; - %{X-Powered-By}o
; - %{Transfert-Encoding}o
; - ....
; %p: PID of the child that serviced the request
; %P: PID of the parent of the child that serviced the request
; %q: the query string
; %Q: the '?' character if query string exists
; %r: the request URI (without the query string, see %q and %Q)
; %R: remote IP address
; %s: status (response code)
; %t: server time the request was received
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %T: time the log has been written (the request has finished)
; it can accept a strftime(3) format:
; %d/%b/%Y:%H:%M:%S %z (default)
; The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
; %u: remote user
;
; Default: "%R - %u %t \"%m %r\" %s"
;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{milli}d %{kilo}M %C%%"
; A list of request_uri values which should be filtered from the access log.
;
; As a security precaution, this setting will be ignored if:
; - the request method is not GET or HEAD; or
; - there is a request body; or
; - there are query parameters; or
; - the response code is outwith the successful range of 200 to 299
;
; Note: The paths are matched against the output of the access.format tag "%r".
; On common configurations, this may look more like SCRIPT_NAME than the
; expected pre-rewrite URI.
;
; Default Value: not set
;access.suppress_path[] = /ping
;access.suppress_path[] = /health_check.php
; The log file for slow requests
; Default Value: not set
; Note: slowlog is mandatory if request_slowlog_timeout is set
;slowlog = log/$pool.log.slow
; The timeout for serving a single request after which a PHP backtrace will be
; dumped to the 'slowlog' file. A value of '0s' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_slowlog_timeout = 0
; Depth of slow log stack trace.
; Default Value: 20
;request_slowlog_trace_depth = 20
; The timeout for serving a single request after which the worker process will
; be killed. This option should be used when the 'max_execution_time' ini option
; does not stop script execution for some reason. A value of '0' means 'off'.
; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
; Default Value: 0
;request_terminate_timeout = 0
; The timeout set by 'request_terminate_timeout' ini option is not engaged after
; application calls 'fastcgi_finish_request' or when application has finished and
; shutdown functions are being called (registered via register_shutdown_function).
; This option will enable timeout limit to be applied unconditionally
; even in such cases.
; Default Value: no
;request_terminate_timeout_track_finished = no
; Set open file descriptor rlimit.
; Default Value: system defined value
;rlimit_files = 1024
; Set max core size rlimit.
; Possible Values: 'unlimited' or an integer greater or equal to 0
; Default Value: system defined value
;rlimit_core = 0
; Chroot to this directory at the start. This value must be defined as an
; absolute path. When this value is not set, chroot is not used.
; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
; of its subdirectories. If the pool prefix is not set, the global prefix
; will be used instead.
; Note: chrooting is a great security feature and should be used whenever
; possible. However, all PHP paths will be relative to the chroot
; (error_log, sessions.save_path, ...).
; Default Value: not set
;chroot =
; Chdir to this directory at the start.
; Note: relative path can be used.
; Default Value: current directory or / when chroot
;chdir = /var/www
; Redirect worker stdout and stderr into main error log. If not set, stdout and
; stderr will be redirected to /dev/null according to FastCGI specs.
; Note: on highloaded environment, this can cause some delay in the page
; process time (several ms).
; Default Value: no
;catch_workers_output = yes
; Decorate worker output with prefix and suffix containing information about
; the child that writes to the log and if stdout or stderr is used as well as
; log level and time. This options is used only if catch_workers_output is yes.
; Settings to "no" will output data as written to the stdout or stderr.
; Default value: yes
;decorate_workers_output = no
; Clear environment in FPM workers
; Prevents arbitrary environment variables from reaching FPM worker processes
; by clearing the environment in workers before env vars specified in this
; pool configuration are added.
; Setting to "no" will make all environment variables available to PHP code
; via getenv(), $_ENV and $_SERVER.
; Default Value: yes
;clear_env = no
; Limits the extensions of the main script FPM will allow to parse. This can
; prevent configuration mistakes on the web server side. You should only limit
; FPM to .php extensions to prevent malicious users to use other extensions to
; execute php code.
; Note: set an empty value to allow all extensions.
; Default Value: .php
;security.limit_extensions = .php .php3 .php4 .php5 .php7
; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
; the current environment.
; Default Value: clean env
;env[HOSTNAME] = $HOSTNAME
;env[PATH] = /usr/local/bin:/usr/bin:/bin
;env[TMP] = /tmp
;env[TMPDIR] = /tmp
;env[TEMP] = /tmp
; Additional php.ini defines, specific to this pool of workers. These settings
; overwrite the values previously defined in the php.ini. The directives are the
; same as the PHP SAPI:
; php_value/php_flag - you can set classic ini defines which can
; be overwritten from PHP call 'ini_set'.
; php_admin_value/php_admin_flag - these directives won't be overwritten by
; PHP call 'ini_set'
; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
; Defining 'extension' will load the corresponding shared extension from
; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
; overwrite previously defined php.ini values, but will append the new value
; instead.
; Note: path INI options can be relative and will be expanded with the prefix
; (pool, global or /usr/local)
; Default Value: nothing is defined by default except the values in php.ini and
; specified at startup with the -d argument
;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
;php_flag[display_errors] = off
;php_admin_value[error_log] = /var/log/fpm-php.www.log
;php_admin_flag[log_errors] = on
;php_admin_value[memory_limit] = 32M


@ -0,0 +1,11 @@
[Unit]
Description = Postgresql Data Volume
[Volume]
VolumeName = ${postgres_data_volume_name}
Device=/dev/disk/by-label/${postgres_data_volume_name}
Options=nodev,noexec,nosuid,rootcontext=system_u:object_r:container_file_t:s0
Type=ext4
[Install]
WantedBy=default.target


@ -0,0 +1,23 @@
[Unit]
Description = Postgresql
Wants=generate-secrets.service
After=generate-secrets.service
[Container]
ContainerName = ${postgres_container_name}
Image = ${postgres_image_name}:${postgres_image_tag}
Volume = postgres-data.volume:/var/lib/postgresql/data:z
Network = nextcloud-backend.network
EnvironmentFile=${postgres_env_file_path}
Environment=PGDATA=/var/lib/postgresql/data/pgdata
Secret=${postgres_password_secret_name}
[Service]
Restart=on-failure
[Install]
WantedBy=multi-user.target


@ -0,0 +1,3 @@
POSTGRES_USER=${postgres_user_name}
POSTGRES_DB=${postgres_database_name}
POSTGRES_PASSWORD_FILE=/run/secrets/postgres-passwd
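# Added note: Podman mounts a quadlet Secret= at /run/secrets/<secret-name> by
# default, which is what the path above relies on.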


@ -0,0 +1,8 @@
[Unit]
Description = "Valkey Volume"
[Volume]
VolumeName = ${valkey_data_volume_name}
Device=/dev/disk/by-label/${valkey_data_volume_name}
Options=nodev,noexec,nosuid,rootcontext=system_u:object_r:container_file_t:s0
Type=ext4


@ -0,0 +1,20 @@
[Unit]
Description = "Valkey Container"
[Container]
ContainerName = "${valkey_container_name}"
Image = "${valkey_image_name}:${valkey_image_tag}"
Network = nextcloud-backend.network
Volume = valkey-data.volume:/data:z
# run as the valkey user so the Docker entrypoint skips its find/chown pass,
# which would otherwise try to chown lost+found and hit a permission-denied error
User=valkey
Group=valkey
[Service]
Restart=on-failure
[Install]
WantedBy=default.target

603 modules/nextcloud/main.tf (new file)

@ -0,0 +1,603 @@
locals {
data_device_path = "/dev/vdb"
luks_device_name = "dm-crypt0"
caddy_container_name = "caddy"
nextcloud_container_name = "nextcloud"
postgres_container_name = "psql"
valkey_container_name = "valkey"
caddyfile_dir_path = "/opt/caddy"
caddyfile_file_path = "${local.caddyfile_dir_path}/Caddyfile"
postgres_password_secret_name = "postgres-passwd"
php_fpm_config_dir_path = "/opt/php"
php_fpm_config_file_path = "${local.php_fpm_config_dir_path}/www.conf"
caddy_data_volume_name = "caddy-data"
nextcloud_data_volume_name = "nextcloud-data"
postgres_data_volume_name = "postgres-data"
valkey_data_volume_name = "valkey-data"
caddy_image_name = "docker.io/caddy"
caddy_image_tag = "2.9.1-alpine"
nextcloud_image_name = "docker.io/nextcloud"
nextcloud_image_tag = "stable-fpm-alpine"
postgres_image_name = "docker.io/postgres"
postgres_image_tag = "12.22"
valkey_image_name = "docker.io/valkey/valkey"
valkey_image_tag = "8.0-alpine3.21"
postgres_env_file_path = "/etc/postgres.env"
data_disk = {
device = local.data_device_path
wipeTable = true
partitions = [
{
label = local.caddy_data_volume_name
number = 1
sizeMiB = 500
wipePartitionEntry = true
shouldExist = true
resize = true
},
{
label = local.nextcloud_data_volume_name
number = 2
sizeMiB = 100 * 1024
wipePartitionEntry = true
shouldExist = true
resize = true
},
{
label = local.postgres_data_volume_name
number = 3
sizeMiB = 10 * 1024
wipePartitionEntry = true
shouldExist = true
resize = true
},
{
label = local.valkey_data_volume_name
number = 4
sizeMiB = 1024
wipePartitionEntry = true
shouldExist = true
resize = true
},
]
}
caddy_data_luks = merge(
{
name = "encrypted-${local.caddy_data_volume_name}"
device = "${local.data_device_path}1"
label = "encrypted-${local.caddy_data_volume_name}"
wipeVolume = false
},
var.luks_passphrase == "" ? {} : {
keyFile = {
source = format(
"data:text/plain;base64,%s",
var.luks_passphrase
)
},
},
var.luks_use_tpm2 ? {
clevis = {
tpm2 = true
}
} : {}
)
nextcloud_data_luks = merge(
{
name = "encrypted-${local.nextcloud_data_volume_name}"
device = "${local.data_device_path}2"
label = "encrypted-${local.nextcloud_data_volume_name}"
wipeVolume = false
},
var.luks_passphrase == "" ? {} : {
keyFile = {
source = format(
"data:text/plain;base64,%s",
var.luks_passphrase
)
}
},
var.luks_use_tpm2 ? {
clevis = {
tpm2 = true
}
} : {}
)
postgres_data_luks = merge(
{
name = "encrypted-${local.postgres_data_volume_name}"
device = "${local.data_device_path}3"
label = "encrypted-${local.postgres_data_volume_name}"
wipeVolume = false
},
var.luks_passphrase == "" ? {} : {
keyFile = {
source = format(
"data:text/plain;base64,%s",
var.luks_passphrase
)
}
},
var.luks_use_tpm2 ? {
clevis = {
tpm2 = true
}
} : {}
)
valkey_data_luks = merge(
{
name = "encrypted-${local.valkey_data_volume_name}"
device = "${local.data_device_path}4"
label = "encrypted-${local.valkey_data_volume_name}"
wipeVolume = false
},
var.luks_passphrase == "" ? {} : {
keyFile = {
source = format(
"data:text/plain;base64,%s",
var.luks_passphrase
)
}
},
var.luks_use_tpm2 ? {
clevis = {
tpm2 = true
}
} : {}
)
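  # Added note: each merge() above renders to an Ignition LUKS entry roughly like
  # (illustrative, with TPM2 binding enabled and no passphrase):
  #   { "name": "encrypted-caddy-data", "device": "/dev/vdb1",
  #     "label": "encrypted-caddy-data", "wipeVolume": false,
  #     "clevis": { "tpm2": true } }
  # Also note that var.luks_passphrase is spliced into the base64 data URL as-is,
  # so it is apparently expected to be supplied already base64-encoded.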
caddy_data_filesystem = {
device = "/dev/disk/by-id/dm-name-encrypted-${local.caddy_data_volume_name}"
format = "ext4"
label = local.caddy_data_volume_name
wipeFilesystem = false
}
nextcloud_data_filesystem = {
device = "/dev/disk/by-id/dm-name-encrypted-${local.nextcloud_data_volume_name}"
format = "ext4"
label = local.nextcloud_data_volume_name
wipeFilesystem = false
}
postgres_data_filesystem = {
device = "/dev/disk/by-id/dm-name-encrypted-${local.postgres_data_volume_name}"
format = "ext4"
label = local.postgres_data_volume_name
wipeFilesystem = false
}
valkey_data_filesystem = {
device = "/dev/disk/by-id/dm-name-encrypted-${local.valkey_data_volume_name}"
format = "ext4"
label = local.valkey_data_volume_name
wipeFilesystem = false
options = [
"-E", "root_owner=999:999",
]
}
hostname_file = {
path = "/etc/hostname"
    user  = { id = 0 }
    group = { id = 0 }
mode = 420 # 0644
contents = {
source = "data:text/plain,nextcloud"
}
}
hosts_file = {
path = "/etc/hosts"
    user  = { id = 0 }
    group = { id = 0 }
mode = 420 # 0644
append = [
{
source = format(
"data:text/plain;base64,%s",
base64encode(
"${var.reverse_proxy_ip_address} ${var.nextcloud_domain}"
)
)
}
]
}
caddy_frontend_network_file = {
path = "/etc/containers/systemd/caddy-frontend.network"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
file("${path.module}/files/caddy-frontend.network")
)
)
}
}
caddy_backend_network_file = {
path = "/etc/containers/systemd/caddy-backend.network"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
file("${path.module}/files/caddy-backend.network")
)
)
}
}
nextcloud_backend_network_file = {
path = "/etc/containers/systemd/nextcloud-backend.network"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
file("${path.module}/files/nextcloud-backend.network")
)
)
}
}
nextcloud_internet_network_file = {
path = "/etc/containers/systemd/nextcloud-internet.network"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plein;base64,%s",
base64encode(
file("${path.module}/files/nextcloud-internet.network")
)
)
}
}
caddy_data_volume_file = {
path = "/etc/containers/systemd/caddy-data.volume"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/caddy-data.volume.tftpl",
{
caddy_data_volume_name = local.caddy_data_volume_name
}
)
)
)
}
}
nextcloud_data_volume_file = {
path = "/etc/containers/systemd/nextcloud-data.volume"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/nextcloud-data.volume.tftpl",
{
nextcloud_data_volume_name = local.nextcloud_data_volume_name
}
)
)
)
}
}
postgres_data_volume_file = {
path = "/etc/containers/systemd/postgres-data.volume"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/postgres-data.volume.tftpl",
{
postgres_data_volume_name = local.postgres_data_volume_name
}
)
)
)
}
}
valkey_data_volume_file = {
path = "/etc/containers/systemd/valkey-data.volume"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/valkey-data.volume.tftpl",
{
valkey_data_volume_name = local.valkey_data_volume_name
}
)
)
)
}
}
caddy_container_file = {
path = "/etc/containers/systemd/caddy.container"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/caddy.container.tftpl",
{
caddy_container_name = local.caddy_container_name
caddy_image_name = local.caddy_image_name
caddy_image_tag = local.caddy_image_tag
caddyfile_file_path = local.caddyfile_file_path
}
)
)
)
}
}
nextcloud_container_file = {
path = "/etc/containers/systemd/nextcloud.container"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/nextcloud.container.tftpl",
{
nextcloud_container_name = local.nextcloud_container_name
nextcloud_image_name = local.nextcloud_image_name
nextcloud_image_tag = local.nextcloud_image_tag
postgres_env_file_path = local.postgres_env_file_path
postgres_container_name = local.postgres_container_name
postgres_password_secret_name = local.postgres_password_secret_name
valkey_container_name = local.valkey_container_name
nextcloud_trusted_domains = join(" ", var.nextcloud_trusted_domains)
            php_upload_limit = "2048M" # TODO: expose as a module variable?
php_fpm_config_file_path = local.php_fpm_config_file_path
nextcloud_trusted_proxies = "10.0.0.0/8"
}
)
)
)
}
}
postgres_container_file = {
path = "/etc/containers/systemd/postgres.container"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/postgres.container.tftpl",
{
postgres_container_name = local.postgres_container_name
postgres_image_name = local.postgres_image_name
postgres_image_tag = local.postgres_image_tag
postgres_env_file_path = local.postgres_env_file_path
postgres_password_secret_name = local.postgres_password_secret_name
}
)
)
)
}
}
valkey_container_file = {
path = "/etc/containers/systemd/valkey.container"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/valkey.container.tftpl",
{
valkey_container_name = local.valkey_container_name
valkey_image_name = local.valkey_image_name
valkey_image_tag = local.valkey_image_tag
}
)
)
)
}
}
caddyfile_dir = {
path = local.caddyfile_dir_path
user = { id = 0 }
group = { id = 0 }
mode = 493 # 0755
}
caddyfile_file = {
path = local.caddyfile_file_path
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/Caddyfile.tftpl",
{
nextcloud_domain = var.nextcloud_domain
nextcloud_container_name = local.nextcloud_container_name
}
)
)
)
}
}
postgres_env_file = {
path = local.postgres_env_file_path
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
templatefile(
"${path.module}/files/postgres.env.tftpl",
{
postgres_user_name = "nextcloud"
postgres_database_name = "nextcloud"
}
)
)
)
}
}
php_config_dir = {
path = local.php_fpm_config_dir_path
user = { id = 0 }
group = { id = 0 }
mode = 493 # 0755
}
php_fpm_config_file = {
path = "/opt/php/www.conf"
    user  = { id = 0 }
    group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(
file("${path.module}/files/php-fpm-www.conf")
)
)
}
}
generate_secrets_systemd_unit = {
name = "generate-secrets.service"
enabled = true
contents = templatefile(
"${path.module}/files/generate-secrets.service.tftpl",
{
postgres_password_secret_name = local.postgres_password_secret_name
}
)
}
ignition_config = {
ignition = {
version = "3.5.0"
}
storage = {
luks = [
local.caddy_data_luks,
local.nextcloud_data_luks,
local.postgres_data_luks,
local.valkey_data_luks,
]
disks = [
local.data_disk,
]
filesystems = [
local.caddy_data_filesystem,
local.nextcloud_data_filesystem,
local.postgres_data_filesystem,
local.valkey_data_filesystem,
]
directories = [
local.caddyfile_dir,
local.php_config_dir,
]
files = [
local.hostname_file,
local.hosts_file,
local.caddy_frontend_network_file,
local.caddy_backend_network_file,
local.nextcloud_backend_network_file,
local.nextcloud_internet_network_file,
local.caddy_data_volume_file,
local.nextcloud_data_volume_file,
local.postgres_data_volume_file,
local.valkey_data_volume_file,
local.caddy_container_file,
local.nextcloud_container_file,
local.postgres_container_file,
local.valkey_container_file,
local.caddyfile_file,
local.postgres_env_file,
local.php_fpm_config_file,
]
}
systemd = {
units = [
local.generate_secrets_systemd_unit,
]
}
passwd = {
users = [
{
name = "core"
sshAuthorizedKeys = var.ssh_authorized_keys
}
]
}
}
}


@ -0,0 +1,3 @@
output "config" {
value = jsonencode(local.ignition_config)
}


@ -0,0 +1,30 @@
variable "ssh_authorized_keys" {
type = list(string)
nullable = false
}
variable "nextcloud_domain" {
type = string
nullable = false
}
variable "nextcloud_trusted_domains" {
type = list(string)
nullable = false
}
variable "reverse_proxy_ip_address" {
type = string
nullable = false
}
variable "luks_passphrase" {
type = string
nullable = false
sensitive = true
}
variable "luks_use_tpm2" {
type = bool
nullable = false
}
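
# Illustrative usage from a root module (all values below are placeholders):
# module "nextcloud_config" {
#   source                    = "./modules/nextcloud"
#   ssh_authorized_keys       = ["ssh-ed25519 AAAA... admin"]
#   nextcloud_domain          = "cloud.example.org"
#   nextcloud_trusted_domains = ["cloud.example.org"]
#   reverse_proxy_ip_address  = "10.0.0.10"
#   luks_passphrase           = ""
#   luks_use_tpm2             = true
# }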


@ -1,7 +1,7 @@
terraform {
required_providers {
proxmox = {
      source  = "bpg/proxmox"
version = "~>0.56.1"
}
}
@ -10,162 +10,162 @@ terraform {
locals {
core_user = {
name = "core"
sshAuthorizedKeys = [
var.admin_ssh_public_key
]
}
hostname_file = {
path = "/etc/hostname"
user = { id = 0 }
group = { id = 0 }
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(var.instance_name)
)
}
}
ignition_configuration = jsonencode({
ignition = {
version = "3.4.0"
}
storage = {
files = [
{
path = "/etc/hostname"
user = {id = 0}
group = {id = 0}
mode = 420 # 0644
contents = {
source = format(
"data:text/plain;base64,%s",
base64encode(var.instance_name)
)
}
},
]
}
passwd = {
users = [
local.core_user
]
}
})
}
resource "random_pet" "config_name" {
  length = 4
}
locals {
  generated_ignition_config_file = "${path.module}/poc_ignition_config_${random_pet.config_name.id}.ign"
}
resource "local_file" "sftp_script_for_ignition_file" {
  content = <<EOT
cd writable
-rm ${var.pve_vm_id}.ign
put ${local.generated_ignition_config_file} ${var.pve_vm_id}.ign
EOT
  filename        = "${path.module}/poc_sftp_script_for_ignition_config_${random_pet.config_name.id}"
  file_permission = "0644"
}
resource "local_file" "poc_ignition_config" {
content = local.ignition_configuration
filename = local.generated_ignition_config_file
file_permission = "0644"
provisioner "local-exec" {
command = <<EOT
sftp -P ${var.netboot_server_sftp_port} \
-o ProxyJump=${var.pve_ssh_user}@${var.pve_ssh_host} \
-b "${path.module}/poc_sftp_script_for_ignition_config_${random_pet.config_name.id}" \
terraform_ignition@${var.netboot_server_ip_address}
EOT
}
lifecycle {
replace_triggered_by = [local_file.sftp_script_for_ignition_file]
}
}
resource "local_file" "sftp_script_for_dhcp_config" {
  content = <<EOT
cd writable
-rm ${var.pve_vm_id}.conf
put ${path.module}/poc_dhcp_config_${random_pet.config_name.id}.conf ${var.pve_vm_id}.conf
EOT
  filename        = "${path.module}/poc_sftp_script_for_dhcp_config_${random_pet.config_name.id}"
  file_permission = "0644"
}
resource "local_file" "dhcp_config" {
depends_on = [local_file.sftp_script_for_dhcp_config]
content = templatefile(
"${path.module}/files/dhcp_config.conf.tftpl",
{
vm_id = var.pve_vm_id
hostname = var.instance_name
host_ip = cidrhost(var.admin_network.prefix, var.pve_vm_id)
mac_address = var.admin_network.mac_address
}
)
filename = "${path.module}/poc_dhcp_config_${random_pet.config_name.id}.conf"
file_permission = "0644"
provisioner "local-exec" {
command = <<EOT
sftp -P ${var.netboot_server_sftp_port} \
-o ProxyJump=${var.pve_ssh_user}@${var.pve_ssh_host} \
-b "${path.module}/poc_sftp_script_for_dhcp_config_${random_pet.config_name.id}" \
terraform_dhcp@${var.netboot_server_ip_address}
EOT
}
lifecycle {
replace_triggered_by = [local_file.sftp_script_for_dhcp_config]
}
}
resource "proxmox_virtual_environment_vm" "poc" {
  name      = var.instance_name
  node_name = var.pve_node_name
  vm_id     = var.pve_vm_id

  cpu {
    architecture = "x86_64"
    type         = "host"
    sockets      = 1
    cores        = 4
  }

  memory {
    dedicated = 4096
  }

  disk {
    datastore_id = var.pve_storage_id
    interface    = "virtio0"
    size         = 10
  }

  network_device {
    bridge      = var.admin_network.name
    model       = "virtio"
    mac_address = var.admin_network.mac_address
  }

  boot_order = ["net0"]

  operating_system {
    type = "l26"
  }

  vga {}
  serial_device {}
}


@ -1,73 +1,73 @@
variable "pve_node_name" {
type = string
nullable = false
}
variable "pve_storage_id" {
type = string
nullable = false
}
variable "pve_vm_id" {
type = number
nullable = false
}
variable "pve_ssh_user" {
type = string
nullable = false
default = "root"
}
variable "pve_ssh_host" {
type = string
nullable = false
}
variable "netboot_server_ip_address" {
type = string
nullable = false
}
variable "netboot_server_sftp_port" {
type = number
nullable = false
default = 2222
}
variable "instance_name" {
type = string
default = "poc"
}
variable "admin_network" {
type = object({
name = string
prefix = string
mac_address = string
})
nullable = false
}
variable "prod_network" {
type = object({
name = string
prefix = string
mac_address = string
})
nullable = false
}
variable "monitoring_network" {
type = object({
name = string
prefix = string
mac_address = string
})
nullable = false
type = object({
name = string
prefix = string
mac_address = string
})
nullable = false
}
variable "admin_ssh_public_key" {
type = string
nullable = false
type = string
nullable = false
}
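
For illustration, a caller satisfies the three network object variables with
values shaped like this (the name and prefix echo terraform.tfvars; the MAC
address is a made-up, locally administered one):

    admin_network = {
      name        = "admin"
      prefix      = "10.110.0.0/24"
      mac_address = "02:00:0a:6e:00:6e"
    }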


@@ -3,126 +3,126 @@ terraform {
}
locals {
  sshd_config_file = {
    path      = "${var.base_config_dir}/sshd_config"
    overwrite = true
    user      = { id = 0 }
    group     = { id = 0 }
    mode      = 384 # "0600"
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(templatefile(
          "${path.module}/files/sshd_config.tftpl",
          {
            use_socket_activation       = var.use_socket_activation
            listen_unix                 = var.listen_unix
            address_family              = var.address_family
            listen_addresses            = var.listen_addresses
            listen_port                 = var.listen_port
            allow_users                 = join(" ", var.allow_users)
            allow_groups                = join(" ", var.allow_groups)
            sftp_only                   = tostring(var.sftp_only)
            allow_tcp_forwarding        = tostring(var.allow_tcp_forwarding)
            ciphers_algos               = join(",", var.ciphers_algos)
            macs_algos                  = join(",", var.macs_algos)
            key_exchange_algos          = join(",", var.key_exchange_algos)
            host_key_algorithms         = join(",", var.host_key_algorithms)
            pub_key_accepted_algorithms = join(",", var.pub_key_accepted_algorithms)
            host_keys                   = var.host_keys
            rekey_limit                 = var.rekey_limit
            client_alive_count_max      = tostring(var.client_alive_count_max)
            client_alive_interval       = tostring(var.client_alive_interval)
            max_auth_tries              = tostring(var.max_auth_tries)
            max_sessions                = tostring(var.max_sessions)
            max_startups                = tostring(var.max_startup)
            chrooted_users              = var.chrooted_users
          }
        ))
      )
    }
  }

  disable_sshd_socket_systemd_unit = {
    name    = "sshd.socket"
    enabled = false
  }

  enable_sshd_socket_systemd_unit = {
    name    = "sshd.socket"
    enabled = true
    dropins = [
      {
        name = "listen.conf"
        contents = templatefile(
          "${path.module}/files/sshd.socket",
          {
            listen_addresses = var.listen_addresses
            listen_port      = var.listen_port
            listen_unix      = var.listen_unix
          }
        )
      }
    ]
  }

  unix_socket_tmpfile_file = {
    path  = "/etc/tmpfiles.d/sshd.conf"
    user  = { id = 0 }
    group = { id = 0 }
    mode  = 420 # "0644"
    contents = {
      source = format(
        "data:text/plain;base64,%s",
        base64encode(file("${path.module}/files/tmpfiles.conf"))
      )
    }
  }

  use_unix_socket_files = {
    false = []
    true  = [local.unix_socket_tmpfile_file]
  }

  disable_sshd_service_systemd_unit = {
    name    = "sshd.service"
    enabled = false
  }

  enable_sshd_service_systemd_unit = {
    name    = "sshd.service"
    enabled = true
  }

  systemd_units_on_socket_activation = {
    false = [
      local.disable_sshd_socket_systemd_unit,
      local.enable_sshd_service_systemd_unit,
    ]
    true = [
      local.enable_sshd_socket_systemd_unit,
      local.disable_sshd_service_systemd_unit,
    ]
  }

  chrooted_users = [
    for idx, user in var.chrooted_users :
    {
      name         = user.username
      uid          = 2000 + idx
      primaryGroup = user.username
      noUserGroup  = true
      sshAuthorizedKeys = [
        user.ssh_public_key
      ]
    }
  ]

  chrooted_groups = [
    for idx, user in var.chrooted_users :
    {
      name = user.username
      gid  = 2000 + idx
    }
  ]
}
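
To make the uid/gid scheme concrete: with a hypothetical input of two chrooted
users, the comprehensions above yield one user and one matching group per
entry, numbered upward from 2000.

    # hypothetical var.chrooted_users:
    # [
    #   { username = "alice", chroot = "/srv/alice", ssh_public_key = "ssh-ed25519 AAAA... alice" },
    #   { username = "bob",   chroot = "/srv/bob",   ssh_public_key = "ssh-ed25519 AAAA... bob" },
    # ]
    #
    # local.chrooted_users  => alice (uid 2000), bob (uid 2001)
    # local.chrooted_groups => alice (gid 2000), bob (gid 2001)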


@@ -1,20 +1,20 @@
output "files" {
  value = concat(
    [
      local.sshd_config_file,
    ],
    local.use_unix_socket_files[var.listen_unix],
  )
}

output "systemd_units" {
  value = local.systemd_units_on_socket_activation[var.use_socket_activation]
}

output "users" {
  value = local.chrooted_users
}

output "groups" {
  value = local.chrooted_groups
}
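
These four outputs appear shaped for an Ignition-style host config (the
camelCase user fields match the Ignition passwd schema). A consuming module
might wire them in like this; the module name, path, and merge points below
are hypothetical:

    module "sshd_config" {
      source    = "./modules/sshd"
      sftp_only = true
    }

    # then, in the Ignition document:
    #   storage.files  <- module.sshd_config.files
    #   systemd.units  <- module.sshd_config.systemd_units
    #   passwd.users   <- module.sshd_config.users
    #   passwd.groups  <- module.sshd_config.groups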


@@ -1,179 +1,179 @@
variable "base_config_dir" {
type = string
nullable = false
default = "/etc/ssh"
type = string
nullable = false
default = "/etc/ssh"
}
variable "use_socket_activation" {
type = bool
nullable = false
default = true
type = bool
nullable = false
default = true
}
variable "listen_unix" {
type = bool
nullable = false
default = false
type = bool
nullable = false
default = false
}
variable "address_family" {
type = string
nullable = false
default = "inet6"
validation {
condition = contains(["any", "inet", "inet6"], var.address_family)
error_message = "Invalid address family."
}
type = string
nullable = false
default = "inet6"
validation {
condition = contains(["any", "inet", "inet6"], var.address_family)
error_message = "Invalid address family."
}
}
variable "listen_addresses" {
type = list(string)
nullable = false
default = []
validation {
condition = length(var.listen_addresses) == 0 || alltrue([
for listen_address in var.listen_addresses:
can(cidrnetmask("${listen_address}/32")) || can(cidrnetmask("${listen_address}/128"))
])
error_message = "Invalid address."
}
type = list(string)
nullable = false
default = []
validation {
condition = length(var.listen_addresses) == 0 || alltrue([
for listen_address in var.listen_addresses :
can(cidrnetmask("${listen_address}/32")) || can(cidrnetmask("${listen_address}/128"))
])
error_message = "Invalid address."
}
}
variable "listen_port" {
type = number
nullable = false
default = 22
validation {
condition = var.listen_port > 0 && var.listen_port < 65536
error_message = "Invalid port."
}
type = number
nullable = false
default = 22
validation {
condition = var.listen_port > 0 && var.listen_port < 65536
error_message = "Invalid port."
}
}
variable "ciphers_algos" {
type = list(string)
nullable = false
default = ["chacha20-poly1305@openssh.com"]
type = list(string)
nullable = false
default = ["chacha20-poly1305@openssh.com"]
}
variable "macs_algos" {
type = list(string)
nullable = false
default = ["hmac-sha2-512-etm@openssh.com"]
type = list(string)
nullable = false
default = ["hmac-sha2-512-etm@openssh.com"]
}
variable "key_exchange_algos" {
type = list(string)
nullable = false
default = ["sntrup761x25519-sha512@openssh.com", "curve25519-sha256"]
type = list(string)
nullable = false
default = ["sntrup761x25519-sha512@openssh.com", "curve25519-sha256"]
}
variable "host_key_algorithms" {
type = list(string)
nullable = false
default = ["ssh-ed25519"]
type = list(string)
nullable = false
default = ["ssh-ed25519"]
}
variable "host_keys" {
type = list(string)
nullable = false
default = []
type = list(string)
nullable = false
default = []
}
variable "pub_key_accepted_algorithms" {
type = list(string)
nullable = false
default = ["ssh-ed25519"]
type = list(string)
nullable = false
default = ["ssh-ed25519"]
}
variable "rekey_limit" {
type = object({
size = string
time = string
})
nullable = false
default = {
size = "1G"
time = "1H"
}
type = object({
size = string
time = string
})
nullable = false
default = {
size = "1G"
time = "1H"
}
}
variable "allow_users" {
type = list(string)
nullable = false
default = []
type = list(string)
nullable = false
default = []
}
variable "allow_groups" {
type = list(string)
nullable = false
default = []
type = list(string)
nullable = false
default = []
}
variable "sftp_only" {
type = bool
nullable = false
default = true
type = bool
nullable = false
default = true
}
variable "allow_tcp_forwarding" {
type = bool
nullable = false
default = false
type = bool
nullable = false
default = false
}
variable "chrooted_users" {
type = list(object({
username = string
chroot = string
ssh_public_key = string
}))
nullable = false
default = []
type = list(object({
username = string
chroot = string
ssh_public_key = string
}))
nullable = false
default = []
}
variable "client_alive_count_max" {
type = number
nullable = false
default = 6
validation {
condition = var.client_alive_count_max > 0
error_message = "Invalid Client Alive Count Max."
}
type = number
nullable = false
default = 6
validation {
condition = var.client_alive_count_max > 0
error_message = "Invalid Client Alive Count Max."
}
}
variable "client_alive_interval" {
type = number
nullable = false
default = 10
validation {
condition = var.client_alive_interval > 0
error_message = "Invalid Client Alive Interval."
}
type = number
nullable = false
default = 10
validation {
condition = var.client_alive_interval > 0
error_message = "Invalid Client Alive Interval."
}
}
variable "max_auth_tries" {
type = number
nullable = false
default = 10
validation {
condition = var.max_auth_tries > 2
error_message = "Invalid or insufficient Max Auth Tries."
}
type = number
nullable = false
default = 10
validation {
condition = var.max_auth_tries > 2
error_message = "Invalid or insufficient Max Auth Tries."
}
}
variable "max_sessions" {
type = number
nullable = false
default = 10
validation {
condition = var.max_sessions >= 0
error_message = "Invalid or insufficient Max Sessions."
}
type = number
nullable = false
default = 10
validation {
condition = var.max_sessions >= 0
error_message = "Invalid or insufficient Max Sessions."
}
}
variable "max_startup" {
type = string
nullable = false
default = "100:70:1000"
type = string
nullable = false
default = "100:70:1000"
}
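
For reference, max_startup feeds sshd's MaxStartups knob, whose value has the
form start:rate:full: with the default above, sshd starts refusing
unauthenticated connections with 70% probability once 100 of them are
pending, scaling up linearly until it refuses all of them at 1000. In
sshd_config terms:

    MaxStartups 100:70:1000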


@@ -1,7 +1,12 @@
output "castopod_config" {
  value = module.castopod_config.config
}

output "caddy_config" {
  value = module.caddy_config.config
}

output "nextcloud_config" {
  value     = module.nextcloud_config.config
  sensitive = true
}
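
Because nextcloud_config is marked sensitive, it is redacted in the plain
output listing; it can still be read out explicitly by name (OpenTofu shown,
terraform behaves the same):

    $ tofu output                          # nextcloud_config = <sensitive>
    $ tofu output -json nextcloud_config   # prints the actual value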


@@ -1,14 +1,19 @@
pve_api_base_url = "https://proxmox.broken-by-design.fr:8006/"
pve_node_name = "ns3152888"
pve_storage_id = "local"
pve_ssh_user = "root"
pve_ssh_host = "proxmox.broken-by-design.fr"
pve_api_base_url = "https://proxmox.broken-by-design.fr:8006/"
pve_node_name = "ns3152888"
pve_storage_id = "local"
pve_ssh_user = "root"
pve_ssh_host = "proxmox.broken-by-design.fr"
ssh_public_key_opentofu_netboot_server = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFQnLSYLGzUVmDMMGgEKCNgfAOkIuqhOMGGuvgskACum fmaury@fedora-home-1"
ssh_public_key_admin_netboot_server = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFQnLSYLGzUVmDMMGgEKCNgfAOkIuqhOMGGuvgskACum fmaury@fedora-home-1"
ssh_public_key_admin_netboot_server = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFQnLSYLGzUVmDMMGgEKCNgfAOkIuqhOMGGuvgskACum fmaury@fedora-home-1"
admin_network_name = "admin"
admin_network_name = "admin"
admin_network_prefix = "10.110.0.0/24"
prod_network_name = "prod"
prod_network_prefix = "10.109.0.0/24"
monit_network_name = "monit"
prod_network_name = "prod"
prod_network_prefix = "10.109.0.0/24"
monit_network_name = "monit"
monit_network_prefix = "10.111.0.0/24"
nextcloud_luks_passphrase = "tititoto"
ssh_authorized_keys = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ0WaC412cZLVwUXN/MyIl7nHjAd5rW5dn0wsZczjwCm fmaury@fedora-2.home"
]
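
The LUKS passphrase above is plainly a placeholder. If the real one should
stay out of version control, the standard TF_VAR_ environment mechanism can
supply it instead of the tfvars file:

    $ export TF_VAR_nextcloud_luks_passphrase='the-real-passphrase'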


@@ -1,100 +1,111 @@
variable "pve_api_base_url" {
description = "API URL to the Proxmox cluster"
type = string
nullable = false
description = "API URL to the Proxmox cluster"
type = string
nullable = false
}
variable "pve_api_token" {
description = "API token used to connect to the Proxmox cluster"
type = string
nullable = false
sensitive = true
description = "API token used to connect to the Proxmox cluster"
type = string
nullable = false
sensitive = true
}
variable "pve_node_name" {
description = "Name of the Proxmox node on which files and VMs should be created"
type = string
nullable = false
description = "Name of the Proxmox node on which files and VMs should be created"
type = string
nullable = false
}
variable "pve_storage_id" {
description = "Name of the Proxmox Storage on which files (ISOs) and VM disks should be created"
type = string
nullable = false
description = "Name of the Proxmox Storage on which files (ISOs) and VM disks should be created"
type = string
nullable = false
}
variable "pve_ssh_user" {
description = "User used to connect with SSH to the hypervisor to port-forward to the netboot server"
type = string
nullable = false
description = "User used to connect with SSH to the hypervisor to port-forward to the netboot server"
type = string
nullable = false
}
variable "pve_ssh_host" {
description = "Address of the hypervisor to connect to to port-forward to the netboot server"
type = string
nullable = false
description = "Address of the hypervisor to connect to to port-forward to the netboot server"
type = string
nullable = false
}
variable "admin_network_name" {
description = "Admin Network Name"
type = string
nullable = false
default = "admin"
description = "Admin Network Name"
type = string
nullable = false
default = "admin"
}
variable "admin_network_prefix" {
description = "Network prefix associated with the Admin network"
type = string
nullable = false
validation {
condition = can(cidrnetmask(var.admin_network_prefix))
error_message = "Invalid Admin network prefix"
}
description = "Network prefix associated with the Admin network"
type = string
nullable = false
validation {
condition = can(cidrnetmask(var.admin_network_prefix))
error_message = "Invalid Admin network prefix"
}
}
variable "prod_network_name" {
description = "Production Network Name"
type = string
nullable = false
default = "prod"
description = "Production Network Name"
type = string
nullable = false
default = "prod"
}
variable "prod_network_prefix" {
description = "Network prefix associated with the prod network"
type = string
nullable = false
validation {
condition = can(cidrnetmask(var.prod_network_prefix))
error_message = "Invalid Prod network prefix"
}
description = "Network prefix associated with the prod network"
type = string
nullable = false
validation {
condition = can(cidrnetmask(var.prod_network_prefix))
error_message = "Invalid Prod network prefix"
}
}
variable "monit_network_name" {
description = "Monitoring Network Name"
type = string
nullable = false
default = "monit"
description = "Monitoring Network Name"
type = string
nullable = false
default = "monit"
}
variable "monit_network_prefix" {
description = "Network prefix associated with the monit network"
type = string
nullable = false
validation {
condition = can(cidrnetmask(var.monit_network_prefix))
error_message = "Invalid monitoring network prefix"
}
description = "Network prefix associated with the monit network"
type = string
nullable = false
validation {
condition = can(cidrnetmask(var.monit_network_prefix))
error_message = "Invalid monitoring network prefix"
}
}
variable "ssh_public_key_opentofu_netboot_server" {
description = "SSH public key used by Opentofu to connect to the terraform_dhcp and terraform_ignition SFTP accounts"
type = string
nullable = false
description = "SSH public key used by Opentofu to connect to the terraform_dhcp and terraform_ignition SFTP accounts"
type = string
nullable = false
}
variable "ssh_public_key_admin_netboot_server" {
description = "SSH public key used to connect to the core account on the netboot_server instance"
type = string
nullable = false
description = "SSH public key used to connect to the core account on the netboot_server instance"
type = string
nullable = false
}
variable "nextcloud_luks_passphrase" {
type = string
nullable = false
sensitive = true
}
variable "ssh_authorized_keys" {
type = list(string)
nullable = false
}
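
ssh_authorized_keys currently carries no validation. A sketch in the same
style as the network prefix checks above, assuming only ed25519 keys are
expected (startswith is available in recent Terraform/OpenTofu releases):

    variable "ssh_authorized_keys" {
      type     = list(string)
      nullable = false
      validation {
        condition = alltrue([
          for key in var.ssh_authorized_keys :
          startswith(key, "ssh-ed25519 ")
        ])
        error_message = "Only ssh-ed25519 public keys are expected."
      }
    }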