Introduction
When you build a database such as RDS with Terraform, you may also want to run SQL scripts, for example creating tables with CREATE statements or loading data with INSERT statements. This article explains how to execute SQL from Terraform.
The SQL statements you want to apply
As an example, suppose you want to apply the following ddl.sql to the DB resource in order to build a database that stores purchase data.
CREATE TABLE IF NOT EXISTS items
(
  id char(36) NOT NULL,
  name varchar(36) NOT NULL,
  description varchar(255) NOT NULL,
  price int NOT NULL,
  PRIMARY KEY (id)
)
ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE IF NOT EXISTS sales
(
  id int UNSIGNED NOT NULL AUTO_INCREMENT,
  item_id char(36) NOT NULL,
  amount int UNIQUE NOT NULL,
  purchased_at bigint(14) UNSIGNED NOT NULL,
  PRIMARY KEY (id)
)
ENGINE=InnoDB DEFAULT CHARSET=utf8;

ALTER TABLE sales ADD FOREIGN KEY (item_id) REFERENCES items (id) ON DELETE RESTRICT ON UPDATE RESTRICT;
Also, suppose that after creating the tables you want to load sample data such as the following data.sql.
INSERT INTO items (id, name, description, price)
VALUES
(
  '5b0338ac-b12e-42cb-b2c5-a5a4c6533403',
  'Pencil',
  'This is a pencil.',
  12
),
(
  '839156ab-6320-4c4b-a38a-43d34aa57aaa',
  'Eraser',
  'This is an eraser.',
  11
);

INSERT INTO sales (item_id, amount, purchased_at)
VALUES
(
  '5b0338ac-b12e-42cb-b2c5-a5a4c6533403',
  5,
  1661089540000
),
(
  '839156ab-6320-4c4b-a38a-43d34aa57aaa',
  1,
  1661089540000
);
null_resource
In this article, I introduce Terraform code that builds Aurora Serverless v1 as the database and issues SQL against it through the Data API. The directory structure is as follows.
.
├── rds.tf
├── ddl.sql
└── data.sql
To execute SQL from Terraform, use the null_resource resource and specify the SQL file, the command to run, and the environment variables. In this case the SQL is executed through the Data API, so the code is as follows.
resource "aws_rds_cluster" "main" {
.
.
.
}
resource "null_resource" "db_ddl" {
triggers = {
file = filesha1("ddl.sql")
}
provisioner "local-exec" {
command = <<-EOF
while read line; do
echo "$line"
aws rds-data execute-statement --resource-arn "$DB_ARN" --database "$DB_NAME" --secret-arn "$SECRET_ARN" --sql "$line"
done < <(awk 'BEGIN{RS=";\n"}{gsub(/\n/,""); if(NF>0) {print $0";"}}' ddl.sql)
EOF
environment = {
DB_ARN = aws_rds_cluster.main.arn
DB_NAME = aws_rds_cluster.main.database_name
SECRET_ARN = aws_secretsmanager_secret.rds.arn
}
interpreter = ["bash", "-c"]
}
}
resource "null_resource" "sample_data" {
triggers = {
file = filesha1("data.sql")
}
provisioner "local-exec" {
command = <<-EOF
while read line; do
echo "$line"
aws rds-data execute-statement --resource-arn "$DB_ARN" --database "$DB_NAME" --secret-arn "$SECRET_ARN" --sql "$line"
done < <(awk 'BEGIN{RS=";\n"}{gsub(/\n/,""); if(NF>0) {print $0";"}}' data.sql)
EOF
environment = {
DB_ARN = aws_rds_cluster.main.arn
DB_NAME = aws_rds_cluster.main.database_name
SECRET_ARN = aws_secretsmanager_secret.rds.arn
}
interpreter = ["bash", "-c"]
}
depends_on = [
null_resource.db_ddl
]
}
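The environment block above refers to aws_secretsmanager_secret.rds, which is not shown in this article. As an assumption of what it might look like, here is a minimal sketch of a secret holding the credentials that the Data API reads; the secret name and structure are hypothetical and should be adapted to your setup.

# Hypothetical definition of the secret referenced as aws_secretsmanager_secret.rds.
# The Data API expects a secret containing at least "username" and "password".
resource "aws_secretsmanager_secret" "rds" {
  name = "rds-aurora-credentials" # placeholder name
}

resource "aws_secretsmanager_secret_version" "rds" {
  secret_id = aws_secretsmanager_secret.rds.id
  secret_string = jsonencode({
    username = aws_rds_cluster.main.master_username
    password = aws_rds_cluster.main.master_password
  })
}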
If you want to connect to MySQL directly and issue the SQL that way instead of going through the Data API, the code is as follows.
resource "null_resource" "db_ddl" {
triggers = {
file = filesha1("ddl.sql")
}
- provisioner "local-exec" {
- command = <<-EOF
- while read line; do
- echo "$line"
- aws rds-data execute-statement --resource-arn "$DB_ARN" --database "$DB_NAME" --secret-arn "$SECRET_ARN" - -sql "$line"
- done < <(awk 'BEGIN{RS=";\n"}{gsub(/\n/,""); if(NF>0) {print $0";"}}' ddl.sql)
- EOF
- environment = {
- DB_ARN = aws_rds_cluster.main.arn
- DB_NAME = aws_rds_cluster.main.database_name
- SECRET_ARN = aws_secretsmanager_secret.rds.arn
- }
- interpreter = ["bash", "-c"]
- }
+ provisioner "local-exec" {
+ command = "mysql --host=${aws_rds_cluster.main.endpoint} --port=3306 --user=${aws_rds_cluster.main.master_username} --password=${aws_rds_cluster.main.master_password} --database=${aws_rds_cluster.main.database_name} < ${file(${ddl.sql)}"
+ }
}
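Note that this variant puts the master password directly on the command line, where it can show up in Terraform logs and the local process list. As a minimal sketch of an alternative, assuming the same cluster attributes, the password can be passed through the MYSQL_PWD environment variable that the mysql client recognizes:

resource "null_resource" "db_ddl" {
  triggers = {
    file = filesha1("ddl.sql")
  }

  # Sketch only: same direct-connection approach, but the password is supplied
  # via the MYSQL_PWD environment variable instead of the command line.
  provisioner "local-exec" {
    command = "mysql --host=${aws_rds_cluster.main.endpoint} --port=3306 --user=${aws_rds_cluster.main.master_username} --database=${aws_rds_cluster.main.database_name} < ddl.sql"

    environment = {
      MYSQL_PWD = aws_rds_cluster.main.master_password
    }
  }
}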
Also, if you want to run data.sql only in the dev environment, add count as follows.
resource "null_resource" "sample_data" {
+ count = terraform.workspace == "dev" ? 1 : 0
triggers = {
file = filesha1("data.sql")
}
provisioner "local-exec" {
command = <<-EOF
while read line; do
echo "$line"
aws rds-data execute-statement --resource-arn "$DB_ARN" --database "$DB_NAME" --secret-arn "$SECRET_ARN" --sql "$line"
done < <(awk 'BEGIN{RS=";\n"}{gsub(/\n/,""); if(NF>0) {print $0";"}}' data.sql)
EOF
environment = {
DB_ARN = aws_rds_cluster.main.arn
DB_NAME = aws_rds_cluster.main.database_name
SECRET_ARN = aws_secretsmanager_secret.rds.arn
}
interpreter = ["bash", "-c"]
}
depends_on = [
null_resource.db_ddl
]
}
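If you would rather not couple this behavior to the workspace name, the same toggle can be driven by a boolean input variable. The sketch below uses a hypothetical variable name and otherwise keeps the resource exactly as shown above.

# Hypothetical alternative: toggle the sample data with an input variable
# instead of checking terraform.workspace.
variable "enable_sample_data" {
  type    = bool
  default = false
}

resource "null_resource" "sample_data" {
  count = var.enable_sample_data ? 1 : 0

  # ...triggers, provisioner, and depends_on stay exactly as shown above...
}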
Completed code
The final code looks like this.
resource "aws_rds_cluster" "main" {
cluster_identifier = local.aurora_cluster_identifier
database_name = local.aurora_database_name
master_username = local.aurora_master_username
master_password = local.aurora_master_password
db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.main.name
db_subnet_group_name = data.aws_db_subnet_group.main.name
deletion_protection = true
skip_final_snapshot = false
engine = "aurora-mysql"
engine_version = "5.7.mysql_aurora.2.07.1"
engine_mode = "serverless"
backup_retention_period = 7
storage_encrypted = true
iam_database_authentication_enabled = false
enable_http_endpoint = true
scaling_configuration {
min_capacity = 2
max_capacity = 256
auto_pause = true
}
lifecycle {
ignore_changes = [
availability_zones,
engine_version,
master_username,
master_password,
]
}
}
resource "null_resource" "db_ddl" {
triggers = {
file = filesha1("ddl.sql")
}
provisioner "local-exec" {
command = <<-EOF
while read line; do
echo "$line"
aws rds-data execute-statement --resource-arn "$DB_ARN" --database "$DB_NAME" --secret-arn "$SECRET_ARN" --sql "$line"
done < <(awk 'BEGIN{RS=";\n"}{gsub(/\n/,""); if(NF>0) {print $0";"}}' ddl.sql)
EOF
environment = {
DB_ARN = aws_rds_cluster.main.arn
DB_NAME = aws_rds_cluster.main.database_name
SECRET_ARN = aws_secretsmanager_secret.rds.arn
}
interpreter = ["bash", "-c"]
}
}
resource "null_resource" "sample_data" {
count = terraform.workspace == "dev" ? 1 : 0
triggers = {
file = filesha1("data.sql")
}
provisioner "local-exec" {
command = <<-EOF
while read line; do
echo "$line"
aws rds-data execute-statement --resource-arn "$DB_ARN" --database "$DB_NAME" --secret-arn "$SECRET_ARN" --sql "$line"
done < <(awk 'BEGIN{RS=";\n"}{gsub(/\n/,""); if(NF>0) {print $0";"}}' data.sql)
EOF
environment = {
DB_ARN = aws_rds_cluster.main.arn
DB_NAME = aws_rds_cluster.main.database_name
SECRET_ARN = aws_secretsmanager_secret.rds.arn
}
interpreter = ["bash", "-c"]
}
depends_on = [
null_resource.db_ddl
]
}
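The cluster above also references several locals, a parameter group, and a subnet group that this article does not show. Purely as an assumption of what they might look like, here is a minimal sketch; every name and value is a placeholder to adapt to your environment.

# Placeholder supporting definitions assumed by aws_rds_cluster.main above.
locals {
  aurora_cluster_identifier = "purchase-db" # placeholder
  aurora_database_name      = "purchase"    # placeholder
  aurora_master_username    = "admin"       # placeholder
  aurora_master_password    = "change-me"   # better sourced from a variable or Secrets Manager
}

resource "aws_rds_cluster_parameter_group" "main" {
  name   = "purchase-db"      # placeholder
  family = "aurora-mysql5.7"  # matches the aurora-mysql 5.7 engine used above
}

data "aws_db_subnet_group" "main" {
  name = "main" # placeholder: the name of an existing DB subnet group
}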
References