代码之家  ›  专栏  ›  技术社区  ›  Tom McLean

如何使用证书管理器制作的证书在AWS上创建到mongodb实例的TLS/SSL连接?健康检查失败

  •  0
  • Tom McLean  · 技术社区  · 6 月前

    我试图在AWS上部署一个可公开访问的MongoDB实例,我有一个terraform配置来部署它:

    terraform {
      backend "s3" {
        bucket  = "terraform-state"
        key     = "mongodb/terraform.tfstate"
        region  = "eu-west-2"
        encrypt = "true"
      }
    }
    
    terraform {
      required_providers {
        aws = {
          source  = "hashicorp/aws"
          version = "~> 5.78.0"
        }
      }
      required_version = ">= 0.14.5"
    }
    
    provider "aws" {
      region = "eu-west-2" # Specify your desired AWS region
    
      default_tags {
        tags = {
          CreatedBy = "Tom McLean"
          Terraform = "true"
        }
      }
    }
    
    resource "aws_security_group" "alb_sg" {
      name_prefix = "mongodb-alb-sg-"
      vpc_id      = var.vpc_id
      ingress {
        from_port   = 27017
        to_port     = 27017
        protocol    = "tcp"
        cidr_blocks = ["0.0.0.0/0"]
      }
      egress {
        from_port   = 0
        to_port     = 0
        protocol    = "-1"
        cidr_blocks = ["0.0.0.0/0"]
      }
    }
    
    resource "aws_security_group" "lb_sg" {
      name_prefix = "mongodb-lb-sg"
      vpc_id = var.vpc_id
      ingress {
        from_port = 27017
        to_port = 27017
        protocol = "tcp"
        security_groups = [aws_security_group.alb_sg.id]
      }
    
      egress {
        from_port = 0
        to_port = 0
        protocol = "-1"
        cidr_blocks = ["0.0.0.0/0"]
      }
    }
    
    resource "aws_security_group" "mongodb_sg" {
      name        = "mongodb-security-group"
      description = "Security group for MongoDB"
      vpc_id = var.vpc_id
    
      ingress {
        from_port   = 22
        to_port     = 22
        protocol    = "tcp"
        cidr_blocks = ["0.0.0.0/0"]
      }
    
      ingress {
        from_port   = 27017
        to_port     = 27017
        protocol    = "tcp"
        security_groups = [aws_security_group.lb_sg.id]
      }
    
      egress {
        from_port   = 0
        to_port     = 0
        protocol    = -1
        cidr_blocks = ["0.0.0.0/0"]
      }
    }
    
    resource "aws_instance" "mongodb" {
      ami             = "ami-091f18e98bc129c4e" # Ubuntu
      instance_type   = var.instance_type
      vpc_security_group_ids = [aws_security_group.mongodb_sg.id]
      subnet_id = element(var.subnet_ids, 0)
      user_data = templatefile("${path.module}/user-data.sh", {
        device_name    = var.device_name,
        MONGO_USER     = var.mongo_user,
        MONGO_PASSWORD = var.mongo_password
        MONGO_VERSION = var.mongo_version
      })
      user_data_replace_on_change = true
    
      root_block_device {
        volume_size = 32
        volume_type = "gp2"
        delete_on_termination = false
      }
    
      tags = {
        Name = "MongoDB"
      }
    }
    
    resource "aws_ebs_volume" "mongodb_volume" {
      availability_zone = aws_instance.mongodb.availability_zone
      size              = var.ebs_size
      type              = "gp2"
      tags = {
        Name = "MongoDB-EBS"
      }
    }
    
    resource "aws_volume_attachment" "mongodb_attachment" {
      device_name = var.device_name
      volume_id   = aws_ebs_volume.mongodb_volume.id
      instance_id = aws_instance.mongodb.id
    }
    
    resource "aws_lb" "mongodb_lb" {
      name = "mongodb-lb"
      internal = false
      load_balancer_type = "network"
      security_groups = [aws_security_group.alb_sg.id]
      subnets = var.subnet_ids
    }
    
    resource "aws_lb_target_group" "mongodb_tg" {
      name = "mongodb-tg"
      port = 27017
      protocol = "TCP"
      vpc_id = var.vpc_id
      target_type = "instance"
    
      health_check {
        healthy_threshold   = 3
        interval            = 30
        port                = 27017
        protocol            = "TCP"
        timeout             = 5
        unhealthy_threshold = 3
      }
    
      tags = {
        Name = "MongoDB-tg"
      }
    }
    
    resource "aws_lb_target_group_attachment" "mongodb_attachment" {
      target_group_arn = aws_lb_target_group.mongodb_tg.arn
      target_id = aws_instance.mongodb.id
      port = 27017
    }
    
    resource "aws_lb_listener" "mongodb_listener" {
      load_balancer_arn = aws_lb.mongodb_lb.arn
      protocol = "TLS"
      port = 27017
      ssl_policy = "ELBSecurityPolicy-2016-08"
      certificate_arn = var.certificate_arn
    
      default_action {
        type = "forward"
        target_group_arn = aws_lb_target_group.mongodb_tg.arn
      }
    }
    
    resource "aws_route53_record" "mongodb_dns" {
      zone_id = data.aws_route53_zone.main.zone_id
      name = "${var.domain_prefix}.${var.domain_name}"
      type = "A"
    
      alias {
        name = aws_lb.mongodb_lb.dns_name
        zone_id = aws_lb.mongodb_lb.zone_id
        evaluate_target_health = true
      }
    }
    
    data "aws_route53_zone" "main" {
        name = "${var.domain_name}"
        private_zone = false
    }
    

    user-data.sh为:

    #!/bin/bash
    
    # Install docker
    apt-get update -y
    apt-get install -y docker.io
    systemctl start docker
    usermod -aG docker ubuntu
    
    DEVICE="${device_name}"
    MOUNT_POINT="/var/lib/mongodb"
    
    # Wait for the device to become available
    while [ ! -e "$DEVICE" ]; do
        echo "$DEVICE not yet available, waiting..."
        sleep 5
    done
    
    mkdir -p $MOUNT_POINT
    
    # Format the volume if it's not formatted yet
    if ! blkid $DEVICE | grep ext4 > /dev/null; then
        mkfs.ext4 $DEVICE
    fi
    
    mount $DEVICE $MOUNT_POINT
    
    # Ensure the volume mounts automatically after reboot
    echo "$DEVICE $MOUNT_POINT ext4 defaults,nofail,x-systemd.device-timeout=10s 0 2" >> /etc/fstab
    
    resize2fs $DEVICE
    
    chown -R 999:999 $MOUNT_POINT
    
    # Create a systemd service for MongoDB
    cat > /etc/systemd/system/mongodb.service <<EOL
    [Unit]
    Description=MongoDB container
    After=docker.service
    Requires=docker.service
    
    [Service]
    Restart=always
    ExecStartPre=-/usr/bin/docker rm -f mongodb
    ExecStart=/usr/bin/docker run --name=mongodb \
      -p 27017:27017 \
      -v $MOUNT_POINT:/data/db \
      -e MONGO_INITDB_ROOT_USERNAME=${MONGO_USER} \
      -e MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD} \
      --user 999:999 \
      mongo:${MONGO_VERSION}
    ExecStop=/usr/bin/docker stop mongodb
    
    [Install]
    WantedBy=multi-user.target
    EOL
    
    # Enable and start MongoDB systemd service
    systemctl daemon-reload
    sysemctl enable mongodb
    systemctl start mongodb
    

    我可以看到该实例正在运行mongodb docker容器,并在端口27017上监听:

    ubuntu@ip:~$ docker container ls
    CONTAINER ID   IMAGE          COMMAND                  CREATED          STATUS          PORTS                                           NAMES
    49e5ed319c76   mongo:latest   "docker-entrypoint.s…"   10 minutes ago   Up 10 minutes   0.0.0.0:27017->27017/tcp, :::27017->27017/tcp   mongodb
    

    然而,健康检查失败了: enter image description here

    我考虑过的替代解决方案是使用应用程序负载均衡器,但它们在TCP连接上不起作用,我考虑过在mongo实例上自行完成证书工作,但我无法下载我的域证书的.pem文件,因为它是由亚马逊证书管理器颁发的。如何在AWS中与我的mongodb实例进行TLS/SSL连接?

    1 回复  |  直到 6 月前
        1
  •  1
  •   Mark B    6 月前

    您有两个安全组 alb_sg lb_sg 它们具有相同的安全组规则。那么你有一个 mongodb_sg 只允许来自MongoDB的流量的安全组 lb_sg 安全组。

    您正在分配 alb_sg 到网络负载平衡器,您正在分配 mongodb_sg EC2实例。因此,按照您当前的配置方式,负载均衡器使用 alb_sg 无法连接到正在使用的EC2实例 mongodb_sg ,因为EC2实例只允许来自 lb_sg ,它没有分配给任何东西。

    您需要删除其中之一 alb_sg lb_sg 为了消除混淆,并确保您的EC2安全组允许来自您实际分配给负载均衡器的安全组的传入请求。