
Terraform EKS: problem reading cluster data in the providers

  R. Barrett · 2 years ago

    I'm having trouble reading data from my EKS cluster module in the kubernetes and helm providers. I started out with just the cluster_name and tried to read the cluster with a data source. However, that fails because the cluster doesn't exist yet on a fresh apply, so there is no data to read. So I need a way to read the values directly from the module, and that's where I'm stuck.
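
    For context, the data-source approach I tried first looked roughly like this (a reconstructed sketch; the data source name is illustrative):

    # First attempt: look the cluster up with a data source.
    # This fails on a fresh apply because the cluster doesn't exist yet.
    data "aws_eks_cluster" "primary" {
      name = var.cluster_name
    }

    provider "kubernetes" {
      host                   = data.aws_eks_cluster.primary.endpoint
      cluster_ca_certificate = base64decode(data.aws_eks_cluster.primary.certificate_authority[0].data)
    }

    Here is the error I get when I reference the module outputs instead: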

    │ Error: Unsupported attribute
    │ 
    │   on providers.tf line 37, in provider "kubernetes":
    │   37:   cluster_ca_certificate = base64decode(module.primary.cluster_certificate_authority_data.data)
    │     ├────────────────
    │     │ module.primary.cluster_certificate_authority_data is "**********************"
    │ 
    │ Can't access attributes on a primitive-typed value (string).
    ╵
    ╷
    │ Error: Unsupported attribute
    │ 
    │   on providers.tf line 54, in provider "helm":
    │   54:     cluster_ca_certificate = base64decode(module.primary.cluster_certificate_authority_data.data)
    │     ├────────────────
    │     │ module.primary.cluster_certificate_authority_data is "**********************"
    │ 
    │ Can't access attributes on a primitive-typed value (string).
    

    Here is what my Terraform looks like in the main module:

    main.tf

    ################################################
    #          KMS CLUSTER ENCRYPTION KEY          #
    ################################################
    module "kms" {
      source  = "terraform-aws-modules/kms/aws"
      version = "1.1.0"
    
      aliases               = ["eks/${var.cluster_name}__cluster_encryption_key_test"]
      description           = "${var.cluster_name} cluster encryption key"
      enable_default_policy = true
      key_owners            = [data.aws_caller_identity.current.arn]
    
      tags = local.tags
    }
    
    ##################################
    #       KUBERNETES CLUSTER       #
    ##################################
    module "primary" {
      source  = "terraform-aws-modules/eks/aws"
      version = "~> 19.13.1"
    
      cluster_name                    = var.cluster_name
      cluster_version                 = var.cluster_version
      cluster_endpoint_private_access = var.cluster_endpoint_private_access
      cluster_endpoint_public_access  = var.cluster_endpoint_public_access
    
      create_kms_key = false
      cluster_encryption_config = {
        resources        = ["secrets"]
        provider_key_arn = module.kms.key_arn
      }
    
      create_cni_ipv6_iam_policy = var.create_cni_ipv6_iam_policy
      manage_aws_auth_configmap  = true
      aws_auth_roles             = var.aws_auth_roles
    
      vpc_id     = var.vpc_id
      subnet_ids = var.subnet_ids
    
      eks_managed_node_group_defaults = {
        ami_type       = var.ami_type
        disk_size      = var.disk_size
        instance_types = var.instance_types
    
        iam_role_attach_cni_policy = var.iam_role_attach_cni_policy
      }
    
      eks_managed_node_groups = {
        primary = {
          min_size     = 1
          max_size     = 5
          desired_size = 1
    
          capacity_type = "ON_DEMAND"
        }
        secondary = {
          min_size     = 1
          max_size     = 5
          desired_size = 1
    
          capacity_type = "SPOT"
        }
      }
    
      cluster_addons = {
        coredns = {
          most_recent                 = true
          resolve_conflicts_on_create = "OVERWRITE"
          resolve_conflicts_on_update = "PRESERVE"

          timeouts = {
            create = "20m"
            delete = "20m"
            update = "20m"
          }
        }
        kube-proxy = {
          most_recent                 = true
          resolve_conflicts_on_create = "OVERWRITE"
          resolve_conflicts_on_update = "PRESERVE"

          timeouts = {
            create = "20m"
            delete = "20m"
            update = "20m"
          }
        }
        aws-ebs-csi-driver = {
          most_recent                 = true
          resolve_conflicts_on_create = "OVERWRITE"
          resolve_conflicts_on_update = "PRESERVE"

          timeouts = {
            create = "20m"
            delete = "20m"
            update = "20m"
          }
        }
        vpc-cni = {
          most_recent                 = true
          resolve_conflicts_on_create = "OVERWRITE"
          resolve_conflicts_on_update = "PRESERVE"

          timeouts = {
            create = "20m"
            delete = "20m"
            update = "20m"
          }
        }
      }
    
      fargate_profiles = {
        default = {
          name = "default"
          selectors = [
            {
              namespace = "kube-system"
              labels = {
                k8s-app = "kube-dns"
              }
            },
            {
              namespace = "default"
            }
          ]
    
          timeouts = {
            create = "20m"
            delete = "20m"
          }
        }
      }
    
      tags = {
        repo  = "https://github.com/impinj-di/terraform-aws-eks-primary"
        team  = "di"
        owner = "[email protected]"
      }
    }
    
    ####################################
    #       KUBERNETES RESOURCES       #
    ####################################
    resource "kubernetes_namespace" "this" {
      depends_on = [module.primary]
      for_each   = toset(local.eks_namespaces)
      metadata {
        name = each.key
      }
    }
    

    And here is my providers.tf:

    terraform {
      required_version = ">= 1.3.7"
    
      required_providers {
        aws = ">= 4.12.0"
        # harness = {
        #   source = "harness/harness"
        #   version = "0.21.0"
        # }
        helm = {
          source = "hashicorp/helm"
          version = "2.9.0"
        }
        kubernetes = {
          source = "hashicorp/kubernetes"
          version = "2.11.0"
        }
      }
    }
    
    terraform {
      backend "s3" {
        bucket  = "impinj-canary-terraform"
        key     = "terraform-aws-eks-primary.tfstate"
        region  = "us-west-2"
        encrypt = true
      }
    }
    
    provider "aws" {
      alias  = "sec"
      region = "us-west-2"
    }
    
    provider "kubernetes" {
      host                   = module.primary.cluster_endpoint
      cluster_ca_certificate = base64decode(module.primary.cluster_certificate_authority_data.data)
      exec {
        api_version = "client.authentication.k8s.io/v1beta1"
        command     = "aws"
        args = ["eks", "get-token", "--cluster-name", module.primary.cluster_name]
      }
    }
    
    # provider "harness" {
    #   endpoint         = "https://app.harness.io/gateway"
    #   account_id       = var.harness_account_id
    #   platform_api_key = var.harness_platform_api_key
    # }
    
    provider "helm" {
      kubernetes {
        host                   = module.primary.cluster_endpoint
        cluster_ca_certificate = base64decode(module.primary.cluster_certificate_authority_data.data)
        exec {
          api_version = "client.authentication.k8s.io/v1beta1"
          command     = "aws"
          args        = ["eks", "get-token", "--cluster-name", module.primary.cluster_name]
        }
      }
    }
    
    1 Answer

    ishuar · 2 years ago

    The problem, as far as the Can't access attributes on a primitive-typed value (string). error is concerned, is this configuration: cluster_ca_certificate = base64decode(module.primary.cluster_certificate_authority_data.data).

    The cluster_certificate_authority_data output of the terraform-aws-eks module already contains the value from aws_eks_cluster.this[0].certificate_authority[0].data, so the correct reference for cluster_ca_certificate is base64decode(module.primary.cluster_certificate_authority_data):

    provider "kubernetes" {
      host                   = module.primary.cluster_endpoint
      cluster_ca_certificate = base64decode(module.primary.cluster_certificate_authority_data)
      exec {
        api_version = "client.authentication.k8s.io/v1beta1"
        command     = "aws"
        args = ["eks", "get-token", "--cluster-name", module.primary.cluster_name]
      }
    }
    
    
    provider "helm" {
      kubernetes {
        host                   = module.primary.cluster_endpoint
        cluster_ca_certificate = base64decode(module.primary.cluster_certificate_authority_data)
        exec {
          api_version = "client.authentication.k8s.io/v1beta1"
          command     = "aws"
          args        = ["eks", "get-token", "--cluster-name", module.primary.cluster_name]
        }
      }
    }
    

    Optional information (not related to your question)

    In general, I would also recommend separating the EKS cluster deployment from the deployment of Kubernetes resources/workloads. Keeping the resources of the two providers in separate Terraform states limits the scope of any change to either the EKS cluster or the Kubernetes resources; this is also what hashicorp/terraform-provider-kubernetes officially recommends. A sketch of such a split follows below.
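
    As a rough sketch (names here are illustrative, not taken from your setup), the separate workload configuration would look up the existing cluster by name with data sources instead of module outputs:

    # Separate root module for Kubernetes workloads. It assumes the EKS
    # cluster has already been created by the cluster configuration.
    data "aws_eks_cluster" "primary" {
      name = "primary" # illustrative cluster name
    }

    provider "kubernetes" {
      host                   = data.aws_eks_cluster.primary.endpoint
      cluster_ca_certificate = base64decode(data.aws_eks_cluster.primary.certificate_authority[0].data)
      exec {
        api_version = "client.authentication.k8s.io/v1beta1"
        command     = "aws"
        args        = ["eks", "get-token", "--cluster-name", data.aws_eks_cluster.primary.name]
      }
    }

    Because the cluster already exists when this second state is planned, the data sources resolve at plan time, and the provider configuration no longer depends on resources created in the same run.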
