How to set up Kong with AWS RDS

I am trying to install Kong with AWS RDS Postgres on an EC2 instance. Here is the Kong Helm values file:

admin:
  annotations:
    konghq.com/protocol: https
  enabled: true
  http:
    enabled: false
  ingress:
    annotations:
      konghq.com/https-redirect-status-code: "301"
      konghq.com/protocols: https
      konghq.com/strip-path: "true"
      nginx.ingress.kubernetes.io/app-root: /
      nginx.ingress.kubernetes.io/backend-protocol: HTTPS
      nginx.ingress.kubernetes.io/permanent-redirect-code: "301"
    enabled: true
    hostname: kong.127-0-0-1.nip.io
    path: /api
    tls: kong-admin-cert
  tls:
    containerPort: 8444
    enabled: true
    parameters:
    - http2
    servicePort: 8444
  type: ClusterIP
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - podAffinityTerm:
        labelSelector:
          matchExpressions:
          - key: app.kubernetes.io/instance
            operator: In
            values:
            - dataplane
        topologyKey: kubernetes.io/hostname
      weight: 100
certificates:
  enabled: true
  issuer: kong-selfsigned-issuer
  cluster:
    enabled: true
  admin:
    enabled: true
    commonName: kong.127-0-0-1.nip.io
  portal:
    enabled: true
    commonName: developer.127-0-0-1.nip.io
  proxy:
    enabled: true
    commonName: 127-0-0-1.nip.io
    dnsNames:
    - '*.127-0-0-1.nip.io'
cluster:
  enabled: true
  labels:
    konghq.com/service: cluster
  tls:
    containerPort: 8005
    enabled: true
    servicePort: 8005
  type: ClusterIP
clustertelemetry:
  enabled: true
  tls:
    containerPort: 8006
    enabled: true
    servicePort: 8006
  type: ClusterIP
deployment:
  kong:
    daemonset: false
    enabled: true
enterprise:
  enabled: true
  license_secret: kong-enterprise-license
  portal:
    enabled: true
  rbac:
    admin_api_auth: basic-auth
    admin_gui_auth_conf_secret: kong-config-secret
    enabled: true
    session_conf_secret: kong-config-secret
  smtp:
    enabled: false
  vitals:
    enabled: true
env:
  admin_access_log: /dev/stdout
  admin_gui_api_url: https://kong.127-0-0-1.nip.io/api
  admin_error_log: /dev/stdout
  admin_gui_access_log: /dev/stdout
  admin_gui_error_log: /dev/stdout
  admin_gui_host: kong.127-0-0-1.nip.io
  admin_gui_protocol: https
  admin_gui_url: https://kong.127-0-0-1.nip.io/
  cluster_data_plane_purge_delay: 60
  cluster_listen: 0.0.0.0:8005
  cluster_telemetry_listen: 0.0.0.0:8006
  database: postgres
  log_level: debug
  lua_package_path: /opt/?.lua;;
  nginx_worker_processes: "2"
  password:
    valueFrom:
      secretKeyRef:
        key: kong_admin_password
        name: kong-config-secret
  pg_database: kong
  pg_host:
    valueFrom:
      secretKeyRef:
        key: kong_pg_host
        name: kong-config-secret
  pg_ssl: "off"
  pg_ssl_verify: "off"
  pg_user:
    valueFrom:
      secretKeyRef:
        key: kong_pg_user
        name: kong-config-secret
  pg_password:
    valueFrom:
      secretKeyRef:
        key: kong_pg_password
        name: kong-config-secret
  pg_port: 5432
  plugins: bundled,openid-connect
  portal: true
  portal_api_access_log: /dev/stdout
  portal_api_error_log: /dev/stdout
  portal_api_url: https://developer.127-0-0-1.nip.io/api
  portal_auth: basic-auth
  portal_cors_origins: '*'
  portal_gui_access_log: /dev/stdout
  portal_gui_error_log: /dev/stdout
  portal_gui_host: developer.127-0-0-1.nip.io
  portal_gui_protocol: https
  portal_gui_url: https://developer.127-0-0-1.nip.io/
  portal_session_conf:
    valueFrom:
      secretKeyRef:
        key: portal_session_conf
        name: kong-config-secret
  prefix: /kong_prefix/
  proxy_access_log: /dev/stdout
  proxy_error_log: /dev/stdout
  proxy_stream_access_log: /dev/stdout
  proxy_stream_error_log: /dev/stdout
  smtp_mock: "on"
  status_listen: 0.0.0.0:8100
  trusted_ips: 0.0.0.0/0,::/0
  vitals: true
extraLabels:
  konghq.com/component: bb
image:
  repository: kong/kong-gateway
  tag: "3.5"
ingressController:
  enabled: true
  env:
    kong_admin_filter_tag: ingress_controller_default
    kong_admin_tls_skip_verify: true
    kong_admin_token:
      valueFrom:
        secretKeyRef:
          key: password
          name: kong-config-secret
    kong_admin_url: https://localhost:8444
    kong_workspace: default
    publish_service: kong/bb-kong-proxy
  image:
    repository: docker.io/kong/kubernetes-ingress-controller
    tag: "2.10"
  ingressClass: default
  installCRDs: false
manager:
  annotations:
    konghq.com/protocol: https
  enabled: true
  http:
    containerPort: 8002
    enabled: false
    servicePort: 8002
  ingress:
    annotations:
      konghq.com/https-redirect-status-code: "301"
      nginx.ingress.kubernetes.io/backend-protocol: HTTPS
    ingressClassName: kong
    enabled: true
    hostname: kong.127-0-0-1.nip.io
    path: /
    tls: kong-admin-cert
  tls:
    containerPort: 8445
    enabled: true
    parameters:
    - http2
    servicePort: 8445
  type: ClusterIP
migrations:
  enabled: true
  postUpgrade: true
  preUpgrade: true
namespace: kong
podAnnotations:
  kuma.io/gateway: enabled
portal:
  annotations:
    konghq.com/protocol: https
  enabled: true
  http:
    containerPort: 8003
    enabled: false
    servicePort: 8003
  ingress:
    annotations:
      konghq.com/https-redirect-status-code: "301"
      konghq.com/protocols: https
      konghq.com/strip-path: "false"
    ingressClassName: kong
    enabled: true
    hostname: developer.127-0-0-1.nip.io
    path: /
    tls: kong-portal-cert
  tls:
    containerPort: 8446
    enabled: true
    parameters:
    - http2
    servicePort: 8446
  type: ClusterIP
portalapi:
  annotations:
    konghq.com/protocol: https
  enabled: true
  http:
    enabled: false
  ingress:
    annotations:
      konghq.com/https-redirect-status-code: "301"
      konghq.com/protocols: https
      konghq.com/strip-path: "true"
      nginx.ingress.kubernetes.io/app-root: /
    ingressClassName: kong
    enabled: true
    hostname: developer.127-0-0-1.nip.io
    path: /api
    tls: kong-portal-cert
  tls:
    containerPort: 8447
    enabled: true
    parameters:
    - http2
    servicePort: 8447
  type: ClusterIP
postgresql:
  enabled: false
proxy:
  annotations:
    prometheus.io/port: "9542"
    prometheus.io/scrape: "true"
  enabled: true
  http:
    containerPort: 8080
    enabled: true
    hostPort: 80
  ingress:
    enabled: false
  labels:
    enable-metrics: "true"
  tls:
    containerPort: 8443
    enabled: true
    hostPort: 443
  type: LoadBalancer
replicaCount: 1
secretVolumes: []
status:
  enabled: true
  http:
    containerPort: 8100
    enabled: true
  tls:
    containerPort: 8543
    enabled: false

This is the same as the example given in the docs for installation with Helm.
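The values above reference a kong-config-secret and a kong-enterprise-license secret that must exist before the install. A minimal sketch of creating them and running the install, with key names taken from the secretKeyRef entries in the values (all values are placeholders; the release name bb is inferred from the pod names below, and the file is assumed to be saved as values.yaml):

kubectl create namespace kong

# Keys referenced by env.pg_*, env.password, env.portal_session_conf and
# ingressController.env.kong_admin_token above. Placeholder values only.
kubectl create secret generic kong-config-secret -n kong \
  --from-literal=kong_pg_host='<rds-endpoint>' \
  --from-literal=kong_pg_user='<db-user>' \
  --from-literal=kong_pg_password='<db-password>' \
  --from-literal=kong_admin_password='<admin-password>' \
  --from-literal=password='<admin-token>' \
  --from-literal=portal_session_conf='{"cookie_name":"portal_session","secret":"<session-secret>","storage":"kong"}'

# The chart expects the license under the "license" key.
kubectl create secret generic kong-enterprise-license -n kong \
  --from-file=license=license.json

helm repo add kong https://charts.konghq.com
helm repo update
helm install bb kong/kong -n kong --values values.yaml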
After installing the chart with these values, two pods are created:

NAME                            READY   STATUS     RESTARTS   AGE
bb-kong-78f7b9fddd-n4n9t        0/2     Init:1/2   0          12s
bb-kong-init-migrations-fdzmt   1/1     Running    0          12s

The second pod's status changes to “Error” after a few seconds. Describing the first pod shows that it is waiting on the “wait-for-db” init container, and the logs of wait-for-db (inspection commands below) show the following error repeatedly:
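For reference, the inspection commands (pod and container names are from the listing above):

kubectl describe pod bb-kong-78f7b9fddd-n4n9t -n kong
kubectl logs bb-kong-78f7b9fddd-n4n9t -n kong -c wait-for-db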

2024/01/06 19:06:03 [warn] the 'admin_api_uri' configuration property is deprecated, use 'admin_gui_api_url' instead
Error: [PostgreSQL error] failed to retrieve PostgreSQL server_version_num: [cosocket] DNS resolution failed: failed to receive reply from UDP server 10.96.0.10:53: timeout. Tried: ["(short)app.cfvf7mb3t.ap-south-1.rds.amazonaws.com:(na) - cache-miss","app.cfvf7mb3t.ap-south-1.rds.amazonaws.com:33 - cache-miss/querying"]

  Run with --v (verbose) or --vv (debug) for more details
waiting for db

Note:

  • I have not mentioned the real value of the RDS host here.
  • I have tried connecting to the RDS by other means and it works fine, including via psql from the same EC2 instance where Kubernetes is running.
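Since the host can reach RDS but the in-cluster lookup against 10.96.0.10:53 times out, the lookup can be reproduced from inside the cluster with a throwaway pod (the pod name and image are just examples):

kubectl run dns-test -n kong --rm -it --restart=Never \
  --image=busybox:1.36 -- nslookup app.cfvf7mb3t.ap-south-1.rds.amazonaws.com

# If that also times out, check the cluster DNS pods:
kubectl get pods -n kube-system -l k8s-app=kube-dns
kubectl logs -n kube-system -l k8s-app=kube-dns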

@shaiqkar For AWS RDS, since the communication is between two AWS services, you need to create an IAM role. Have you tried this?
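If that refers to IAM database authentication on RDS, a rough sketch of the pieces involved (the instance identifier, role name, account ID, and resource ID are all placeholders, and this is only one reading of the suggestion):

# Enable IAM auth on the RDS instance.
aws rds modify-db-instance \
  --db-instance-identifier <instance-id> \
  --enable-iam-database-authentication \
  --apply-immediately

# Allow the EC2 instance role to request auth tokens for the kong DB user.
aws iam put-role-policy --role-name <ec2-instance-role> \
  --policy-name rds-connect \
  --policy-document '{
    "Version": "2012-10-17",
    "Statement": [{
      "Effect": "Allow",
      "Action": "rds-db:connect",
      "Resource": "arn:aws:rds-db:ap-south-1:<account-id>:dbuser:<dbi-resource-id>/kong"
    }]
  }'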