diff --git a/python/cloudtik/core/_private/cluster/cluster_scaler.py b/python/cloudtik/core/_private/cluster/cluster_scaler.py index f0a6a8831..a91f77c4d 100644 --- a/python/cloudtik/core/_private/cluster/cluster_scaler.py +++ b/python/cloudtik/core/_private/cluster/cluster_scaler.py @@ -49,14 +49,14 @@ from cloudtik.core._private.utils import validate_config, \ hash_launch_conf, hash_runtime_conf, \ format_info_string, get_commands_to_run, with_head_node_ip_environment_variables, \ - encode_cluster_secrets, _get_node_specific_commands, _get_node_specific_config, \ + _get_node_specific_commands, _get_node_specific_config, \ _get_node_specific_docker_config, _get_node_specific_runtime_config, \ _has_node_type_specific_runtime_config, get_runtime_config_key, RUNTIME_CONFIG_KEY, \ process_config_with_privacy, decrypt_config, CLOUDTIK_CLUSTER_SCALING_STATUS, get_runtime_encryption_key, \ - with_runtime_encryption_key + with_runtime_encryption_key, PROVIDER_STORAGE_CONFIG_KEY, PROVIDER_DATABASE_CONFIG_KEY from cloudtik.core._private.constants import CLOUDTIK_MAX_NUM_FAILURES, \ CLOUDTIK_MAX_LAUNCH_BATCH, CLOUDTIK_MAX_CONCURRENT_LAUNCHES, \ - CLOUDTIK_UPDATE_INTERVAL_S, CLOUDTIK_HEARTBEAT_TIMEOUT_S, CLOUDTIK_RUNTIME_ENV_SECRETS, \ + CLOUDTIK_UPDATE_INTERVAL_S, CLOUDTIK_HEARTBEAT_TIMEOUT_S, \ CLOUDTIK_SCALER_PERIODIC_STATUS_LOG logger = logging.getLogger(__name__) @@ -981,7 +981,8 @@ def _update_runtime_hashes(self, new_config): "worker_setup_commands": get_commands_to_run(new_config, "worker_setup_commands"), "worker_start_commands": get_commands_to_run(new_config, "worker_start_commands"), "runtime": new_config.get(RUNTIME_CONFIG_KEY, {}), - "storage": new_config["provider"].get("storage", {}) + "storage": new_config["provider"].get(PROVIDER_STORAGE_CONFIG_KEY, {}), + "database": new_config["provider"].get(PROVIDER_DATABASE_CONFIG_KEY, {}) } (new_runtime_hash, new_file_mounts_contents_hash, diff --git a/python/cloudtik/core/_private/runtime_factory.py b/python/cloudtik/core/_private/runtime_factory.py index 7919ea8cb..9c6b2b323 100644 --- a/python/cloudtik/core/_private/runtime_factory.py +++ b/python/cloudtik/core/_private/runtime_factory.py @@ -44,6 +44,7 @@ BUILT_IN_RUNTIME_DNSMASQ = "dnsmasq" BUILT_IN_RUNTIME_BIND = "bind" BUILT_IN_RUNTIME_COREDNS = "coredns" +BUILT_IN_RUNTIME_KONG = "kong" BUILT_IN_RUNTIME_APISIX = "apisix" DEFAULT_RUNTIMES = [BUILT_IN_RUNTIME_PROMETHEUS, BUILT_IN_RUNTIME_NODE_EXPORTER, BUILT_IN_RUNTIME_SPARK] @@ -164,6 +165,11 @@ def _import_coredns(): return CoreDNSRuntime +def _import_kong(): + from cloudtik.runtime.kong.runtime import KongRuntime + return KongRuntime + + def _import_apisix(): from cloudtik.runtime.apisix.runtime import APISIXRuntime return APISIXRuntime @@ -193,6 +199,7 @@ def _import_apisix(): BUILT_IN_RUNTIME_DNSMASQ: _import_dnsmasq, BUILT_IN_RUNTIME_BIND: _import_bind, BUILT_IN_RUNTIME_COREDNS: _import_coredns, + BUILT_IN_RUNTIME_KONG: _import_kong, BUILT_IN_RUNTIME_APISIX: _import_apisix, } diff --git a/python/cloudtik/core/_private/runtime_utils.py b/python/cloudtik/core/_private/runtime_utils.py index bb290aebe..07760dde7 100644 --- a/python/cloudtik/core/_private/runtime_utils.py +++ b/python/cloudtik/core/_private/runtime_utils.py @@ -5,7 +5,7 @@ import yaml from cloudtik.core._private.constants import CLOUDTIK_RUNTIME_ENV_NODE_TYPE, CLOUDTIK_RUNTIME_ENV_NODE_IP, \ - CLOUDTIK_RUNTIME_ENV_SECRETS, CLOUDTIK_RUNTIME_ENV_HEAD_IP + CLOUDTIK_RUNTIME_ENV_SECRETS, CLOUDTIK_RUNTIME_ENV_HEAD_IP, env_bool from cloudtik.core._private.crypto import 
AESCipher from cloudtik.core._private.utils import load_head_cluster_config, _get_node_type_specific_runtime_config, \ get_runtime_config_key, _get_key_from_kv, decode_cluster_secrets, CLOUDTIK_CLUSTER_NODES_INFO_NODE_TYPE @@ -22,6 +22,10 @@ def get_runtime_value(name): return os.environ.get(name) +def get_runtime_bool(name, default=False): + return env_bool(name, default) + + def get_runtime_node_type(): # Node type should always be set as env node_type = get_runtime_value(CLOUDTIK_RUNTIME_ENV_NODE_TYPE) diff --git a/python/cloudtik/core/_private/utils.py b/python/cloudtik/core/_private/utils.py index a7b650592..4eed362db 100644 --- a/python/cloudtik/core/_private/utils.py +++ b/python/cloudtik/core/_private/utils.py @@ -112,6 +112,9 @@ RUNTIME_TYPES_CONFIG_KEY = "types" ENCRYPTION_KEY_CONFIG_KEY = "encryption.key" +PROVIDER_STORAGE_CONFIG_KEY = "storage" +PROVIDER_DATABASE_CONFIG_KEY = "database" + PRIVACY_CONFIG_KEYS = ["credentials", "account.key", "secret", "access.key", "private.key", "encryption.key"] NODE_INFO_NODE_ID = "node_id" @@ -3065,11 +3068,13 @@ def convert_nodes_to_resource( def get_storage_config_for_update(provider_config): - return get_config_for_update(provider_config, "storage") + return get_config_for_update( + provider_config, PROVIDER_STORAGE_CONFIG_KEY) def get_database_config_for_update(provider_config): - return get_config_for_update(provider_config, "database") + return get_config_for_update( + provider_config, PROVIDER_DATABASE_CONFIG_KEY) def print_json_formatted(json_bytes): diff --git a/python/cloudtik/core/config-schema.json b/python/cloudtik/core/config-schema.json index 610019b51..ab38810b0 100644 --- a/python/cloudtik/core/config-schema.json +++ b/python/cloudtik/core/config-schema.json @@ -1822,6 +1822,36 @@ "description": "Explicitly set the ETCD service to use." } } + }, + "kong": { + "type": "object", + "description": "Kong runtime configurations", + "additionalProperties": true, + "properties": { + "port": { + "type": "integer", + "default": 8000, + "description": "Kong service port." + }, + "ssl_port": { + "type": "integer", + "default": 8443, + "description": "Kong service SSL port." + }, + "database": { + "$ref": "#/definitions/database_connect", + "description": "The database parameters. Engine, address, port are optional if using service discovery." + }, + "database_service_discovery": { + "type": "boolean", + "description": "Whether to discover and use database service in the same workspace.", + "default": true + }, + "database_service_selector": { + "$ref": "#/definitions/service_selector", + "description": "The selector for database service if service discovery is enabled." + } } } } }
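For orientation, the "kong" entry above validates a runtime section shaped like the following (a hedged illustration written as the equivalent Python dict; the "types" key is `RUNTIME_TYPES_CONFIG_KEY` from the utils.py hunk above, and all values are made up):

```python
# Hypothetical cluster config fragment accepted by the new "kong" schema entry.
runtime_config = {
    "types": ["kong"],
    "kong": {
        "port": 8000,       # schema default
        "ssl_port": 8443,   # schema default
        # "database" may omit engine/address/port when service discovery
        # is expected to fill them in.
        "database_service_discovery": True,
        "database_service_selector": {},  # empty selector: match any service
    },
}
```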
+ } + } } } } diff --git a/python/cloudtik/providers/_private/_azure/utils.py b/python/cloudtik/providers/_private/_azure/utils.py index ecee37e6f..d989b47fe 100644 --- a/python/cloudtik/providers/_private/_azure/utils.py +++ b/python/cloudtik/providers/_private/_azure/utils.py @@ -17,7 +17,7 @@ from cloudtik.core._private.util.database_utils import get_database_engine, get_database_port, DATABASE_ENV_ENGINE, \ DATABASE_ENV_ENABLED, DATABASE_ENV_PASSWORD, DATABASE_ENV_USERNAME, DATABASE_ENV_PORT, DATABASE_ENV_HOST from cloudtik.core._private.utils import get_storage_config_for_update, get_database_config_for_update, \ - get_config_for_update + get_config_for_update, PROVIDER_DATABASE_CONFIG_KEY, PROVIDER_STORAGE_CONFIG_KEY from cloudtik.providers._private._azure.azure_identity_credential_adapter import AzureIdentityCredentialAdapter AZURE_DATABASE_ENDPOINT = "address" @@ -196,10 +196,11 @@ def _construct_private_dns_client(provider_config): def get_azure_cloud_storage_config(provider_config: Dict[str, Any]): - if "storage" in provider_config and "azure_cloud_storage" in provider_config["storage"]: - return provider_config["storage"]["azure_cloud_storage"] + storage_config = provider_config.get(PROVIDER_STORAGE_CONFIG_KEY) + if not storage_config: + return None - return None + return storage_config.get("azure_cloud_storage") def get_azure_cloud_storage_config_for_update(provider_config: Dict[str, Any]): @@ -270,10 +271,11 @@ def get_default_azure_cloud_storage(provider_config): def get_azure_database_config(provider_config: Dict[str, Any], default=None): - if "database" in provider_config and "azure.database" in provider_config["database"]: - return provider_config["database"]["azure.database"] + database_config = provider_config.get(PROVIDER_DATABASE_CONFIG_KEY) + if not database_config: + return default - return default + return database_config.get("azure.database", default) def get_azure_database_engine(database_config): diff --git a/python/cloudtik/providers/_private/_kubernetes/config.py b/python/cloudtik/providers/_private/_kubernetes/config.py index fd9838491..31f0c80cb 100644 --- a/python/cloudtik/providers/_private/_kubernetes/config.py +++ b/python/cloudtik/providers/_private/_kubernetes/config.py @@ -15,7 +15,8 @@ from cloudtik.core._private.docker import get_versioned_image from cloudtik.core._private.providers import _get_node_provider from cloudtik.core._private.utils import is_use_internal_ip, get_running_head_node, binary_to_hex, hex_to_binary, \ - get_head_service_ports, _is_use_managed_cloud_storage, _is_use_internal_ip, is_gpu_runtime + get_head_service_ports, _is_use_managed_cloud_storage, _is_use_internal_ip, is_gpu_runtime, \ + PROVIDER_DATABASE_CONFIG_KEY, PROVIDER_STORAGE_CONFIG_KEY from cloudtik.core.tags import CLOUDTIK_TAG_CLUSTER_NAME, CLOUDTIK_TAG_NODE_KIND, NODE_KIND_HEAD, \ CLOUDTIK_GLOBAL_VARIABLE_KEY, CLOUDTIK_GLOBAL_VARIABLE_KEY_PREFIX from cloudtik.core.workspace_provider import Existence @@ -1255,7 +1256,7 @@ def _parse_cpu_or_gpu_resource(resource): def get_default_kubernetes_cloud_storage(provider_config): - storage_config = provider_config.get("storage", {}) + storage_config = provider_config.get(PROVIDER_STORAGE_CONFIG_KEY, {}) if "aws_s3_storage" in storage_config: from cloudtik.providers._private._kubernetes.aws_eks.config import get_default_kubernetes_cloud_storage_for_aws @@ -1272,7 +1273,7 @@ def get_default_kubernetes_cloud_storage(provider_config): def get_default_kubernetes_cloud_database(provider_config): - database_config = 
provider_config.get("database", {}) + database_config = provider_config.get(PROVIDER_DATABASE_CONFIG_KEY, {}) if "aws.database" in database_config: from cloudtik.providers._private._kubernetes.aws_eks.config import \ @@ -1293,7 +1294,7 @@ def get_default_kubernetes_cloud_database(provider_config): def with_kubernetes_environment_variables(provider_config, node_type_config: Dict[str, Any], node_id: str): config_dict = {} - storage_config = provider_config.get("storage", {}) + storage_config = provider_config.get(PROVIDER_STORAGE_CONFIG_KEY, {}) if "aws_s3_storage" in storage_config: from cloudtik.providers._private._kubernetes.aws_eks.config import with_aws_environment_variables diff --git a/python/cloudtik/providers/_private/aliyun/utils.py b/python/cloudtik/providers/_private/aliyun/utils.py index 7a858075d..b4dcd5d89 100644 --- a/python/cloudtik/providers/_private/aliyun/utils.py +++ b/python/cloudtik/providers/_private/aliyun/utils.py @@ -5,7 +5,8 @@ from typing import Any, Dict, List from cloudtik.core._private.constants import CLOUDTIK_DEFAULT_CLOUD_STORAGE_URI -from cloudtik.core._private.utils import get_storage_config_for_update, get_config_for_update +from cloudtik.core._private.utils import get_storage_config_for_update, get_config_for_update, \ + PROVIDER_STORAGE_CONFIG_KEY from cloudtik.core._private.cli_logger import cli_logger @@ -31,10 +32,11 @@ def get_aliyun_oss_storage_config(provider_config: Dict[str, Any]): - if "storage" in provider_config and "aliyun_oss_storage" in provider_config["storage"]: - return provider_config["storage"]["aliyun_oss_storage"] + storage_config = provider_config.get(PROVIDER_STORAGE_CONFIG_KEY) + if not storage_config: + return None - return None + return storage_config.get("aliyun_oss_storage") def get_aliyun_oss_storage_config_for_update(provider_config: Dict[str, Any]): diff --git a/python/cloudtik/providers/_private/aws/utils.py b/python/cloudtik/providers/_private/aws/utils.py index 4871c5b18..950c863e2 100644 --- a/python/cloudtik/providers/_private/aws/utils.py +++ b/python/cloudtik/providers/_private/aws/utils.py @@ -13,7 +13,7 @@ # Max number of retries to AWS (default is 5, time increases exponentially) from cloudtik.core._private.utils import get_storage_config_for_update, get_database_config_for_update, \ - get_config_for_update + get_config_for_update, PROVIDER_DATABASE_CONFIG_KEY, PROVIDER_STORAGE_CONFIG_KEY BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12) @@ -155,10 +155,11 @@ def __exit__(self, type, value, tb): def get_aws_s3_storage_config(provider_config: Dict[str, Any]): - if "storage" in provider_config and "aws_s3_storage" in provider_config["storage"]: - return provider_config["storage"]["aws_s3_storage"] + storage_config = provider_config.get(PROVIDER_STORAGE_CONFIG_KEY) + if not storage_config: + return None - return None + return storage_config.get("aws_s3_storage") def get_aws_s3_storage_config_for_update(provider_config: Dict[str, Any]): @@ -209,10 +210,11 @@ def get_default_aws_cloud_storage(provider_config): def get_aws_database_config(provider_config: Dict[str, Any], default=None): - if "database" in provider_config and "aws.database" in provider_config["database"]: - return provider_config["database"]["aws.database"] + database_config = provider_config.get(PROVIDER_DATABASE_CONFIG_KEY) + if not database_config: + return default - return default + return database_config.get("aws.database", default) def get_aws_database_engine(database_config): diff --git a/python/cloudtik/providers/_private/gcp/utils.py 
b/python/cloudtik/providers/_private/gcp/utils.py index 5abdc56a2..e6c54c1c3 100644 --- a/python/cloudtik/providers/_private/gcp/utils.py +++ b/python/cloudtik/providers/_private/gcp/utils.py @@ -13,7 +13,7 @@ from cloudtik.core._private.util.database_utils import get_database_engine, get_database_port, DATABASE_ENV_ENABLED, \ DATABASE_ENV_ENGINE, DATABASE_ENV_HOST, DATABASE_ENV_PORT, DATABASE_ENV_USERNAME, DATABASE_ENV_PASSWORD from cloudtik.core._private.utils import get_storage_config_for_update, get_database_config_for_update, \ - get_config_for_update + get_config_for_update, PROVIDER_DATABASE_CONFIG_KEY, PROVIDER_STORAGE_CONFIG_KEY from cloudtik.providers._private.gcp.node import (GCPNodeType, MAX_POLLS, POLL_INTERVAL) from cloudtik.providers._private.gcp.node import GCPNode @@ -354,10 +354,11 @@ def _is_head_node_a_tpu(config: dict) -> bool: def get_gcp_cloud_storage_config(provider_config: Dict[str, Any]): - if "storage" in provider_config and "gcp_cloud_storage" in provider_config["storage"]: - return provider_config["storage"]["gcp_cloud_storage"] + storage_config = provider_config.get(PROVIDER_STORAGE_CONFIG_KEY) + if not storage_config: + return None - return None + return storage_config.get("gcp_cloud_storage") def get_gcp_cloud_storage_config_for_update(provider_config: Dict[str, Any]): @@ -419,10 +420,11 @@ def get_default_gcp_cloud_storage(provider_config): def get_gcp_database_config(provider_config: Dict[str, Any], default=None): - if "database" in provider_config and "gcp.database" in provider_config["database"]: - return provider_config["database"]["gcp.database"] + database_config = provider_config.get(PROVIDER_DATABASE_CONFIG_KEY) + if not database_config: + return default - return default + return database_config.get("gcp.database", default) def get_gcp_database_engine(database_config): diff --git a/python/cloudtik/providers/_private/huaweicloud/utils.py b/python/cloudtik/providers/_private/huaweicloud/utils.py index d33169202..2877f7bfc 100644 --- a/python/cloudtik/providers/_private/huaweicloud/utils.py +++ b/python/cloudtik/providers/_private/huaweicloud/utils.py @@ -22,7 +22,8 @@ from cloudtik.core._private.constants import \ CLOUDTIK_DEFAULT_CLOUD_STORAGE_URI, env_bool -from cloudtik.core._private.utils import get_storage_config_for_update, get_config_for_update +from cloudtik.core._private.utils import get_storage_config_for_update, get_config_for_update, \ + PROVIDER_STORAGE_CONFIG_KEY OBS_SERVICES_URL = 'https://obs.{location}.myhuaweicloud.com' OBS_SERVICES_DEFAULT_URL = 'https://obs.myhuaweicloud.com' @@ -197,11 +198,11 @@ def _make_obs_client(config_provider: Dict[str, Any], region=None) -> Any: def get_huaweicloud_obs_storage_config(provider_config: Dict[str, Any]): - if "storage" in provider_config and "huaweicloud_obs_storage" in \ - provider_config["storage"]: - return provider_config["storage"]["huaweicloud_obs_storage"] + storage_config = provider_config.get(PROVIDER_STORAGE_CONFIG_KEY) + if not storage_config: + return None - return None + return storage_config.get("huaweicloud_obs_storage") def get_huaweicloud_obs_storage_endpoint(region: str = None) -> str: diff --git a/python/cloudtik/providers/kubernetes/cloudtik_operator/operator_utils.py b/python/cloudtik/providers/kubernetes/cloudtik_operator/operator_utils.py index d39286ae7..8b3f6276a 100644 --- a/python/cloudtik/providers/kubernetes/cloudtik_operator/operator_utils.py +++ b/python/cloudtik/providers/kubernetes/cloudtik_operator/operator_utils.py @@ -7,10 +7,11 @@ from 
kubernetes.client.rest import ApiException from cloudtik.core._private import constants +from cloudtik.core._private.core_utils import get_config_for_update from cloudtik.providers._private._kubernetes import custom_objects_api from cloudtik.providers._private._kubernetes.config import _get_cluster_selector from cloudtik.providers._private._kubernetes.node_provider import head_service_selector -from cloudtik.core._private.utils import _get_default_config +from cloudtik.core._private.utils import _get_default_config, PROVIDER_STORAGE_CONFIG_KEY CLOUDTIK_API_GROUP = "cloudtik.io" CLOUDTIK_API_VERSION = "v1" @@ -230,9 +231,8 @@ def configure_cloud_storage( if "cloudStorage" not in cloud_config: return - if "storage" not in provider_config: - provider_config["storage"] = {} - storage_config = provider_config["storage"] + storage_config = get_config_for_update( + provider_config, PROVIDER_STORAGE_CONFIG_KEY) cloud_storage = cloud_config["cloudStorage"] for field in cloud_storage: diff --git a/python/cloudtik/runtime/ai/scripts/schema-init.sh b/python/cloudtik/runtime/ai/scripts/schema-init.sh index 7a18ae82a..86da8bcc4 100644 --- a/python/cloudtik/runtime/ai/scripts/schema-init.sh +++ b/python/cloudtik/runtime/ai/scripts/schema-init.sh @@ -16,7 +16,6 @@ function create_database_schema() { } function init_schema() { - DATABASE_NAME=hive_metastore if [ "${SQL_DATABASE}" == "true" ] \ && [ "$AI_WITH_SQL_DATABASE" != "false" ]; then create_database_schema diff --git a/python/cloudtik/runtime/apisix/runtime.py b/python/cloudtik/runtime/apisix/runtime.py index d065e2d88..9fa8465d7 100644 --- a/python/cloudtik/runtime/apisix/runtime.py +++ b/python/cloudtik/runtime/apisix/runtime.py @@ -6,7 +6,7 @@ from cloudtik.runtime.common.runtime_base import RuntimeBase from cloudtik.runtime.apisix.utils import _get_runtime_processes, \ _get_runtime_services, _with_runtime_environment_variables, _config_depended_services, _prepare_config_on_head, \ - _validate_config, _get_runtime_endpoints, _get_head_service_ports + _validate_config, _get_runtime_endpoints, _get_head_service_ports, _get_runtime_logs logger = logging.getLogger(__name__) @@ -53,6 +53,10 @@ def get_head_service_ports(self) -> Dict[str, Any]: def get_runtime_services(self, cluster_name: str): return _get_runtime_services(self.runtime_config, cluster_name) + @staticmethod + def get_logs() -> Dict[str, str]: + return _get_runtime_logs() + @staticmethod def get_processes(): return _get_runtime_processes() diff --git a/python/cloudtik/runtime/apisix/scripts/configure.sh b/python/cloudtik/runtime/apisix/scripts/configure.sh index b06813510..607685662 100644 --- a/python/cloudtik/runtime/apisix/scripts/configure.sh +++ b/python/cloudtik/runtime/apisix/scripts/configure.sh @@ -32,6 +32,7 @@ function check_apisix_installed() { function configure_apisix() { prepare_base_conf + mkdir -p ${APISIX_HOME}/logs APISIX_CONF_DIR=${APISIX_HOME}/conf mkdir -p ${APISIX_CONF_DIR} @@ -42,7 +43,7 @@ function configure_apisix() { sed -i "s#{%admin.port%}#${APISIX_ADMIN_PORT}#g" ${config_template_file} sed -i "s#{%cluster.name%}#${CLOUDTIK_CLUSTER}#g" ${config_template_file} - cp ${config_template_file} ${APISIX_CONF_INCLUDE_DIR}/config.yaml + cp ${config_template_file} ${APISIX_CONF_DIR}/config.yaml } set_head_option "$@" diff --git a/python/cloudtik/runtime/apisix/scripts/install.sh b/python/cloudtik/runtime/apisix/scripts/install.sh index 33e73844d..63d6cb244 100644 --- a/python/cloudtik/runtime/apisix/scripts/install.sh +++ 
b/python/cloudtik/runtime/apisix/scripts/install.sh @@ -24,7 +24,10 @@ function install_apisix() { | sudo tee /etc/apt/sources.list.d/apisix.list >/dev/null sudo apt-get -qq update -y > /dev/null && \ - sudo DEBIAN_FRONTEND=noninteractive apt-get install -qq -y apisix=${APISIX_VERSION}.\* > /dev/null + sudo DEBIAN_FRONTEND=noninteractive apt-get install -qq -y \ + apisix=${APISIX_VERSION}.\* > /dev/null && \ + sudo rm -f /etc/apt/sources.list.d/openresty.list && \ + sudo rm -f /etc/apt/sources.list.d/apisix.list fi } diff --git a/python/cloudtik/runtime/apisix/scripts/services.sh index 64f2f88eb..445716369 100644 --- a/python/cloudtik/runtime/apisix/scripts/services.sh +++ b/python/cloudtik/runtime/apisix/scripts/services.sh @@ -20,10 +20,13 @@ set_service_command "$@" case "$SERVICE_COMMAND" in start) - apisix start -c ${APISIX_CONFIG_FILE} + sudo apisix start \ + -c ${APISIX_CONFIG_FILE} \ + >${APISIX_HOME}/logs/apisix.log 2>&1 ;; stop) - apisix stop + sudo apisix stop \ + >${APISIX_HOME}/logs/apisix.log 2>&1 ;; -h|--help) echo "Usage: $0 start|stop --head" >&2 diff --git a/python/cloudtik/runtime/apisix/utils.py b/python/cloudtik/runtime/apisix/utils.py index 3d8facf4d..18f506ed3 100644 --- a/python/cloudtik/runtime/apisix/utils.py +++ b/python/cloudtik/runtime/apisix/utils.py @@ -2,7 +2,7 @@ from typing import Any, Dict from cloudtik.core._private.core_utils import get_config_for_update, get_list_for_update, get_address_string -from cloudtik.core._private.runtime_factory import BUILT_IN_RUNTIME_APISIX, BUILT_IN_RUNTIME_ETCD +from cloudtik.core._private.runtime_factory import BUILT_IN_RUNTIME_APISIX from cloudtik.core._private.runtime_utils import get_runtime_config_from_node, load_and_save_yaml from cloudtik.core._private.service_discovery.runtime_services import get_service_discovery_runtime from cloudtik.core._private.service_discovery.utils import \ @@ -18,7 +18,7 @@ # The second element, if True, is to filter ps results by command name. # The third element is the process name. # The fourth element, if node, the process should be on all nodes; if head, the process should be on the head node. - ["apisix", True, "APISIX", "node"], + ["/usr/local/apisix", False, "APISIX", "node"], ]
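The entry change above switches from matching the `apisix` command name to matching the install path anywhere in the command line, presumably because the started service runs under OpenResty/nginx rather than a process literally named `apisix`. A minimal sketch of how such an entry could be evaluated, assuming the field semantics described in the comment above; the real matcher lives elsewhere in CloudTik and may differ:

```python
import psutil

# [keyword, match_by_command_name, display_name, node_scope]
APISIX_PROCESS = ["/usr/local/apisix", False, "APISIX", "node"]

def is_process_running(entry) -> bool:
    keyword, by_name, _display, _scope = entry
    for proc in psutil.process_iter(["name", "cmdline"]):
        if by_name:
            # Second element True: compare against the command name only.
            if proc.info["name"] == keyword:
                return True
        elif keyword in " ".join(proc.info["cmdline"] or []):
            # Otherwise look for the keyword in the full command line.
            return True
    return False
```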
- ["apisix", True, "APISIX", "node"], + ["/usr/local/apisix", False, "APISIX", "node"], ] APISIX_SERVICE_PORT_CONFIG_KEY = "port" @@ -52,15 +52,21 @@ def _get_runtime_processes(): return RUNTIME_PROCESSES +def _get_runtime_logs(): + home_dir = _get_home_dir() + logs_dir = os.path.join(home_dir, "logs") + return {BUILT_IN_RUNTIME_APISIX: logs_dir} + + def _config_depended_services(cluster_config: Dict[str, Any]) -> Dict[str, Any]: cluster_config = discover_etcd_from_workspace( - cluster_config, BUILT_IN_RUNTIME_ETCD) + cluster_config, BUILT_IN_RUNTIME_APISIX) return cluster_config def _prepare_config_on_head(cluster_config: Dict[str, Any]): cluster_config = discover_etcd_on_head( - cluster_config, BUILT_IN_RUNTIME_ETCD) + cluster_config, BUILT_IN_RUNTIME_APISIX) _validate_config(cluster_config, final=True) return cluster_config diff --git a/python/cloudtik/runtime/common/service_discovery/runtime_discovery.py b/python/cloudtik/runtime/common/service_discovery/runtime_discovery.py index b8238a1c4..c80e48b71 100644 --- a/python/cloudtik/runtime/common/service_discovery/runtime_discovery.py +++ b/python/cloudtik/runtime/common/service_discovery/runtime_discovery.py @@ -112,7 +112,7 @@ def discover_consul( config: Dict[str, Any], service_selector_key: str, cluster_config: Dict[str, Any], - discovery_type): + discovery_type: DiscoveryType,): return discover_runtime_service_addresses( config, service_selector_key, runtime_type=BUILT_IN_RUNTIME_CONSUL, @@ -125,7 +125,7 @@ def discover_zookeeper( config: Dict[str, Any], service_selector_key: str, cluster_config: Dict[str, Any], - discovery_type): + discovery_type: DiscoveryType,): service_addresses = discover_runtime_service_addresses( config, service_selector_key, runtime_type=BUILT_IN_RUNTIME_ZOOKEEPER, @@ -141,7 +141,7 @@ def discover_hdfs( config: Dict[str, Any], service_selector_key: str, cluster_config: Dict[str, Any], - discovery_type): + discovery_type: DiscoveryType,): service_addresses = discover_runtime_service_addresses( config, service_selector_key, runtime_type=BUILT_IN_RUNTIME_HDFS, @@ -160,7 +160,7 @@ def discover_metastore( config: Dict[str, Any], service_selector_key: str, cluster_config: Dict[str, Any], - discovery_type): + discovery_type: DiscoveryType,): service_addresses = discover_runtime_service_addresses( config, service_selector_key, runtime_type=BUILT_IN_RUNTIME_METASTORE, @@ -180,12 +180,16 @@ def discover_database( config: Dict[str, Any], service_selector_key: str, cluster_config: Dict[str, Any], - discovery_type): + discovery_type: DiscoveryType, + database_runtime_type=None): # TODO: because feature tag is not supported for workspace based discovery # Use a list of database runtimes here. 
@@ -180,12 +180,16 @@ def discover_database( config: Dict[str, Any], service_selector_key: str, cluster_config: Dict[str, Any], - discovery_type): + discovery_type: DiscoveryType, + database_runtime_type=None): # TODO: because feature tag is not supported for workspace based discovery # Use a list of database runtimes here. + if not database_runtime_type: + # if no specific database type, default to all known types + database_runtime_type = BUILT_IN_DATABASE_RUNTIMES service_instance = discover_runtime_service( config, service_selector_key, - runtime_type=BUILT_IN_DATABASE_RUNTIMES, + runtime_type=database_runtime_type, cluster_config=cluster_config, discovery_type=discovery_type, ) @@ -200,7 +204,7 @@ def discover_etcd( config: Dict[str, Any], service_selector_key: str, cluster_config: Dict[str, Any], - discovery_type): + discovery_type: DiscoveryType): service_addresses = discover_runtime_service_addresses( config, service_selector_key, runtime_type=BUILT_IN_RUNTIME_ETCD, @@ -410,11 +414,14 @@ def discover_zookeeper_on_head( def is_database_service_discovery(runtime_type_config): - return runtime_type_config.get(DATABASE_SERVICE_DISCOVERY_KEY, True) + return runtime_type_config.get( + DATABASE_SERVICE_DISCOVERY_KEY, True) def discover_database_from_workspace( - cluster_config: Dict[str, Any], runtime_type): + cluster_config: Dict[str, Any], runtime_type, + database_runtime_type=None, + allow_local=True): runtime_config = get_runtime_config(cluster_config) runtime_type_config = runtime_config.get(runtime_type, {}) database_config = runtime_type_config.get(DATABASE_CONNECT_KEY, {}) @@ -426,14 +433,15 @@ # 4. if there is a database that can be discovered if (is_database_configured(database_config) or - get_database_runtime_in_cluster(runtime_config) or + (allow_local and get_database_runtime_in_cluster(runtime_config)) or not is_database_service_discovery(runtime_type_config)): return cluster_config database_service = discover_database( runtime_type_config, DATABASE_SERVICE_SELECTOR_KEY, cluster_config=cluster_config, - discovery_type=DiscoveryType.WORKSPACE) + discovery_type=DiscoveryType.WORKSPACE, + database_runtime_type=database_runtime_type) if database_service: runtime_type_config = get_config_for_update( runtime_config, runtime_type) @@ -445,7 +453,9 @@ def discover_database_on_head( - cluster_config: Dict[str, Any], runtime_type): + cluster_config: Dict[str, Any], runtime_type, + database_runtime_type=None, + allow_local=True): runtime_config = get_runtime_config(cluster_config) runtime_type_config = runtime_config.get(runtime_type, {}) if not is_database_service_discovery(runtime_type_config): database_config = runtime_type_config.get(DATABASE_CONNECT_KEY, {}) if (is_database_configured(database_config) or - get_database_runtime_in_cluster(runtime_config)): + (allow_local and get_database_runtime_in_cluster(runtime_config))): # Database already configured return cluster_config @@ -461,7 +471,8 @@ database_service = discover_database( runtime_type_config, DATABASE_SERVICE_SELECTOR_KEY, cluster_config=cluster_config, - discovery_type=DiscoveryType.CLUSTER) + discovery_type=DiscoveryType.CLUSTER, + database_runtime_type=database_runtime_type) if database_service: runtime_type_config = get_config_for_update( runtime_config, runtime_type) diff --git a/python/cloudtik/runtime/flink/utils.py b/python/cloudtik/runtime/flink/utils.py index a927aba4b..a5a3faa56 100644 --- a/python/cloudtik/runtime/flink/utils.py +++ b/python/cloudtik/runtime/flink/utils.py @@ -11,7 +11,7 @@ get_service_discovery_config, SERVICE_DISCOVERY_FEATURE_SCHEDULER from cloudtik.core._private.utils import round_memory_size_to_gb, load_head_cluster_config, \ RUNTIME_CONFIG_KEY,
load_properties_file, save_properties_file, is_use_managed_cloud_storage, \ - print_json_formatted, get_config_for_update, get_runtime_config + print_json_formatted, get_config_for_update, get_runtime_config, PROVIDER_STORAGE_CONFIG_KEY from cloudtik.core.scaling_policy import ScalingPolicy from cloudtik.runtime.common.service_discovery.cluster import has_runtime_in_cluster from cloudtik.runtime.common.service_discovery.runtime_discovery import \ @@ -293,7 +293,8 @@ def _is_valid_storage_config(config: Dict[str, Any], final=False): # Check any cloud storage is configured provider_config = config["provider"] - if ("storage" in provider_config) or is_use_managed_cloud_storage(config): + if (PROVIDER_STORAGE_CONFIG_KEY in provider_config or + (not final and is_use_managed_cloud_storage(config))): return True # if there is service discovery mechanism, assume we can get from service discovery diff --git a/python/cloudtik/runtime/grafana/scripts/configure.py b/python/cloudtik/runtime/grafana/scripts/configure.py index 495d20601..04d9e803f 100644 --- a/python/cloudtik/runtime/grafana/scripts/configure.py +++ b/python/cloudtik/runtime/grafana/scripts/configure.py @@ -1,6 +1,6 @@ import argparse -import os +from cloudtik.core._private.runtime_utils import get_runtime_bool from cloudtik.runtime.grafana.utils import configure_data_sources @@ -11,8 +11,8 @@ def main(): help='Configuring for head node.') args = parser.parse_args() - high_availability = os.environ.get("GRAFANA_HIGH_AVAILABILITY") - if high_availability == "true" or args.head: + high_availability = get_runtime_bool("GRAFANA_HIGH_AVAILABILITY") + if high_availability or args.head: configure_data_sources(args.head) diff --git a/python/cloudtik/runtime/grafana/scripts/services.py b/python/cloudtik/runtime/grafana/scripts/services.py index de19d7d8a..4a1cca7eb 100644 --- a/python/cloudtik/runtime/grafana/scripts/services.py +++ b/python/cloudtik/runtime/grafana/scripts/services.py @@ -1,6 +1,6 @@ import argparse -from cloudtik.core._private.runtime_utils import get_runtime_value +from cloudtik.core._private.runtime_utils import get_runtime_value, get_runtime_bool from cloudtik.runtime.grafana.utils import start_pull_server, stop_pull_server, \ GRAFANA_DATA_SOURCES_SCOPE_WORKSPACE @@ -34,8 +34,8 @@ def main(): ) args = parser.parse_args() - high_availability = get_runtime_value("GRAFANA_HIGH_AVAILABILITY") - if high_availability == "true" or args.head: + high_availability = get_runtime_bool("GRAFANA_HIGH_AVAILABILITY") + if high_availability or args.head: if args.command == "start": start_service(args.head) elif args.command == "stop": diff --git a/python/cloudtik/runtime/haproxy/scripts/configure.py b/python/cloudtik/runtime/haproxy/scripts/configure.py index 623949625..70c379fdb 100644 --- a/python/cloudtik/runtime/haproxy/scripts/configure.py +++ b/python/cloudtik/runtime/haproxy/scripts/configure.py @@ -1,6 +1,6 @@ import argparse -import os +from cloudtik.core._private.runtime_utils import get_runtime_bool from cloudtik.runtime.haproxy.utils import configure_backend @@ -11,8 +11,8 @@ def main(): help='Configuring for head node.') args = parser.parse_args() - high_availability = os.environ.get("HAPROXY_HIGH_AVAILABILITY") - if high_availability == "true" or args.head: + high_availability = get_runtime_bool("HAPROXY_HIGH_AVAILABILITY") + if high_availability or args.head: configure_backend(args.head)
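These scripts previously treated only the literal string "true" as enabled. get_runtime_bool, added earlier in this patch, delegates to env_bool from cloudtik.core._private.constants; a plausible sketch of the semantics (the real implementation may accept a different set of spellings):

```python
import os

def env_bool(name: str, default: bool = False) -> bool:
    # Plausible behavior only; see cloudtik.core._private.constants for the
    # real implementation.
    value = os.environ.get(name)
    if value is None:
        return default
    return value.lower() in ("1", "true")

def get_runtime_bool(name, default=False):
    # The wrapper added by this patch simply forwards to env_bool.
    return env_bool(name, default)
```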
diff --git a/python/cloudtik/runtime/haproxy/scripts/services.py index 0fb764ef5..66a6e86d7 100644 --- a/python/cloudtik/runtime/haproxy/scripts/services.py +++ b/python/cloudtik/runtime/haproxy/scripts/services.py @@ -1,6 +1,6 @@ import argparse -from cloudtik.core._private.runtime_utils import get_runtime_value +from cloudtik.core._private.runtime_utils import get_runtime_value, get_runtime_bool from cloudtik.runtime.haproxy.utils \ import start_pull_server, stop_pull_server, HAPROXY_CONFIG_MODE_DYNAMIC @@ -34,8 +34,8 @@ def main(): ) args = parser.parse_args() - high_availability = get_runtime_value("HAPROXY_HIGH_AVAILABILITY") - if high_availability == "true" or args.head: + high_availability = get_runtime_bool("HAPROXY_HIGH_AVAILABILITY") + if high_availability or args.head: if args.command == "start": start_service(args.head) elif args.command == "stop": diff --git a/python/cloudtik/runtime/kong/__init__.py b/python/cloudtik/runtime/kong/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/python/cloudtik/runtime/kong/conf/kong.conf b/python/cloudtik/runtime/kong/conf/kong.conf new file mode 100644 index 000000000..ccdaff456 --- /dev/null +++ b/python/cloudtik/runtime/kong/conf/kong.conf @@ -0,0 +1,1992 @@ +# ----------------------- +# Kong configuration file +# ----------------------- +# +# The commented-out settings shown in this file represent the default values. +# +# This file is read when `kong start` or `kong prepare` are used. Kong +# generates the Nginx configuration with the settings specified in this file. +# +# All environment variables prefixed with `KONG_` and capitalized will override +# the settings specified in this file. +# Example: +# `log_level` setting -> `KONG_LOG_LEVEL` env variable +# +# Boolean values can be specified as `on`/`off` or `true`/`false`. +# Lists must be specified as comma-separated strings. +# +# All comments in this file can be removed safely, including the +# commented-out properties. +# You can verify the integrity of your settings with `kong check <conf>`. + +#------------------------------------------------------------------------------ +# GENERAL +#------------------------------------------------------------------------------ + +#prefix = /usr/local/kong/ # Working directory. Equivalent to Nginx's + # prefix path, containing temporary files + # and logs. + # Each Kong process must have a separate + # working directory. + +#log_level = notice # Log level of the Nginx server. Logs are + # found at `<prefix>/logs/error.log`. + +# See http://nginx.org/en/docs/ngx_core_module.html#error_log for a list +# of accepted values. + +#proxy_access_log = logs/access.log # Path for proxy port request access + # logs. Set this value to `off` to + # disable logging proxy requests. + # If this value is a relative path, + # it will be placed under the + # `prefix` location. + + +#proxy_error_log = logs/error.log # Path for proxy port request error + # logs. The granularity of these logs + # is adjusted by the `log_level` + # property. + +#proxy_stream_access_log = logs/access.log basic # Path for tcp streams proxy port access + # logs. Set this value to `off` to + # disable logging proxy requests. + # If this value is a relative path, + # it will be placed under the + # `prefix` location. + # `basic` is defined as `'$remote_addr [$time_local] ' + # '$protocol $status $bytes_sent $bytes_received ' + # '$session_time'` + +#proxy_stream_error_log = logs/error.log # Path for tcp streams proxy port request error + # logs. The granularity of these logs + # is adjusted by the `log_level` + # property.
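+# For instance, under the `KONG_` override rule stated at the top of this file, a node started with `KONG_PROXY_ACCESS_LOG=off kong start -c kong.conf` disables the proxy access log regardless of the `proxy_access_log` value configured here (an illustrative note, not one of the upstream defaults).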
+ +#admin_access_log = logs/admin_access.log # Path for Admin API request access + # logs. If Hybrid Mode is enabled + # and the current node is set to be + # the Control Plane, then the + # connection requests from Data Planes + # are also written to this file with + # server name "kong_cluster_listener". + # + # Set this value to `off` to + # disable logging Admin API requests. + # If this value is a relative path, + # it will be placed under the + # `prefix` location. + +#admin_error_log = logs/error.log # Path for Admin API request error + # logs. The granularity of these logs + # is adjusted by the `log_level` + # property. + +#status_access_log = off # Path for Status API request access + # logs. The default value of `off` + # implies that logging for this API + # is disabled by default. + # If this value is a relative path, + # it will be placed under the + # `prefix` location. + +#status_error_log = logs/status_error.log # Path for Status API request error + # logs. The granularity of these logs + # is adjusted by the `log_level` + # property. + +#vaults = bundled # Comma-separated list of vaults this node + # should load. By default, all the bundled + # vaults are enabled. + # + # The specified name(s) will be substituted as + # such in the Lua namespace: + # `kong.vaults.{name}.*`. + +#opentelemetry_tracing = off # Deprecated: use tracing_instrumentations instead + +#tracing_instrumentations = off # Comma-separated list of tracing instrumentations + # this node should load. By default, no instrumentations + # are enabled. + # + # Valid values to this setting are: + # + # - `off`: do not enable instrumentations. + # - `request`: only enable request-level instrumentations. + # - `all`: enable all the following instrumentations. + # - `db_query`: trace database query + # - `dns_query`: trace DNS query. + # - `router`: trace router execution, including + # router rebuilding. + # - `http_client`: trace OpenResty HTTP client requests. + # - `balancer`: trace balancer retries. + # - `plugin_rewrite`: trace plugins iterator + # execution with rewrite phase. + # - `plugin_access`: trace plugins iterator + # execution with access phase. + # - `plugin_header_filter`: trace plugins iterator + # execution with header_filter phase. + # + # **Note:** In the current implementation, + # tracing instrumentations are not enabled in + # stream mode. + +#opentelemetry_tracing_sampling_rate = 1.0 # Deprecated: use tracing_sampling_rate instead +#tracing_sampling_rate = 0.01 # Tracing instrumentation sampling rate. + # Tracer samples a fixed percentage of all spans + # following the sampling rate. + # + # Example: `0.25`, this should account for 25% of all traces. + + +#plugins = bundled # Comma-separated list of plugins this node + # should load. By default, only plugins + # bundled in official distributions are + # loaded via the `bundled` keyword. + # + # Loading a plugin does not enable it by + # default, but only instructs Kong to load its + # source code, and allows to configure the + # plugin via the various related Admin API + # endpoints. + # + # The specified name(s) will be substituted as + # such in the Lua namespace: + # `kong.plugins.{name}.*`. + # + # When the `off` keyword is specified as the + # only value, no plugins will be loaded. 
+ # + # `bundled` and plugin names can be mixed + # together, as the following examples suggest: + # + # - `plugins = bundled,custom-auth,custom-log` + # will include the bundled plugins plus two + # custom ones + # - `plugins = custom-auth,custom-log` will + # *only* include the `custom-auth` and + # `custom-log` plugins. + # - `plugins = off` will not include any + # plugins + # + # **Note:** Kong will not start if some + # plugins were previously configured (i.e. + # have rows in the database) and are not + # specified in this list. Before disabling a + # plugin, ensure all instances of it are + # removed before restarting Kong. + # + # **Note:** Limiting the amount of available + # plugins can improve P99 latency when + # experiencing LRU churning in the database + # cache (i.e. when the configured + # `mem_cache_size`) is full. + +#pluginserver_names = # Comma-separated list of names for pluginserver + # processes. The actual names are used for + # log messages and to relate the actual settings. + +#pluginserver_XXX_socket = <prefix>/<XXX>.socket # Path to the unix socket + # used by the pluginserver. +#pluginserver_XXX_start_cmd = /usr/local/bin/<XXX> # Full command (including + # any needed arguments) to + # start the pluginserver +#pluginserver_XXX_query_cmd = /usr/local/bin/query_<XXX> # Full command to "query" the + # pluginserver. Should + # produce a JSON with the + # dump info of all plugins it + # manages + +#port_maps = # With this configuration parameter, you can + # let the Kong to know about the port from + # which the packets are forwarded to it. This + # is fairly common when running Kong in a + # containerized or virtualized environment. + # For example, `port_maps=80:8000, 443:8443` + # instructs Kong that the port 80 is mapped + # to 8000 (and the port 443 to 8443), where + # 8000 and 8443 are the ports that Kong is + # listening to. + # + # This parameter helps Kong set a proper + # forwarded upstream HTTP request header or to + # get the proper forwarded port with the Kong PDK + # (in case other means determining it has + # failed). It changes routing by a destination + # port to route by a port from which packets + # are forwarded to Kong, and similarly it + # changes the default plugin log serializer to + # use the port according to this mapping + # instead of reporting the port Kong is + # listening to. + +#anonymous_reports = on # Send anonymous usage data such as error + # stack traces to help improve Kong. + + +#proxy_server = # Proxy server defined as a URL. Kong will only use this + # option if any component is explicitly configured + # to use proxy. + + +#proxy_server_ssl_verify = off # Toggles server certificate verification if + # `proxy_server` is in HTTPS. + # See the `lua_ssl_trusted_certificate` + # setting to specify a certificate authority. + +#error_template_html = # Path to the custom html error template to + # override the default html kong error template. + # + # The template is required to contain one single `%s` + # placeholder for the error message, as in the + # following example: + # ``` + # <html> + # <body> + # <h1>My custom error template</h1> + # <p>%s.</p> + # </body> + # </html> + # ``` + +#error_template_json = # Path to the custom json error template to + # override the default json kong error template. + # + # Similarly to `error_template_html`, the template + # is required to contain one single `%s` placeholder for + # the error message. + +#error_template_xml = # Path to the custom xml error template to + # override the default xml kong error template + # + # Similarly to `error_template_html`, the template + # is required to contain one single `%s` placeholder for + # the error message. + +#error_template_plain = # Path to the custom plain error template to + # override the default plain kong error template + # + # Similarly to `error_template_html`, the template + # is required to contain one single `%s` placeholder for + # the error message. + +#------------------------------------------------------------------------------ +# HYBRID MODE +#------------------------------------------------------------------------------ + +#role = traditional # Use this setting to enable Hybrid Mode. + # This allows running some Kong nodes in a + # control plane role with a database and + # have them deliver configuration updates + # to other nodes running DB-less in + # a Data Plane role. + # + # Valid values to this setting are: + # + # - `traditional`: do not use Hybrid Mode. + # - `control_plane`: this node runs in a + # control plane role. It can use a database + # and will deliver configuration updates + # to data plane nodes. + # - `data_plane`: this is a data plane node. + # It runs DB-less and receives configuration + # updates from a control plane node. + +#cluster_mtls = shared # Sets the verification between nodes of the + # cluster. + # + # Valid values to this setting are: + # + # - `shared`: use a shared certificate/key + # pair specified with the `cluster_cert` + # and `cluster_cert_key` settings. + # Note that CP and DP nodes have to present + # the same certificate to establish mTLS + # connections. + # - `pki`: use `cluster_ca_cert`, + # `cluster_server_name` and `cluster_cert` + # for verification. + # These are different certificates for each + # DP node, but issued by a cluster-wide + # common CA certificate: `cluster_ca_cert`. + +#cluster_cert = # Cluster certificate to use + # when establishing secure communication + # between control and data plane nodes. + # You can use the `kong hybrid` command to + # generate the certificate/key pair. + # Under `shared` mode, it must be the same + # for all nodes. Under `pki` mode it + # should be a different certificate for each + # DP node. + # + # The certificate can be configured on this + # property with either of the following values: + # * absolute path to the certificate + # * certificate content + # * base64 encoded certificate content + +#cluster_cert_key = # Cluster certificate key to + # use when establishing secure communication + # between control and data plane nodes. + # You can use the `kong hybrid` command to + # generate the certificate/key pair. + # Under `shared` mode, it must be the same + # for all nodes. Under `pki` mode it + # should be a different certificate for each + # DP node.
+ # + # The certificate key can be configured on this + # property with either of the following values: + # * absolute path to the certificate key + # * certificate key content + # * base64 encoded certificate key content + +#cluster_ca_cert = # The trusted CA certificate file in PEM + # format used for Control Plane to verify + # Data Plane's certificate and Data Plane + # to verify Control Plane's certificate. + # Required on data plane if `cluster_mtls` + # is set to `pki`. + # If Control Plane certificate is issued + # by a well known CA, user can set + # `lua_ssl_trusted_certificate=system` + # on Data Plane and leave this field empty. + # + # This field is ignored if `cluster_mtls` is + # set to `shared`. + # + # The certificate can be configured on this property + # with either of the following values: + # * absolute path to the certificate + # * certificate content + # * base64 encoded certificate content + +#------------------------------------------------------------------------------ +# HYBRID MODE DATA PLANE +#------------------------------------------------------------------------------ + +#cluster_server_name = # The server name used in the SNI of the TLS + # connection from a DP node to a CP node. + # Must match the Common Name (CN) or Subject + # Alternative Name (SAN) found in the CP + # certificate. + # If `cluster_mtls` is set to + # `shared`, this setting is ignored and + # `kong_clustering` is used. + +#cluster_control_plane = # To be used by data plane nodes only: + # address of the control plane node from + # which configuration updates will be fetched, + # in `host:port` format. + +#cluster_max_payload = 16777216 + # This sets the maximum compressed payload size allowed + # to be sent across from CP to DP in Hybrid mode + # Default is 16MB - 16 * 1024 * 1024. + +#cluster_dp_labels = # Comma separated list of Labels for the data plane. + # Labels are key-value pairs that provide additional + # context information for each DP. + # Each label must be configured as a string in the + # format `key:value`. + # + # Labels are only compatible with hybrid mode + # deployments with Kong Konnect (SaaS), + # this configuration doesn't work with + # self-hosted deployments. + # + # Keys and values follow the AIP standards: + # https://kong-aip.netlify.app/aip/129/ + # + # Example: + # `deployment:mycloud,region:us-east-1` + +#------------------------------------------------------------------------------ +# HYBRID MODE CONTROL PLANE +#------------------------------------------------------------------------------ + +#cluster_listen = 0.0.0.0:8005 + # Comma-separated list of addresses and ports on + # which the cluster control plane server should listen + # for data plane connections. + # The cluster communication port of the control plane + # must be accessible by all the data planes + # within the same cluster. This port is mTLS protected + # to ensure end-to-end security and integrity. + # + # This setting has no effect if `role` is not set to + # `control_plane`. + # + # Connection made to this endpoint are logged + # to the same location as Admin API access logs. + # See `admin_access_log` config description for more + # information. + +#cluster_data_plane_purge_delay = 1209600 + # How many seconds must pass from the time a DP node + # becomes offline to the time its entry gets removed + # from the database, as returned by the + # /clustering/data-planes Admin API endpoint. + # + # This is to prevent the cluster data plane table from + # growing indefinitely. 
The default is set to + # 14 days. That is, if CP haven't heard from a DP for + # 14 days, its entry will be removed. + +#cluster_ocsp = off + # Whether to check for revocation status of DP + # certificates using OCSP (Online Certificate Status Protocol). + # If enabled, the DP certificate should contain the + # "Certificate Authority Information Access" extension + # and the OCSP method with URI of which the OCSP responder + # can be reached from CP. + # + # OCSP checks are only performed on CP nodes, it has no + # effect on DP nodes. + # + # Valid values to this setting are: + # + # - `on`: OCSP revocation check is enabled and DP + # must pass the check in order to establish + # connection with CP. + # - `off`: OCSP revocation check is disabled. + # - `optional`: OCSP revocation check will be attempted, + # however, if the required extension is not + # found inside DP provided certificate + # or communication with the OCSP responder + # failed, then DP is still allowed through. +#cluster_use_proxy = off + # Whether to turn on HTTP CONNECT proxy support for + # hybrid mode connections. `proxy_server` will be used + # for Hybrid mode connections if this option is turned on. +#------------------------------------------------------------------------------ +# NGINX +#------------------------------------------------------------------------------ +proxy_listen = 127.0.0.1:{%listen.port%} reuseport backlog=16384, 127.0.0.1:{%listen.ssl.port%} http2 ssl reuseport backlog=16384, {%listen.ip%}:{%listen.port%} reuseport backlog=16384, {%listen.ip%}:{%listen.ssl.port%} http2 ssl reuseport backlog=16384 +#proxy_listen = 0.0.0.0:8000 reuseport backlog=16384, 0.0.0.0:8443 http2 ssl reuseport backlog=16384 + # Comma-separated list of addresses and ports on + # which the proxy server should listen for + # HTTP/HTTPS traffic. + # The proxy server is the public entry point of Kong, + # which proxies traffic from your consumers to your + # backend services. This value accepts IPv4, IPv6, and + # hostnames. + # + # Some suffixes can be specified for each pair: + # + # - `ssl` will require that all connections made + # through a particular address/port be made with TLS + # enabled. + # - `http2` will allow for clients to open HTTP/2 + # connections to Kong's proxy server. + # - `proxy_protocol` will enable usage of the + # PROXY protocol for a given address/port. + # - `deferred` instructs to use a deferred accept on + # Linux (the TCP_DEFER_ACCEPT socket option). + # - `bind` instructs to make a separate bind() call + # for a given address:port pair. + # - `reuseport` instructs to create an individual + # listening socket for each worker process + # allowing the Kernel to better distribute incoming + # connections between worker processes + # - `backlog=N` sets the maximum length for the queue + # of pending TCP connections. This number should + # not be too small in order to prevent clients + # seeing "Connection refused" error connecting to + # a busy Kong instance. + # **Note:** on Linux, this value is limited by the + # setting of `net.core.somaxconn` Kernel parameter. + # In order for the larger `backlog` set here to take + # effect it is necessary to raise + # `net.core.somaxconn` at the same time to match or + # exceed the `backlog` number set. 
+ # - `ipv6only=on|off` whether an IPv6 socket listening + # on a wildcard address [::] will accept only IPv6 + # connections or both IPv6 and IPv4 connections + # - so_keepalive=on|off|[keepidle]:[keepintvl]:[keepcnt] + # configures the “TCP keepalive” behavior for the listening + # socket. If this parameter is omitted then the operating + # system’s settings will be in effect for the socket. If it + # is set to the value “on”, the SO_KEEPALIVE option is turned + # on for the socket. If it is set to the value “off”, the + # SO_KEEPALIVE option is turned off for the socket. Some + # operating systems support setting of TCP keepalive parameters + # on a per-socket basis using the TCP_KEEPIDLE, TCP_KEEPINTVL, + # and TCP_KEEPCNT socket options. + # + # This value can be set to `off`, thus disabling + # the HTTP/HTTPS proxy port for this node. + # If stream_listen is also set to `off`, this enables + # 'control-plane' mode for this node + # (in which all traffic proxying capabilities are + # disabled). This node can then be used only to + # configure a cluster of Kong + # nodes connected to the same datastore. + # + # Example: + # `proxy_listen = 0.0.0.0:443 ssl, 0.0.0.0:444 http2 ssl` + # + # See http://nginx.org/en/docs/http/ngx_http_core_module.html#listen + # for a description of the accepted formats for this + # and other `*_listen` values. + # + # See https://www.nginx.com/resources/admin-guide/proxy-protocol/ + # for more details about the `proxy_protocol` + # parameter. + # + # Not all `*_listen` values accept all formats + # specified in nginx's documentation. + +#stream_listen = off + # Comma-separated list of addresses and ports on + # which the stream mode should listen. + # + # This value accepts IPv4, IPv6, and hostnames. + # Some suffixes can be specified for each pair: + # - `ssl` will require that all connections made + # through a particular address/port be made with TLS + # enabled. + # - `proxy_protocol` will enable usage of the + # PROXY protocol for a given address/port. + # - `bind` instructs to make a separate bind() call + # for a given address:port pair. + # - `reuseport` instructs to create an individual + # listening socket for each worker process + # allowing the Kernel to better distribute incoming + # connections between worker processes + # - `backlog=N` sets the maximum length for the queue + # of pending TCP connections. This number should + # not be too small in order to prevent clients + # seeing "Connection refused" error connecting to + # a busy Kong instance. + # **Note:** on Linux, this value is limited by the + # setting of `net.core.somaxconn` Kernel parameter. + # In order for the larger `backlog` set here to take + # effect it is necessary to raise + # `net.core.somaxconn` at the same time to match or + # exceed the `backlog` number set. + # - `ipv6only=on|off` whether an IPv6 socket listening + # on a wildcard address [::] will accept only IPv6 + # connections or both IPv6 and IPv4 connections + # - so_keepalive=on|off|[keepidle]:[keepintvl]:[keepcnt] + # configures the “TCP keepalive” behavior for the listening + # socket. If this parameter is omitted then the operating + # system’s settings will be in effect for the socket. If it + # is set to the value “on”, the SO_KEEPALIVE option is turned + # on for the socket. If it is set to the value “off”, the + # SO_KEEPALIVE option is turned off for the socket. 
Some + # operating systems support setting of TCP keepalive parameters + # on a per-socket basis using the TCP_KEEPIDLE, TCP_KEEPINTVL, + # and TCP_KEEPCNT socket options. + # + # Examples: + # + # ``` + # stream_listen = 127.0.0.1:7000 reuseport backlog=16384 + # stream_listen = 0.0.0.0:989 reuseport backlog=65536, 0.0.0.0:20 + # stream_listen = [::1]:1234 backlog=16384 + # ``` + # + # By default this value is set to `off`, thus + # disabling the stream proxy port for this node. + +# See http://nginx.org/en/docs/stream/ngx_stream_core_module.html#listen +# for a description of the formats that Kong might accept in stream_listen. + +admin_listen = 127.0.0.1:{%admin.port%} reuseport backlog=16384, 127.0.0.1:{%admin.ssl.port%} http2 ssl reuseport backlog=16384 +#admin_listen = 127.0.0.1:8001 reuseport backlog=16384, 127.0.0.1:8444 http2 ssl reuseport backlog=16384 + # Comma-separated list of addresses and ports on + # which the Admin interface should listen. + # The Admin interface is the API allowing you to + # configure and manage Kong. + # Access to this interface should be *restricted* + # to Kong administrators *only*. This value accepts + # IPv4, IPv6, and hostnames. + # + # It is highly recommended to avoid exposing the Admin API to public + # interface(s), by using values such as 127.0.0.1:8001 + # + # See https://docs.konghq.com/gateway/latest/production/running-kong/secure-admin-api/ + # for more information about how to secure your Admin API + # + # Some suffixes can be specified for each pair: + # + # - `ssl` will require that all connections made + # through a particular address/port be made with TLS + # enabled. + # - `http2` will allow for clients to open HTTP/2 + # connections to Kong's proxy server. + # - `proxy_protocol` will enable usage of the + # PROXY protocol for a given address/port. + # - `deferred` instructs to use a deferred accept on + # Linux (the TCP_DEFER_ACCEPT socket option). + # - `bind` instructs to make a separate bind() call + # for a given address:port pair. + # - `reuseport` instructs to create an individual + # listening socket for each worker process + # allowing the Kernel to better distribute incoming + # connections between worker processes + # - `backlog=N` sets the maximum length for the queue + # of pending TCP connections. This number should + # not be too small in order to prevent clients + # seeing "Connection refused" error connecting to + # a busy Kong instance. + # **Note:** on Linux, this value is limited by the + # setting of `net.core.somaxconn` Kernel parameter. + # In order for the larger `backlog` set here to take + # effect it is necessary to raise + # `net.core.somaxconn` at the same time to match or + # exceed the `backlog` number set. + # - `ipv6only=on|off` whether an IPv6 socket listening + # on a wildcard address [::] will accept only IPv6 + # connections or both IPv6 and IPv4 connections + # - so_keepalive=on|off|[keepidle]:[keepintvl]:[keepcnt] + # configures the “TCP keepalive” behavior for the listening + # socket. If this parameter is omitted then the operating + # system’s settings will be in effect for the socket. If it + # is set to the value “on”, the SO_KEEPALIVE option is turned + # on for the socket. If it is set to the value “off”, the + # SO_KEEPALIVE option is turned off for the socket. Some + # operating systems support setting of TCP keepalive parameters + # on a per-socket basis using the TCP_KEEPIDLE, TCP_KEEPINTVL, + # and TCP_KEEPCNT socket options.
+ # + # This value can be set to `off`, thus disabling + # the Admin interface for this node, enabling a + # 'data-plane' mode (without configuration + # capabilities) pulling its configuration changes + # from the database. + # + # Example: `admin_listen = 127.0.0.1:8444 http2 ssl` + +#status_listen = off # Comma-separated list of addresses and ports on + # which the Status API should listen. + # The Status API is a read-only endpoint + # allowing monitoring tools to retrieve metrics, + # healthiness, and other non-sensitive information + # of the current Kong node. + # + # The following suffix can be specified for each pair: + # + # - `ssl` will require that all connections made + # through a particular address/port be made with TLS + # enabled. + # - `http2` will allow for clients to open HTTP/2 + # connections to Kong's proxy server. + # + # This value can be set to `off`, disabling + # the Status API for this node. + # + # Example: `status_listen = 0.0.0.0:8100 ssl http2` + + +#nginx_user = kong kong # Defines user and group credentials used by + # worker processes. If group is omitted, a + # group whose name equals that of user is + # used. + # + # Example: `nginx_user = nginx www` + # + # **Note**: If the `kong` user and the `kong` + # group are not available, the default user + # and group credentials will be + # `nobody nobody`. + +#nginx_worker_processes = auto # Determines the number of worker processes + # spawned by Nginx. + # + # See http://nginx.org/en/docs/ngx_core_module.html#worker_processes + # for detailed usage of the equivalent Nginx + # directive and a description of accepted + # values. + +#nginx_daemon = on # Determines whether Nginx will run as a daemon + # or as a foreground process. Mainly useful + # for development or when running Kong inside + # a Docker environment. + # + # See http://nginx.org/en/docs/ngx_core_module.html#daemon. + +#mem_cache_size = 128m # Size of each of the two shared memory caches + # for traditional mode database entities + # and runtime data. + # The accepted units are `k` and `m`, with a minimum + # recommended value of a few MBs. + # + # **Note**: As this option controls the size of two + # different cache zones, the total memory Kong + # uses to cache entities might be double this value. + # The created zones are shared by all worker + # processes and do not become larger when more + # worker is used. + +#ssl_cipher_suite = intermediate # Defines the TLS ciphers served by Nginx. + # Accepted values are `modern`, + # `intermediate`, `old`, `fips` or `custom`. + # + # See https://wiki.mozilla.org/Security/Server_Side_TLS + # for detailed descriptions of each cipher + # suite. `fips` cipher suites are as described in + # https://wiki.openssl.org/index.php/FIPS_mode_and_TLS. + +#ssl_ciphers = # Defines a custom list of TLS ciphers to be + # served by Nginx. This list must conform to + # the pattern defined by `openssl ciphers`. + # This value is ignored if `ssl_cipher_suite` + # is not `custom`. + +#ssl_protocols = TLSv1.1 TLSv1.2 TLSv1.3 + # Enables the specified protocols for + # client-side connections. The set of + # supported protocol versions also depends + # on the version of OpenSSL Kong was built + # with. This value is ignored if + # `ssl_cipher_suite` is not `custom`. + # + # See http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols + +#ssl_prefer_server_ciphers = on # Specifies that server ciphers should be + # preferred over client ciphers when using + # the SSLv3 and TLS protocols. 
This value is
+ # ignored if `ssl_cipher_suite` is not `custom`.
+ #
+ # See http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_prefer_server_ciphers
+
+#ssl_dhparam = # Defines DH parameters for DHE ciphers from the
+ # predefined groups: `ffdhe2048`, `ffdhe3072`,
+ # `ffdhe4096`, `ffdhe6144`, `ffdhe8192`,
+ # from the absolute path to a parameters file, or
+ # directly from the parameters content.
+ #
+ # This value is ignored if `ssl_cipher_suite`
+ # is `modern` or `intermediate`. The reason is
+ # that `modern` has no ciphers that need this,
+ # and `intermediate` uses `ffdhe2048`.
+ #
+ # See http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_dhparam
+
+#ssl_session_tickets = on # Enables or disables session resumption through
+ # TLS session tickets. This has no impact when
+ # used with TLSv1.3.
+ #
+ # Kong enables this by default for performance
+ # reasons, but it has security implications:
+ # https://github.com/mozilla/server-side-tls/issues/135
+ #
+ # See http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_tickets
+
+#ssl_session_timeout = 1d # Specifies a time during which a client may
+ # reuse the session parameters. See the rationale:
+ # https://github.com/mozilla/server-side-tls/issues/198
+ #
+ # See http://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_timeout
+
+#ssl_session_cache_size = 10m # Sets the size of the caches that store session parameters.
+ #
+ # See https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_cache
+
+#ssl_cert = # Comma-separated list of certificates for `proxy_listen` values with TLS enabled.
+ #
+ # If more than one certificate is specified, this can be used to provide
+ # an alternate type of certificate (for example, an ECC certificate) that will
+ # be served to clients that support it. Note that to properly serve using ECC
+ # certificates, it is recommended to also set `ssl_cipher_suite` to
+ # `modern` or `intermediate`.
+ #
+ # Unless this option is explicitly set, Kong will auto-generate
+ # a pair of default certificates (RSA + ECC) the first time it starts up and
+ # use them for serving TLS requests.
+ #
+ # Certificates can be configured on this property with either of the following
+ # values:
+ # * absolute path to the certificate
+ # * certificate content
+ # * base64 encoded certificate content
+
+#ssl_cert_key = # Comma-separated list of keys for `proxy_listen` values with TLS enabled.
+ #
+ # If more than one certificate was specified for `ssl_cert`, then this
+ # option should contain the corresponding key for all certificates
+ # provided in the same order.
+ #
+ # Unless this option is explicitly set, Kong will auto-generate
+ # a pair of default private keys (RSA + ECC) the first time it starts up and
+ # use them for serving TLS requests.
+ #
+ # Keys can be configured on this property with either of the following
+ # values:
+ # * absolute path to the certificate key
+ # * certificate key content
+ # * base64 encoded certificate key content
+
+#client_ssl = off # Determines if Nginx should attempt to send client-side
+ # TLS certificates and perform Mutual TLS Authentication
+ # with the upstream service when proxying requests.
+
+#client_ssl_cert = # If `client_ssl` is enabled, the client certificate
+ # for the `proxy_ssl_certificate` directive.
+ #
+ # This value can be overwritten dynamically with the `client_certificate`
+ # attribute of the `Service` object.
+ # + # The certificate can be configured on this property with either of the following + # values: + # * absolute path to the certificate + # * certificate content + # * base64 encoded certificate content + +#client_ssl_cert_key = # If `client_ssl` is enabled, the client TLS key + # for the `proxy_ssl_certificate_key` directive. + # + # This value can be overwritten dynamically with the `client_certificate` + # attribute of the `Service` object. + # + # The certificate key can be configured on this property with either of the following + # values: + # * absolute path to the certificate key + # * certificate key content + # * base64 encoded certificate key content + +#admin_ssl_cert = # Comma-separated list of certificates for `admin_listen` values with TLS enabled. + # + # See docs for `ssl_cert` for detailed usage. + +#admin_ssl_cert_key = # Comma-separated list of keys for `admin_listen` values with TLS enabled. + # + # See docs for `ssl_cert_key` for detailed usage. + +#status_ssl_cert = # Comma-separated list of certificates for `status_listen` values with TLS enabled. + # + # See docs for `ssl_cert` for detailed usage. + +#status_ssl_cert_key = # Comma-separated list of keys for `status_listen` values with TLS enabled. + # + # See docs for `ssl_cert_key` for detailed usage. + +#headers = server_tokens, latency_tokens + # Comma-separated list of headers Kong should + # inject in client responses. + # + # Accepted values are: + # - `Server`: Injects `Server: kong/x.y.z` + # on Kong-produced response (e.g. Admin + # API, rejected requests from auth plugin). + # - `Via`: Injects `Via: kong/x.y.z` for + # successfully proxied requests. + # - `X-Kong-Proxy-Latency`: Time taken + # (in milliseconds) by Kong to process + # a request and run all plugins before + # proxying the request upstream. + # - `X-Kong-Response-Latency`: time taken + # (in millisecond) by Kong to produce + # a response in case of e.g. plugin + # short-circuiting the request, or in + # in case of an error. + # - `X-Kong-Upstream-Latency`: Time taken + # (in milliseconds) by the upstream + # service to send response headers. + # - `X-Kong-Admin-Latency`: Time taken + # (in milliseconds) by Kong to process + # an Admin API request. + # - `X-Kong-Upstream-Status`: The HTTP status + # code returned by the upstream service. + # This is particularly useful for clients to + # distinguish upstream statuses if the + # response is rewritten by a plugin. + # - `server_tokens`: Same as specifying both + # `Server` and `Via`. + # - `latency_tokens`: Same as specifying + # `X-Kong-Proxy-Latency`, + # `X-Kong-Response-Latency`, + # `X-Kong-Admin-Latency` and + # `X-Kong-Upstream-Latency` + # + # In addition to those, this value can be set + # to `off`, which prevents Kong from injecting + # any of the above headers. Note that this + # does not prevent plugins from injecting + # headers of their own. + # + # Example: `headers = via, latency_tokens` + +#trusted_ips = # Defines trusted IP addresses blocks that are + # known to send correct `X-Forwarded-*` + # headers. + # Requests from trusted IPs make Kong forward + # their `X-Forwarded-*` headers upstream. + # Non-trusted requests make Kong insert its + # own `X-Forwarded-*` headers. + # + # This property also sets the + # `set_real_ip_from` directive(s) in the Nginx + # configuration. It accepts the same type of + # values (CIDR blocks) but as a + # comma-separated list. + # + # To trust *all* /!\ IPs, set this value to + # `0.0.0.0/0,::/0`. 
+ # + # If the special value `unix:` is specified, + # all UNIX-domain sockets will be trusted. + # + # See http://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from + # for examples of accepted values. + +#real_ip_header = X-Real-IP # Defines the request header field whose value + # will be used to replace the client address. + # This value sets the `ngx_http_realip_module` + # directive of the same name in the Nginx + # configuration. + # + # If this value receives `proxy_protocol`: + # + # - at least one of the `proxy_listen` entries + # must have the `proxy_protocol` flag + # enabled. + # - the `proxy_protocol` parameter will be + # appended to the `listen` directive of the + # Nginx template. + # + # See http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header + # for a description of this directive. + +#real_ip_recursive = off # This value sets the `ngx_http_realip_module` + # directive of the same name in the Nginx + # configuration. + # + # See http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive + # for a description of this directive. + +#error_default_type = text/plain # Default MIME type to use when the request + # `Accept` header is missing and Nginx + # is returning an error for the request. + # Accepted values are `text/plain`, + # `text/html`, `application/json`, and + # `application/xml`. + +#upstream_keepalive_pool_size = 60 # Sets the default size of the upstream + # keepalive connection pools. + # Upstream keepalive connection pools + # are segmented by the `dst ip/dst + # port/SNI` attributes of a connection. + # A value of `0` will disable upstream + # keepalive connections by default, forcing + # each upstream request to open a new + # connection. + +#upstream_keepalive_max_requests = 100 # Sets the default maximum number of + # requests than can be proxied upstream + # through one keepalive connection. + # After the maximum number of requests + # is reached, the connection will be + # closed. + # A value of `0` will disable this + # behavior, and a keepalive connection + # can be used to proxy an indefinite + # number of requests. + +#upstream_keepalive_idle_timeout = 60 # Sets the default timeout (in seconds) + # for which an upstream keepalive + # connection should be kept open. When + # the timeout is reached while the + # connection has not been reused, it + # will be closed. + # A value of `0` will disable this + # behavior, and an idle keepalive + # connection may be kept open + # indefinitely. + +#allow_debug_header = off # Enable the `Kong-Debug` header function. + # if it is `on`, kong will add + # `Kong-Route-Id` `Kong-Route-Name` `Kong-Service-Id` + # `Kong-Service-Name` debug headers to response when + # the client request header `Kong-Debug: 1` is present. + +#------------------------------------------------------------------------------ +# NGINX injected directives +#------------------------------------------------------------------------------ + +# Nginx directives can be dynamically injected in the runtime nginx.conf file +# without requiring a custom Nginx configuration template. +# +# All configuration properties respecting the naming scheme +# `nginx__` will result in `` being injected in +# the Nginx configuration block corresponding to the property's ``. 
+# Example: +# `nginx_proxy_large_client_header_buffers = 8 24k` +# +# Will inject the following directive in Kong's proxy `server {}` block: +# +# `large_client_header_buffers 8 24k;` +# +# The following namespaces are supported: +# +# - `nginx_main_`: Injects `` in Kong's configuration +# `main` context. +# - `nginx_events_`: Injects `` in Kong's `events {}` +# block. +# - `nginx_http_`: Injects `` in Kong's `http {}` block. +# - `nginx_proxy_`: Injects `` in Kong's proxy +# `server {}` block. +# - `nginx_upstream_`: Injects `` in Kong's proxy +# `upstream {}` block. +# - `nginx_admin_`: Injects `` in Kong's Admin API +# `server {}` block. +# - `nginx_status_`: Injects `` in Kong's Status API +# `server {}` block (only effective if `status_listen` is enabled). +# - `nginx_stream_`: Injects `` in Kong's stream module +# `stream {}` block (only effective if `stream_listen` is enabled). +# - `nginx_sproxy_`: Injects `` in Kong's stream module +# `server {}` block (only effective if `stream_listen` is enabled). +# - `nginx_supstream_`: Injects `` in Kong's stream +# module `upstream {}` block. +# +# As with other configuration properties, Nginx directives can be injected via +# environment variables when capitalized and prefixed with `KONG_`. +# Example: +# `KONG_NGINX_HTTP_SSL_PROTOCOLS` -> `nginx_http_ssl_protocols` +# +# Will inject the following directive in Kong's `http {}` block: +# +# `ssl_protocols ;` +# +# If different sets of protocols are desired between the proxy and Admin API +# server, you may specify `nginx_proxy_ssl_protocols` and/or +# `nginx_admin_ssl_protocols`, both of which taking precedence over the +# `http {}` block. + +#nginx_main_worker_rlimit_nofile = auto + # Changes the limit on the maximum number of open files + # for worker processes. + # + # The special and default value of `auto` sets this + # value to `ulimit -n` with the upper bound limited to + # 16384 as a measure to protect against excess memory use, + # and the lower bound of 1024 as a good default. + # + # See http://nginx.org/en/docs/ngx_core_module.html#worker_rlimit_nofile + +#nginx_events_worker_connections = auto + # Sets the maximum number of simultaneous + # connections that can be opened by a worker process. + # + # The special and default value of `auto` sets this + # value to `ulimit -n` with the upper bound limited to + # 16384 as a measure to protect against excess memory use, + # and the lower bound of 1024 as a good default. + # + # See http://nginx.org/en/docs/ngx_core_module.html#worker_connections + +#nginx_http_client_header_buffer_size = 1k # Sets buffer size for reading the + # client request headers. + # See http://nginx.org/en/docs/http/ngx_http_core_module.html#client_header_buffer_size + +#nginx_http_large_client_header_buffers = 4 8k # Sets the maximum number and + # size of buffers used for + # reading large clients + # requests headers. + # See http://nginx.org/en/docs/http/ngx_http_core_module.html#large_client_header_buffers + +#nginx_http_client_max_body_size = 0 # Defines the maximum request body size + # allowed by requests proxied by Kong, + # specified in the Content-Length request + # header. If a request exceeds this + # limit, Kong will respond with a 413 + # (Request Entity Too Large). Setting + # this value to 0 disables checking the + # request body size. + # See http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size + +#nginx_admin_client_max_body_size = 10m # Defines the maximum request body size for + # Admin API. 
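As a concrete illustration of the namespace scheme above, here is a minimal, hedged sketch of injecting directives through the environment instead of editing this template. The directive values and the config path are placeholders, not defaults of this runtime; the `KONG_NGINX_<namespace>_<directive>` mapping itself is the mechanism described above.

```bash
#!/bin/bash
# Sketch: inject Nginx directives via environment variables. Each
# KONG_NGINX_<NAMESPACE>_<DIRECTIVE> variable becomes "<directive> <value>;"
# in the corresponding block of the generated nginx.conf.

# Injects `ssl_protocols TLSv1.2 TLSv1.3;` into Kong's http {} block.
export KONG_NGINX_HTTP_SSL_PROTOCOLS="TLSv1.2 TLSv1.3"

# Injects `large_client_header_buffers 8 24k;` into the proxy server {} block.
export KONG_NGINX_PROXY_LARGE_CLIENT_HEADER_BUFFERS="8 24k"

# The environment values take effect the next time Kong (re)starts.
kong restart -c "$HOME/runtime/kong/conf/kong.conf"
```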
+ +#nginx_http_charset = UTF-8 # Adds the specified charset to the “Content-Type” + # response header field. If this charset is different + # from the charset specified in the source_charset + # directive, a conversion is performed. + # + # The parameter `off` cancels the addition of + # charset to the “Content-Type” response header field. + # See http://nginx.org/en/docs/http/ngx_http_charset_module.html#charset + +#nginx_http_client_body_buffer_size = 8k # Defines the buffer size for reading + # the request body. If the client + # request body is larger than this + # value, the body will be buffered to + # disk. Note that when the body is + # buffered to disk, Kong plugins that + # access or manipulate the request + # body may not work, so it is + # advisable to set this value as high + # as possible (e.g., set it as high + # as `client_max_body_size` to force + # request bodies to be kept in + # memory). Do note that + # high-concurrency environments will + # require significant memory + # allocations to process many + # concurrent large request bodies. + # See http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size + +#nginx_admin_client_body_buffer_size = 10m # Defines the buffer size for reading + # the request body on Admin API. + +#nginx_http_lua_regex_match_limit = 100000 # Global `MATCH_LIMIT` for PCRE + # regex matching. The default of `100000` should ensure + # at worst any regex Kong executes could finish within + # roughly 2 seconds. + +#nginx_http_lua_regex_cache_max_entries = 8192 # Specifies the maximum number of entries allowed + # in the worker process level compiled regex cache. + # It is recommended to set it to at least (number of regex paths * 2) + # to avoid high CPU usages. + +#------------------------------------------------------------------------------ +# DATASTORE +#------------------------------------------------------------------------------ + +# Kong can run with a database to store coordinated data between Kong nodes in +# a cluster, or without a database, where each node stores its information +# independently in memory. +# +# When using a database, Kong will store data for all its entities (such as +# Routes, Services, Consumers, and Plugins) in PostgreSQL, +# and all Kong nodes belonging to the same cluster must connect themselves +# to the same database. +# +# Kong supports PostgreSQL versions 9.5 and above. +# +# When not using a database, Kong is said to be in "DB-less mode": it will keep +# its entities in memory, and each node needs to have this data entered via a +# declarative configuration file, which can be specified through the +# `declarative_config` property, or via the Admin API using the `/config` +# endpoint. +# +# When using Postgres as the backend storage, you can optionally enable Kong +# to serve read queries from a separate database instance. +# When the number of proxies is large, this can greatly reduce the load +# on the main Postgres instance and achieve better scalability. It may also +# reduce the latency jitter if the Kong proxy node's latency to the main +# Postgres instance is high. +# +# The read-only Postgres instance only serves read queries and write +# queries still goes to the main connection. The read-only Postgres instance +# can be eventually consistent while replicating changes from the main +# instance. +# +# At least the `pg_ro_host` config is needed to enable this feature. 
+# By default, all other database config for the read-only connection is inherited
+# from the corresponding main connection config described above, but
+# may be optionally overwritten explicitly using the `pg_ro_*` config below.
+
+database = postgres # Determines the database (or no database) for
+ # this node.
+ # Accepted values are `postgres` and `off`.
+
+pg_host = {%database.host%} # Host of the Postgres server.
+pg_port = {%database.port%} # Port of the Postgres server.
+#pg_timeout = 5000 # Defines the timeout (in ms) for connecting,
+ # reading and writing.
+
+pg_user = {%database.user%} # Postgres user.
+pg_password = {%database.password%} # Postgres user's password.
+pg_database = {%database.name%} # The database name to connect to.
+
+#pg_schema = # The database schema to use. If unspecified,
+ # Kong will respect the `search_path` value of
+ # your PostgreSQL instance.
+
+#pg_ssl = off # Toggles client-server TLS connections
+ # between Kong and PostgreSQL.
+ # Because PostgreSQL uses the same port for TLS
+ # and non-TLS, this is only a hint. If the
+ # server does not support TLS, the established
+ # connection will be a plain one.
+
+#pg_ssl_verify = off # Toggles server certificate verification if
+ # `pg_ssl` is enabled.
+ # See the `lua_ssl_trusted_certificate`
+ # setting to specify a certificate authority.
+
+#pg_max_concurrent_queries = 0 # Sets the maximum number of concurrent queries
+ # that can be executing at any given time. This
+ # limit is enforced per worker process; the
+ # total number of concurrent queries for this
+ # node will be:
+ # `pg_max_concurrent_queries * nginx_worker_processes`.
+ #
+ # The default value of 0 removes this
+ # concurrency limitation.
+
+#pg_semaphore_timeout = 60000 # Defines the timeout (in ms) after which
+ # PostgreSQL query semaphore resource
+ # acquisition attempts will fail. Such
+ # failures will generally result in the
+ # associated proxy or Admin API request
+ # failing with an HTTP 500 status code.
+ # Detailed discussion of this behavior is
+ # available in the online documentation.
+
+#pg_keepalive_timeout = # Specifies the maximal idle timeout (in ms)
+ # for the postgres connections in the pool.
+ # If this value is set to 0 then the timeout interval
+ # is unlimited.
+ #
+ # If not specified, this value will be the same as
+ # `lua_socket_keepalive_timeout`
+
+#pg_pool_size = # Specifies the size limit (in terms of connection
+ # count) for the Postgres server.
+ # Note that this connection pool is intended
+ # per Nginx worker rather than per Kong instance.
+ #
+ # If not specified, the default value is the same as
+ # `lua_socket_pool_size`
+
+#pg_backlog = # If specified, this value will limit the total
+ # number of open connections to the Postgres
+ # server to `pg_pool_size`. If the connection
+ # pool is full, subsequent connect operations
+ # will be inserted in a queue with size equal
+ # to this option's value.
+ #
+ # If the number of queued connect operations
+ # reaches `pg_backlog`, exceeding connections will fail.
+ #
+ # If not specified, then the number of open connections
+ # to the Postgres server is not limited.
+
+#pg_ro_host = # Same as `pg_host`, but for the
+ # read-only connection.
+ # **Note:** Refer to the documentation
+ # section above for detailed usage.
+
+#pg_ro_port = # Same as `pg_port`, but for the
+ # read-only connection.
+
+#pg_ro_timeout = # Same as `pg_timeout`, but for the
+ # read-only connection.
+
+#pg_ro_user = # Same as `pg_user`, but for the
+ # read-only connection.
+
+#pg_ro_password = # Same as `pg_password`, but for the
+ # read-only connection.
+
+#pg_ro_database = # Same as `pg_database`, but for the
+ # read-only connection.
+
+#pg_ro_schema = # Same as `pg_schema`, but for the
+ # read-only connection.
+
+#pg_ro_ssl = # Same as `pg_ssl`, but for the
+ # read-only connection.
+
+#pg_ro_ssl_verify =
+ # Same as `pg_ssl_verify`, but for the
+ # read-only connection.
+
+#pg_ro_max_concurrent_queries =
+ # Same as `pg_max_concurrent_queries`, but for
+ # the read-only connection.
+ # Note: read-only concurrency is not shared
+ # with the main (read-write) connection.
+
+#pg_ro_semaphore_timeout =
+ # Same as `pg_semaphore_timeout`, but for the
+ # read-only connection.
+
+#pg_ro_keepalive_timeout =
+ # Same as `pg_keepalive_timeout`, but for the
+ # read-only connection.
+
+#pg_ro_pool_size =
+ # Same as `pg_pool_size`, but for the
+ # read-only connection.
+
+#pg_ro_backlog =
+ # Same as `pg_backlog`, but for the
+ # read-only connection.
+
+#declarative_config = # The path to the declarative configuration
+ # file which holds the specification of all
+ # entities (Routes, Services, Consumers, etc.)
+ # to be used when the `database` is set to
+ # `off`.
+ #
+ # Entities are stored in Kong's LMDB cache,
+ # so you must ensure that enough headroom is
+ # allocated to it via the `lmdb_map_size`
+ # property.
+ #
+ # If the Hybrid mode `role` is set to `data_plane`
+ # and there's no configuration cache file,
+ # this configuration is used before connecting
+ # to the Control Plane node as a user-controlled
+ # fallback.
+
+#declarative_config_string = # The declarative configuration as a string.
+
+#lmdb_environment_path = dbless.lmdb # Directory where the LMDB database files used by
+ # DB-less and Hybrid mode to store Kong
+ # configurations reside.
+ #
+ # This path is relative under the Kong `prefix`.
+
+#lmdb_map_size = 2048m # Maximum size of the LMDB memory map, used to store the
+ # DB-less and Hybrid mode configurations. Default is 2048m.
+ #
+ # This config defines the limit of LMDB file size, the
+ # actual file size growth will be on-demand and
+ # proportional to the actual config size.
+ #
+ # Note this value can be set very large, say a couple of GBs
+ # to accommodate future database growth and
+ # Multi Version Concurrency Control (MVCC) headroom needs.
+ # The file size of the LMDB database file should stabilize
+ # after a few config reload/Hybrid mode syncs and the actual
+ # memory used by the LMDB database will be smaller than
+ # the file size due to dynamic swapping of database pages by
+ # the OS.
+
+#------------------------------------------------------------------------------
+# DATASTORE CACHE
+#------------------------------------------------------------------------------
+
+# In order to avoid unnecessary communication with the datastore, Kong caches
+# entities (such as APIs, Consumers, Credentials...) for a configurable period
+# of time. It also handles invalidations if such an entity is updated.
+#
+# This section allows for configuring the behavior of Kong regarding the
+# caching of such configuration entities.
+
+#db_update_frequency = 5 # Frequency (in seconds) at which to check for
+ # updated entities with the datastore.
+ #
+ # When a node creates, updates, or deletes an
+ # entity via the Admin API, other nodes need
+ # to wait for the next poll (configured by
+ # this value) to eventually purge the old
+ # cached entity and start using the new one.
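For contrast with the Postgres-backed setup this template uses, here is a minimal sketch of the DB-less alternative described above. The example service, route, and file paths are hypothetical; they only illustrate how `database = off` pairs with `declarative_config` and the LMDB cache.

```bash
#!/bin/bash
# Sketch: running a node in DB-less mode with a declarative configuration.
# The service and route below are placeholders, not part of this runtime.

cat > /tmp/kong.yml <<'EOF'
_format_version: "3.0"
services:
  - name: example-service
    url: http://127.0.0.1:8080
    routes:
      - name: example-route
        paths:
          - /example
EOF

# `database = off` plus `declarative_config` switches Kong to DB-less mode;
# entities are loaded into the LMDB cache sized by `lmdb_map_size`.
export KONG_DATABASE=off
export KONG_DECLARATIVE_CONFIG=/tmp/kong.yml
kong start -c "$HOME/runtime/kong/conf/kong.conf"
```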
+ +#db_update_propagation = 0 # Time (in seconds) taken for an entity in the + # datastore to be propagated to replica nodes + # of another datacenter. + # + # When set, this property will increase the + # time taken by Kong to propagate the change + # of an entity. + # + # Single-datacenter setups or PostgreSQL + # servers should suffer no such delays, and + # this value can be safely set to 0. + # Postgres setups with read replicas should + # set this value to maximum expected replication + # lag between the writer and reader instances. + +#db_cache_ttl = 0 # Time-to-live (in seconds) of an entity from + # the datastore when cached by this node. + # + # Database misses (no entity) are also cached + # according to this setting if you do not + # configure `db_cache_neg_ttl`. + # + # If set to 0 (default), such cached entities + # or misses never expire. + +#db_cache_neg_ttl = # Time-to-live (in seconds) of a datastore + # miss (no entity). + # + # If not specified (default), `db_cache_ttl` + # value will be used instead. + # + # If set to 0, misses will never expire. + +#db_resurrect_ttl = 30 # Time (in seconds) for which stale entities + # from the datastore should be resurrected for + # when they cannot be refreshed (e.g., the + # datastore is unreachable). When this TTL + # expires, a new attempt to refresh the stale + # entities will be made. + +#db_cache_warmup_entities = services + # Entities to be pre-loaded from the datastore + # into the in-memory cache at Kong start-up. + # This speeds up the first access of endpoints + # that use the given entities. + # + # When the `services` entity is configured + # for warmup, the DNS entries for values in + # its `host` attribute are pre-resolved + # asynchronously as well. + # + # Cache size set in `mem_cache_size` should + # be set to a value large enough to hold all + # instances of the specified entities. + # If the size is insufficient, Kong will log + # a warning. + +#------------------------------------------------------------------------------ +# DNS RESOLVER +#------------------------------------------------------------------------------ + +# By default, the DNS resolver will use the standard configuration files +# `/etc/hosts` and `/etc/resolv.conf`. The settings in the latter file will be +# overridden by the environment variables `LOCALDOMAIN` and `RES_OPTIONS` if +# they have been set. +# +# Kong will resolve hostnames as either `SRV` or `A` records (in that order, and +# `CNAME` records will be dereferenced in the process). +# In case a name was resolved as an `SRV` record it will also override any given +# port number by the `port` field contents received from the DNS server. +# +# The DNS options `SEARCH` and `NDOTS` (from the `/etc/resolv.conf` file) will +# be used to expand short names to fully qualified ones. So it will first try +# the entire `SEARCH` list for the `SRV` type, if that fails it will try the +# `SEARCH` list for `A`, etc. +# +# For the duration of the `ttl`, the internal DNS resolver will loadbalance each +# request it gets over the entries in the DNS record. For `SRV` records the +# `weight` fields will be honored, but it will only use the lowest `priority` +# field entries in the record. + +#dns_resolver = # Comma separated list of nameservers, each + # entry in `ip[:port]` format to be used by + # Kong. If not specified the nameservers in + # the local `resolv.conf` file will be used. + # Port defaults to 53 if omitted. Accepts + # both IPv4 and IPv6 addresses. 
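A short, hedged sketch of overriding the resolver settings above from the environment, assuming the standard `KONG_` property-to-environment mapping; the nameserver addresses are placeholders for whatever resolvers exist in the deployment network.

```bash
#!/bin/bash
# Sketch: point Kong's internal DNS resolver at explicit nameservers
# instead of the local resolv.conf. Addresses are examples only.
export KONG_DNS_RESOLVER="10.0.0.2:53,10.0.0.3:53"

# Pick up the new resolver settings without a full restart.
kong reload -c "$HOME/runtime/kong/conf/kong.conf"
```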
+ +#dns_hostsfile = /etc/hosts # The hosts file to use. This file is read + # once and its content is static in memory. + # To read the file again after modifying it, + # Kong must be reloaded. + +#dns_order = LAST,SRV,A,CNAME # The order in which to resolve different + # record types. The `LAST` type means the + # type of the last successful lookup (for the + # specified name). The format is a (case + # insensitive) comma separated list. + +#dns_valid_ttl = # By default, DNS records are cached using + # the TTL value of a response. If this + # property receives a value (in seconds), it + # will override the TTL for all records. + +#dns_stale_ttl = 4 # Defines, in seconds, how long a record will + # remain in cache past its TTL. This value + # will be used while the new DNS record is + # fetched in the background. + # Stale data will be used from expiry of a + # record until either the refresh query + # completes, or the `dns_stale_ttl` number of + # seconds have passed. + +#dns_cache_size = 10000 # Defines the maximum allowed number of + # DNS records stored in memory cache. + # Least recently used DNS records are discarded + # from cache if it is full. Both errors and + # data are cached, therefore a single name query + # can easily take up 10-15 slots. + +#dns_not_found_ttl = 30 # TTL in seconds for empty DNS responses and + # "(3) name error" responses. + +#dns_error_ttl = 1 # TTL in seconds for error responses. + +#dns_no_sync = off # If enabled, then upon a cache-miss every + # request will trigger its own dns query. + # When disabled multiple requests for the + # same name/type will be synchronised to a + # single query. + +#------------------------------------------------------------------------------ +# VAULTS +#------------------------------------------------------------------------------ + +# A secret is any sensitive piece of information required for API gateway +# operations. Secrets may be part of the core Kong Gateway configuration, +# used in plugins, or part of the configuration associated with APIs serviced +# by the gateway. +# +# Some of the most common types of secrets used by Kong Gateway include: +# +# - Data store usernames and passwords, used with PostgreSQL and Redis +# - Private X.509 certificates +# - API keys +# +# Sensitive plugin configuration fields are generally used for authentication, +# hashing, signing, or encryption. Kong Gateway lets you store certain values +# in a vault. Here are the vault specific configuration options. + +#vault_env_prefix = # Defines the environment variable vault's + # default prefix. For example if you have + # all your secrets stored in environment + # variables prefixed with `SECRETS_`, it + # can be configured here so that it isn't + # necessary to repeat them in Vault + # references. + +#------------------------------------------------------------------------------ +# TUNING & BEHAVIOR +#------------------------------------------------------------------------------ + +#worker_consistency = eventual + # Defines whether this node should rebuild its + # state synchronously or asynchronously (the + # balancers and the router are rebuilt on + # updates that affects them, e.g., updates to + # Routes, Services or Upstreams, via the Admin + # API or loading a declarative configuration + # file). (This option is deprecated and will be + # removed in future releases. The new default + # is `eventual`.) 
+ # + # Accepted values are: + # + # - `strict`: the router will be rebuilt + # synchronously, causing incoming requests to + # be delayed until the rebuild is finished. + # (This option is deprecated and will be removed + # in future releases. The new default is `eventual`) + # - `eventual`: the router will be rebuilt + # asynchronously via a recurring background + # job running every second inside of each + # worker. + # + # Note that `strict` ensures that all workers + # of a given node will always proxy requests + # with an identical router, but that increased + # long tail latency can be observed if + # frequent Routes and Services updates are + # expected. + # Using `eventual` will help preventing long + # tail latency issues in such cases, but may + # cause workers to route requests differently + # for a short period of time after Routes and + # Services updates. + +#worker_state_update_frequency = 5 + # Defines how often the worker state changes are + # checked with a background job. When a change + # is detected, a new router or balancer will be + # built, as needed. Raising this value will + # decrease the load on database servers and + # result in less jitter in proxy latency, but + # it might take more time to propagate changes + # to each individual worker. + +#router_flavor = traditional_compatible + # Selects the router implementation to use when + # performing request routing. Incremental router + # rebuild is available when the flavor is set + # to either `expressions` or + # `traditional_compatible` which could + # significantly shorten rebuild time for large + # number of routes. + # + # Accepted values are: + # + # - `traditional_compatible`: the DSL based expression + # router engine will be used under the hood. However + # the router config interface will be the same + # as `traditional` and expressions are + # automatically generated at router build time. + # The `expression` field on the `Route` object + # is not visible. + # - `expressions`: the DSL based expression router engine + # will be used under the hood. Traditional router + # config interface is not visible and you must write + # Router Expression manually and provide them in the + # `expression` field on the `Route` object. + # - `traditional`: the pre-3.0 Router engine will be + # used. Config interface will be the same as + # pre-3.0 Kong and the `expression` field on the + # `Route` object is not visible. + # + # Deprecation warning: In Kong 3.0, `traditional` + # mode should be avoided and only be used in case + # `traditional_compatible` did not work as expected. + # This flavor of router will be removed in the next + # major release of Kong. + +#lua_max_req_headers = 100 # Maximum number of request headers to parse by default. + # + # This argument can be set to an integer between 1 and 1000. + # + # When proxying the Kong sends all the request headers + # and this setting does not have any effect. It is used + # to limit Kong and its plugins from reading too many + # request headers. + +#lua_max_resp_headers = 100 # Maximum number of response headers to parse by default. + # + # This argument can be set to an integer between 1 and 1000. + # + # When proxying, Kong returns all the response headers + # and this setting does not have any effect. It is used + # to limit Kong and its plugins from reading too many + # response headers. + +#lua_max_uri_args = 100 # Maximum number of request uri arguments to parse by + # default. + # + # This argument can be set to an integer between 1 and 1000. 
+ # + # When proxying, Kong sends all the request query + # arguments and this setting does not have any effect. + # It is used to limit Kong and its plugins from reading + # too many query arguments. + +#lua_max_post_args = 100 # Maximum number of request post arguments to parse by + # default. + # + # This argument can be set to an integer between 1 and 1000. + # + # When proxying, Kong sends all the request post + # arguments and this setting does not have any effect. + # It is used to limit Kong and its plugins from reading + # too many post arguments. + +#------------------------------------------------------------------------------ +# MISCELLANEOUS +#------------------------------------------------------------------------------ + +# Additional settings inherited from lua-nginx-module allowing for more +# flexibility and advanced usage. +# +# See the lua-nginx-module documentation for more information: +# https://github.com/openresty/lua-nginx-module + + +#lua_ssl_trusted_certificate = system # Comma-separated list of certificate authorities + # for Lua cosockets in PEM format. + # + # The special value `system` attempts to search for the + # "usual default" provided by each distro, according + # to an arbitrary heuristic. In the current implementation, + # The following pathnames will be tested in order, + # and the first one found will be used: + # + # - /etc/ssl/certs/ca-certificates.crt (Debian/Ubuntu/Gentoo) + # - /etc/pki/tls/certs/ca-bundle.crt (Fedora/RHEL 6) + # - /etc/ssl/ca-bundle.pem (OpenSUSE) + # - /etc/pki/tls/cacert.pem (OpenELEC) + # - /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem (CentOS/RHEL 7) + # - /etc/ssl/cert.pem (OpenBSD, Alpine) + # + # `system` can be used by itself or in conjunction with other + # CA filepaths. + # + # When `pg_ssl_verify` is enabled, these + # certificate authority files will be + # used for verifying Kong's database connections. + # + # Certificates can be configured on this property + # with either of the following values: + # * `system` + # * absolute path to the certificate + # * certificate content + # * base64 encoded certificate content + # + # See https://github.com/openresty/lua-nginx-module#lua_ssl_trusted_certificate + +#lua_ssl_verify_depth = 1 # Sets the verification depth in the server + # certificates chain used by Lua cosockets, + # set by `lua_ssl_trusted_certificate`. + # This includes the certificates configured + # for Kong's database connections. + # If the maximum depth is reached before + # reaching the end of the chain, verification + # will fail. This helps mitigate certificate + # based DoS attacks. + # + # See https://github.com/openresty/lua-nginx-module#lua_ssl_verify_depth + +#lua_ssl_protocols = TLSv1.1 TLSv1.2 TLSv1.3 # Defines the TLS versions supported + # when handshaking with OpenResty's + # TCP cosocket APIs. + # + # This affects connections made by Lua + # code, such as connections to the + # database Kong uses, or when sending logs + # using a logging plugin. It does *not* + # affect connections made to the upstream + # Service or from downstream clients. + +#lua_package_path = ./?.lua;./?/init.lua; # Sets the Lua module search path + # (LUA_PATH). Useful when developing + # or using custom plugins not stored + # in the default search path. + # + # See https://github.com/openresty/lua-nginx-module#lua_package_path + +#lua_package_cpath = # Sets the Lua C module search path + # (LUA_CPATH). 
+ # + # See https://github.com/openresty/lua-nginx-module#lua_package_cpath + +#lua_socket_pool_size = 30 # Specifies the size limit for every cosocket + # connection pool associated with every remote + # server. + # + # See https://github.com/openresty/lua-nginx-module#lua_socket_pool_size + +#untrusted_lua = sandbox + # Controls loading of Lua functions from admin-supplied + # sources such as the Admin API. LuaJIT bytecode + # loading is always disabled. + # + # **Warning:** LuaJIT is not designed as a secure + # runtime for running malicious code, therefore + # you should properly protect your Admin API endpoint + # even with sandboxing enabled. The sandbox only + # provides protection against trivial attackers or + # unintentional modification of the Kong global + # environment. + # + # Accepted values are: `off`, `sandbox`, or + # `on`: + # + # * `off`: Disallow loading of any arbitrary + # Lua functions. The `off` option + # disables any functionality that runs + # arbitrary Lua code, including the + # Serverless Functions plugins and any + # transformation plugin that allows + # custom Lua functions. + # + # * `sandbox`: Allow loading of Lua functions, + # but use a sandbox when executing + # them. The sandboxed function has + # restricted access to the global + # environment and only has access + # to standard Lua functions that + # will generally not cause harm to + # the Kong Gateway node. + # + # * `on`: Functions have unrestricted + # access to the global environment and + # can load any Lua modules. This is + # similar to the behavior in + # Kong Gateway prior to 2.3.0. + # + # The default `sandbox` environment does not + # allow importing other modules or libraries, + # or executing anything at the OS level (for + # example, file read/write). The global + # environment is also not accessible. + # + # Examples of `untrusted_lua = sandbox` + # behavior: + # + # * You can't access or change global values + # such as `kong.configuration.pg_password` + # * You can run harmless lua: + # `local foo = 1 + 1`. However, OS level + # functions are not allowed, like: + # `os.execute('rm -rf /*')`. + # + # For a full allowed/disallowed list, see: + # https://github.com/kikito/sandbox.lua/blob/master/sandbox.lua + # + # To customize the sandbox environment, use + # the `untrusted_lua_sandbox_requires` and + # `untrusted_lua_sandbox_environment` + # parameters below. + +#untrusted_lua_sandbox_requires = # Comma-separated list of modules allowed to + # be loaded with `require` inside the + # sandboxed environment. Ignored + # if `untrusted_lua` is not `sandbox`. + # + # For example, say you have configured the + # Serverless pre-function plugin and it + # contains the following `requires`: + # + # ``` + # local template = require "resty.template" + # local split = require "kong.tools.utils".split + # ``` + # + # To run the plugin, add the modules to the + # allowed list: + # ``` + # untrusted_lua_sandbox_requires = resty.template, kong.tools.utils + # ``` + # + # **Warning:** Allowing certain modules may + # create opportunities to escape the + # sandbox. For example, allowing `os` or + # `luaposix` may be unsafe. + +#untrusted_lua_sandbox_environment = # Comma-separated list of global Lua + # variables that should be made available + # inside the sandboxed environment. Ignored + # if `untrusted_lua` is not `sandbox`. + # + # **Warning**: Certain variables, when made + # available, may create opportunities to + # escape the sandbox. 
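A hedged sketch of driving the sandbox settings above from the environment; the module list simply mirrors the Serverless pre-function example in the comment and is not a recommendation, since every allowed module widens the sandbox.

```bash
#!/bin/bash
# Sketch: keep admin-supplied Lua sandboxed, but allow two modules to be
# require()d inside the sandbox (illustrative list from the docs above).
export KONG_UNTRUSTED_LUA=sandbox
export KONG_UNTRUSTED_LUA_SANDBOX_REQUIRES="resty.template,kong.tools.utils"

kong restart -c "$HOME/runtime/kong/conf/kong.conf"
```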
+ +#openresty_path = # Path to the OpenResty installation that Kong + # will use. When this is empty (the default), + # Kong determines the OpenResty installation + # by searching for a system-installed OpenResty + # and falling back to searching $PATH for the + # nginx binary. + # + # Setting this attribute disables the search + # behavior and explicitly instructs Kong which + # OpenResty installation to use. + +#node_id = # Node ID for the Kong node. Every Kong node + # in a Kong cluster must have a unique and + # valid UUID. When empty, node ID is + # automatically generated. + +#------------------------------------------------------------------------------ +# KONG MANAGER +#------------------------------------------------------------------------------ + +# The Admin GUI for Kong Gateway. +admin_gui_listen = 127.0.0.1:{%admin.ui.port%}, 127.0.0.1:{%admin.ui.ssl.port%} ssl, {%listen.ip%}:{%admin.ui.port%}, {%listen.ip%}:{%admin.ui.ssl.port%} ssl +#admin_gui_listen = 0.0.0.0:8002, 0.0.0.0:8445 ssl + # Kong Manager Listeners + # + # Comma-separated list of addresses and ports on which + # Kong will expose Kong Manager. This web application + # lets you configure and manage Kong, and therefore + # should be kept secured. + # + # Suffixes can be specified for each pair, similarly to + # the `admin_listen` directive. + +#admin_gui_url = + # Kong Manager URL + # + # The lookup, or balancer, address for Kong Manager. + # + # When set, the CORS headers in the Admin API response + # will also change to the corresponding origin + # + # Accepted format (items in parentheses are optional): + # + # `://(:)` + # + # Examples: + # + # - `http://127.0.0.1:8003` + # - `https://kong-manager.test` + # - `http://dev-machine` + +#admin_gui_path = / + # Kong Manager base path + # + # This configuration parameter allows the user to customize + # the path prefix where Kong Manager is served. When updating + # this parameter, it's recommended to update the path in + # `admin_gui_url` as well. + # + # Accepted format: + # + # - Path must start with a `/` + # - Path must not end with a `/` (except for the `/`) + # - Path can only contain letters, digits, hyphens (`-`), + # underscores (`_`), and slashes (`/`) + # - Path must not contain continuous slashes (e.g., `//` and `///`) + # + # Examples: + # + # - `/` + # - `/manager` + # - `/kong-manager` + # - `/kong/manager` + +#admin_gui_api_url = + # Hierarchical part of a URL which is composed + # optionally of a host, port, and path at which the + # Admin API accepts HTTP or HTTPS traffic. When + # this config is not provided, Kong Manager will + # use the window protocol + host and append the + # resolved admin_listen HTTP/HTTPS port. + +#admin_gui_ssl_cert = + # The SSL certificate for `admin_gui_listen` values + # with SSL enabled. + # + # values: + # * absolute path to the certificate + # * certificate content + # * base64 encoded certificate content + +#admin_gui_ssl_cert_key = + # The SSL key for `admin_gui_listen` values with SSL + # enabled. + # + # values: + # * absolute path to the certificate key + # * certificate key content + # * base64 encoded certificate key content + +#admin_gui_access_log = logs/admin_gui_access.log + # Kong Manager Access Logs + # + # Here you can set an absolute or relative path for + # Kong Manager access logs. When the path is relative, + # logs are placed in the `prefix` location. + # + # Setting this value to `off` disables access logs + # for Kong Manager. 
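A sketch of pinning Kong Manager's public URL and base path, again assuming the `KONG_` environment mapping; the hostname and port are placeholders for this cluster's real values, and in this runtime the listen addresses are normally filled in by the configure script instead.

```bash
#!/bin/bash
# Sketch: advertise Kong Manager at a known URL; `admin_gui_url` also
# controls the CORS origin the Admin API will accept.
export KONG_ADMIN_GUI_URL="https://kong-manager.example.internal:8445/manager"
export KONG_ADMIN_GUI_PATH="/manager"

kong restart -c "$HOME/runtime/kong/conf/kong.conf"
```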
+
+
+#admin_gui_error_log = logs/admin_gui_error.log
+ # Kong Manager Error Logs
+ #
+ # Here you can set an absolute or relative path for
+ # Kong Manager error logs. When the path is relative,
+ # logs are placed in the `prefix` location.
+ #
+ # Setting this value to `off` disables error logs for
+ # Kong Manager.
+ #
+ # Granularity can be adjusted through the `log_level`
+ # directive.
+
+
+#------------------------------------------------------------------------------
+# WASM
+#------------------------------------------------------------------------------
+
+#wasm = off # Enable/disable wasm support. This must be enabled in
+ # order to use wasm filters and filter chains.
+
+#wasm_filters_path = # Path to the directory containing wasm filter modules.
+ #
+ # At startup, Kong discovers available wasm filters by
+ # scanning this directory for files with the `.wasm`
+ # file extension.
+ #
+ # The name of a wasm filter module is derived from the
+ # filename itself, with the .wasm extension removed. So,
+ # given the following tree:
+ #
+ # ```
+ # /path/to/wasm_filters
+ # ├── my_module.wasm
+ # ├── my_other_module.wasm
+ # └── not_a_wasm_module.txt
+ # ```
+ #
+ # The resulting filter modules available for use in Kong
+ # will be:
+ #
+ # * `my_module`
+ # * `my_other_module`
+ #
+ # Notes:
+ #
+ # * No recursion is performed. Only .wasm files at the
+ # top level are registered.
+ # * This path _may_ be a symlink to a directory.
diff --git a/python/cloudtik/runtime/kong/config/commands.yaml b/python/cloudtik/runtime/kong/config/commands.yaml
new file mode 100644
index 000000000..c0914dd2f
--- /dev/null
+++ b/python/cloudtik/runtime/kong/config/commands.yaml
@@ -0,0 +1,25 @@
+# Custom commands that will be run on the head node after common setup.
+head_setup_commands:
+  - cloudtik runtime install kong --head
+  - cloudtik runtime configure kong --head
+
+# Custom commands that will be run on worker nodes after common setup.
+worker_setup_commands:
+  - cloudtik runtime install kong
+  - cloudtik runtime configure kong
+
+# Command to start on the head node. You don't need to change this.
+head_start_commands:
+  - cloudtik runtime services kong start --head
+
+# Command to start on worker nodes. You don't need to change this.
+worker_start_commands:
+  - cloudtik runtime services kong start
+
+# Command to stop on the head node. You don't need to change this.
+head_stop_commands:
+  - cloudtik runtime services kong stop --head
+
+# Command to stop on worker nodes. You don't need to change this.
+worker_stop_commands:
+  - cloudtik runtime services kong stop
diff --git a/python/cloudtik/runtime/kong/config/defaults.yaml b/python/cloudtik/runtime/kong/config/defaults.yaml
new file mode 100644
index 000000000..c0f006740
--- /dev/null
+++ b/python/cloudtik/runtime/kong/config/defaults.yaml
@@ -0,0 +1 @@
+# Configuration defaults
diff --git a/python/cloudtik/runtime/kong/runtime.py b/python/cloudtik/runtime/kong/runtime.py
new file mode 100644
index 000000000..1cead50e8
--- /dev/null
+++ b/python/cloudtik/runtime/kong/runtime.py
@@ -0,0 +1,78 @@
+import logging
+from typing import Any, Dict
+
+from cloudtik.core._private.runtime_factory import BUILT_IN_RUNTIME_POSTGRES
+from cloudtik.core.node_provider import NodeProvider
+from cloudtik.runtime.common.runtime_base import RuntimeBase
+from cloudtik.runtime.kong.utils import _get_runtime_processes, \
+    _get_runtime_services, _with_runtime_environment_variables, _config_depended_services, _prepare_config_on_head, \
+    _validate_config, _get_runtime_endpoints, _get_head_service_ports, _get_runtime_logs, _configure, _services
+
+logger = logging.getLogger(__name__)
+
+
+class KongRuntime(RuntimeBase):
+    """Implementation of the Kong runtime for API gateway."""
+
+    def __init__(self, runtime_config: Dict[str, Any]) -> None:
+        super().__init__(runtime_config)
+
+    def prepare_config(self, cluster_config: Dict[str, Any]) -> Dict[str, Any]:
+        """Prepare runtime specific configurations."""
+        cluster_config = _config_depended_services(cluster_config)
+        return cluster_config
+
+    def prepare_config_on_head(
+            self, cluster_config: Dict[str, Any]
+    ) -> Dict[str, Any]:
+        """Configure the runtime on the head, for example, using service discovery
+        to resolve the addresses of internal services the runtime depends on.
+        The head configuration will be updated and saved with the returned configuration.
+        """
+        return _prepare_config_on_head(cluster_config)
+
+    def validate_config(self, cluster_config: Dict[str, Any]):
+        """Validate cluster configuration from the runtime perspective."""
+        _validate_config(cluster_config)
+
+    def with_environment_variables(
+            self, config: Dict[str, Any], provider: NodeProvider,
+            node_id: str) -> Dict[str, Any]:
+        """Export necessary runtime environment variables for running node commands.
+        For example: {"ENV_NAME": value}
+        """
+        return _with_runtime_environment_variables(
+            self.runtime_config, config=config)
+
+    def configure(self, head: bool):
+        """This method is called on every node as the first step of executing the
+        runtime configure command.
+        """
+        _configure(self.runtime_config, head)
+
+    def services(self, head: bool):
+        """This method is called on every node as the first step of executing the
+        runtime services command.
+ """ + _services(self.runtime_config, head) + + def get_runtime_endpoints(self, cluster_head_ip: str): + return _get_runtime_endpoints(self.runtime_config, cluster_head_ip) + + def get_head_service_ports(self) -> Dict[str, Any]: + return _get_head_service_ports(self.runtime_config) + + def get_runtime_services(self, cluster_name: str): + return _get_runtime_services(self.runtime_config, cluster_name) + + @staticmethod + def get_processes(): + return _get_runtime_processes() + + @staticmethod + def get_logs() -> Dict[str, str]: + return _get_runtime_logs() + + @staticmethod + def get_dependencies(): + return [BUILT_IN_RUNTIME_POSTGRES] diff --git a/python/cloudtik/runtime/kong/scripts/configure.sh b/python/cloudtik/runtime/kong/scripts/configure.sh new file mode 100644 index 000000000..a51bf4893 --- /dev/null +++ b/python/cloudtik/runtime/kong/scripts/configure.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +# Current bin directory +BIN_DIR=`dirname "$0"` +ROOT_DIR="$(dirname "$(dirname "$BIN_DIR")")" + +args=$(getopt -a -o h:: -l head:: -- "$@") +eval set -- "${args}" + +USER_HOME=/home/$(whoami) +RUNTIME_PATH=$USER_HOME/runtime +KONG_HOME=$RUNTIME_PATH/kong + +# Util functions +. "$ROOT_DIR"/common/scripts/util-functions.sh + +function prepare_base_conf() { + local source_dir=$(dirname "${BIN_DIR}")/conf + output_dir=/tmp/kong/conf + rm -rf $output_dir + mkdir -p $output_dir + cp -r $source_dir/* $output_dir +} + +function check_kong_installed() { + if ! command -v kong &> /dev/null + then + echo "Kong is not installed for kong command is not available." + exit 1 + fi +} + +function configure_database() { + DATABASE_NAME=kong + DATABASE_USER=kong + # TODO: allow user to specify the database password + DATABASE_PASSWORD=kong + if [ "${SQL_DATABASE}" == "true" ]; then + # a standalone SQL database + DATABASE_HOST=${SQL_DATABASE_HOST} + DATABASE_PORT=${SQL_DATABASE_PORT} + DATABASE_ENGINE=${SQL_DATABASE_ENGINE} + else + echo "ERROR: No SQL database configured." 
+        exit 1
+    fi
+
+    sed -i "s#{%database.host%}#${DATABASE_HOST}#g" ${config_template_file}
+    sed -i "s#{%database.port%}#${DATABASE_PORT}#g" ${config_template_file}
+    sed -i "s/{%database.name%}/${DATABASE_NAME}/g" ${config_template_file}
+    sed -i "s/{%database.user%}/${DATABASE_USER}/g" ${config_template_file}
+    sed -i "s/{%database.password%}/${DATABASE_PASSWORD}/g" ${config_template_file}
+}
+
+function configure_kong() {
+    prepare_base_conf
+    mkdir -p ${KONG_HOME}/logs
+
+    KONG_CONF_DIR=${KONG_HOME}/conf
+    mkdir -p ${KONG_CONF_DIR}
+
+    config_template_file=${output_dir}/kong.conf
+
+    sed -i "s#{%listen.ip%}#${NODE_IP_ADDRESS}#g" ${config_template_file}
+    sed -i "s#{%listen.port%}#${KONG_SERVICE_PORT}#g" ${config_template_file}
+    sed -i "s#{%listen.ssl.port%}#${KONG_SERVICE_SSL_PORT}#g" ${config_template_file}
+    sed -i "s#{%admin.port%}#${KONG_ADMIN_PORT}#g" ${config_template_file}
+    sed -i "s#{%admin.ssl.port%}#${KONG_ADMIN_SSL_PORT}#g" ${config_template_file}
+    sed -i "s#{%admin.ui.port%}#${KONG_ADMIN_UI_PORT}#g" ${config_template_file}
+    sed -i "s#{%admin.ui.ssl.port%}#${KONG_ADMIN_UI_SSL_PORT}#g" ${config_template_file}
+
+    configure_database
+
+    cp ${config_template_file} ${KONG_CONF_DIR}/kong.conf
+}
+
+set_head_option "$@"
+check_kong_installed
+set_node_ip_address
+configure_kong
+
+exit 0
diff --git a/python/cloudtik/runtime/kong/scripts/install.sh b/python/cloudtik/runtime/kong/scripts/install.sh
new file mode 100644
index 000000000..25c45ff31
--- /dev/null
+++ b/python/cloudtik/runtime/kong/scripts/install.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+# Current bin directory
+BIN_DIR=`dirname "$0"`
+ROOT_DIR="$(dirname "$(dirname "$BIN_DIR")")"
+
+args=$(getopt -a -o h:: -l head:: -- "$@")
+eval set -- "${args}"
+
+KONG_VERSION=3.4
+
+# Util functions
+. "$ROOT_DIR"/common/scripts/util-functions.sh
+
+function install_kong() {
+    if ! command -v kong &> /dev/null
+    then
+        # WARNING: Kong cannot coexist with APISIX, since both install OpenResty.
+
+function create_database_schema() {
+    DATABASE_NAME=kong
+    DATABASE_USER=kong
+    # TODO: allow user to specify the database password
+    DATABASE_PASSWORD=kong
+    # Use psql to create the user and database
+    DATABASE_EXISTS=$(PGPASSWORD=${SQL_DATABASE_PASSWORD} psql -lqt --host=${SQL_DATABASE_HOST} \
+        --port=${SQL_DATABASE_PORT} \
+        --username=${SQL_DATABASE_USERNAME} | cut -d \| -f 1 | grep -w $DATABASE_NAME | wc -l) || true
+    if [[ $DATABASE_EXISTS == 0 ]]; then
+        echo "CREATE USER $DATABASE_USER WITH PASSWORD '$DATABASE_PASSWORD'\gexec" | PGPASSWORD=${SQL_DATABASE_PASSWORD} \
+            psql \
+            --host=${SQL_DATABASE_HOST} \
+            --port=${SQL_DATABASE_PORT} \
+            --username=${SQL_DATABASE_USERNAME} > ${KONG_HOME}/logs/configure.log
+        # append so the user creation output above is not overwritten
+        echo "SELECT 'CREATE DATABASE ${DATABASE_NAME} OWNER $DATABASE_USER' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '${DATABASE_NAME}')\gexec" | PGPASSWORD=${SQL_DATABASE_PASSWORD} \
+            psql \
+            --host=${SQL_DATABASE_HOST} \
+            --port=${SQL_DATABASE_PORT} \
+            --username=${SQL_DATABASE_USERNAME} >> ${KONG_HOME}/logs/configure.log
+    fi
+}
+
+function init_schema() {
+    create_database_schema
+
+    ADMIN_PASSWORD=kong
+    KONG_PASSWORD=$ADMIN_PASSWORD sudo -E env "PATH=$PATH" \
+        kong migrations bootstrap -c ${KONG_CONFIG_FILE} >> ${KONG_HOME}/logs/configure.log 2>&1
+}
diff --git a/python/cloudtik/runtime/kong/scripts/services.sh b/python/cloudtik/runtime/kong/scripts/services.sh
new file mode 100644
index 000000000..c5e4b5fee
--- /dev/null
+++ b/python/cloudtik/runtime/kong/scripts/services.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# Current bin directory
+BIN_DIR=`dirname "$0"`
+ROOT_DIR="$(dirname "$(dirname "$BIN_DIR")")"
+
+args=$(getopt -a -o h:: -l head:: -- "$@")
+eval set -- "${args}"
+
+USER_HOME=/home/$(whoami)
+RUNTIME_PATH=$USER_HOME/runtime
+KONG_HOME=$RUNTIME_PATH/kong
+KONG_CONFIG_FILE=${KONG_HOME}/conf/kong.conf
+
+# import util functions
+. "$ROOT_DIR"/common/scripts/util-functions.sh
+
+# schema initialization functions
+. "$BIN_DIR"/schema-init.sh
+
+set_head_option "$@"
+set_service_command "$@"
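+
+# Example usage (illustrative; the arguments are parsed by the shared
+# set_head_option/set_service_command helpers):
+#   bash services.sh start --head   # init schema on the head, then start Kong
+#   bash services.sh stop           # stop Kong on this node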
"$BIN_DIR"/schema-init.sh + +set_head_option "$@" +set_service_command "$@" + +case "$SERVICE_COMMAND" in +start) + if [ "${IS_HEAD_NODE}" == "true" ]; then + # do schema check and init only on head + init_schema + fi + + sudo env "PATH=$PATH" kong start \ + -c ${KONG_CONFIG_FILE} \ + >${KONG_HOME}/logs/kong.log 2>&1 + ;; +stop) + sudo env "PATH=$PATH" kong stop \ + >${KONG_HOME}/logs/kong.log 2>&1 + ;; +-h|--help) + echo "Usage: $0 start|stop --head" >&2 + ;; +*) + echo "Usage: $0 start|stop --head" >&2 + ;; +esac + +exit 0 diff --git a/python/cloudtik/runtime/kong/utils.py b/python/cloudtik/runtime/kong/utils.py new file mode 100644 index 000000000..44e09756f --- /dev/null +++ b/python/cloudtik/runtime/kong/utils.py @@ -0,0 +1,203 @@ +import os +from typing import Any, Dict + +from cloudtik.core._private.core_utils import get_address_string +from cloudtik.core._private.runtime_factory import BUILT_IN_RUNTIME_KONG, BUILT_IN_RUNTIME_POSTGRES +from cloudtik.core._private.runtime_utils import get_runtime_bool, \ + get_runtime_value +from cloudtik.core._private.service_discovery.runtime_services import get_service_discovery_runtime +from cloudtik.core._private.service_discovery.utils import \ + get_canonical_service_name, define_runtime_service, \ + get_service_discovery_config, SERVICE_DISCOVERY_FEATURE_API_GATEWAY, SERVICE_DISCOVERY_PROTOCOL_HTTP +from cloudtik.core._private.util.database_utils import is_database_configured, export_database_environment_variables, \ + DATABASE_ENGINE_POSTGRES, get_database_engine, DATABASE_ENV_ENABLED, DATABASE_ENV_ENGINE +from cloudtik.core._private.utils import get_runtime_config, is_use_managed_cloud_database, PROVIDER_DATABASE_CONFIG_KEY +from cloudtik.runtime.common.service_discovery.runtime_discovery import \ + DATABASE_CONNECT_KEY, is_database_service_discovery, discover_database_on_head, \ + discover_database_from_workspace + +RUNTIME_PROCESSES = [ + # The first element is the substring to filter. + # The second element, if True, is to filter ps results by command name. + # The third element is the process name. + # The forth element, if node, the process should on all nodes,if head, the process should on head node. 
+ ["/usr/local/kong", False, "KONG", "node"], + ] + +KONG_SERVICE_PORT_CONFIG_KEY = "port" +KONG_SERVICE_SSL_PORT_CONFIG_KEY = "ssl_port" + +KONG_SERVICE_NAME = BUILT_IN_RUNTIME_KONG + +KONG_SERVICE_PORT_DEFAULT = 8000 +KONG_SERVICE_SSL_PORT_DEFAULT = 8443 + +KONG_ADMIN_PORT_DEFAULT = 8001 +KONG_ADMIN_SSL_PORT_DEFAULT = 8444 +KONG_ADMIN_UI_PORT_DEFAULT = 8002 +KONG_ADMIN_UI_SSL_PORT_DEFAULT = 8445 + + +def _get_config(runtime_config: Dict[str, Any]): + return runtime_config.get(BUILT_IN_RUNTIME_KONG, {}) + + +def _get_database_config(metastore_config): + return metastore_config.get(DATABASE_CONNECT_KEY, {}) + + +def _get_service_port(kong_config: Dict[str, Any]): + return kong_config.get( + KONG_SERVICE_PORT_CONFIG_KEY, KONG_SERVICE_PORT_DEFAULT) + + +def _get_service_ssl_port(kong_config: Dict[str, Any]): + return kong_config.get( + KONG_SERVICE_SSL_PORT_CONFIG_KEY, KONG_SERVICE_SSL_PORT_DEFAULT) + + +def _get_home_dir(): + return os.path.join( + os.getenv("HOME"), "runtime", BUILT_IN_RUNTIME_KONG) + + +def _get_runtime_processes(): + return RUNTIME_PROCESSES + + +def _get_runtime_logs(): + home_dir = _get_home_dir() + logs_dir = os.path.join(home_dir, "logs") + return {BUILT_IN_RUNTIME_KONG: logs_dir} + + +def _config_depended_services(cluster_config: Dict[str, Any]) -> Dict[str, Any]: + cluster_config = discover_database_from_workspace( + cluster_config, BUILT_IN_RUNTIME_KONG, + database_runtime_type=BUILT_IN_RUNTIME_POSTGRES, + allow_local=False + ) + return cluster_config + + +def _prepare_config_on_head(cluster_config: Dict[str, Any]): + cluster_config = discover_database_on_head( + cluster_config, BUILT_IN_RUNTIME_KONG, + database_runtime_type=BUILT_IN_RUNTIME_POSTGRES, + allow_local=False) + + _validate_config(cluster_config, final=True) + return cluster_config + + +def _is_valid_database_config(config: Dict[str, Any], final=False): + # Check database configuration + runtime_config = get_runtime_config(config) + kong_config = _get_config(runtime_config) + database_config = _get_database_config(kong_config) + if is_database_configured(database_config): + if get_database_engine(database_config) != DATABASE_ENGINE_POSTGRES: + return False + return True + + # check whether cloud database is available (must be postgres) + provider_config = config["provider"] + if (PROVIDER_DATABASE_CONFIG_KEY in provider_config or + (not final and is_use_managed_cloud_database(config))): + return True + + # if there is service discovery mechanism, assume we can get from service discovery + if (not final and is_database_service_discovery(kong_config) + and get_service_discovery_runtime(runtime_config)): + return True + + return False + + +def _validate_config(config: Dict[str, Any], final=False): + if not _is_valid_database_config(config, final): + raise ValueError("Postgres must be configured for Kong.") + + +def _with_runtime_environment_variables( + runtime_config, config): + runtime_envs = {} + kong_config = _get_config(runtime_config) + + service_port = _get_service_port(kong_config) + runtime_envs["KONG_SERVICE_PORT"] = service_port + service_ssl_port = _get_service_ssl_port(kong_config) + runtime_envs["KONG_SERVICE_SSL_PORT"] = service_ssl_port + runtime_envs["KONG_ADMIN_PORT"] = KONG_ADMIN_PORT_DEFAULT + runtime_envs["KONG_ADMIN_SSL_PORT"] = KONG_ADMIN_SSL_PORT_DEFAULT + runtime_envs["KONG_ADMIN_UI_PORT"] = KONG_ADMIN_UI_PORT_DEFAULT + runtime_envs["KONG_ADMIN_UI_SSL_PORT"] = KONG_ADMIN_UI_SSL_PORT_DEFAULT + + return runtime_envs + + +def _export_database_configurations(runtime_config): + 
+
+
+def _export_database_configurations(runtime_config):
+    kong_config = _get_config(runtime_config)
+    database_config = _get_database_config(kong_config)
+    if is_database_configured(database_config):
+        # set the database environments from database config
+        # This may override the environments from provider
+        export_database_environment_variables(database_config)
+    else:
+        # check cloud database is configured
+        database_enabled = get_runtime_bool(DATABASE_ENV_ENABLED)
+        if not database_enabled:
+            raise RuntimeError("No Postgres is configured for Kong.")
+        database_engine = get_runtime_value(DATABASE_ENV_ENGINE)
+        if database_engine != DATABASE_ENGINE_POSTGRES:
+            raise RuntimeError("Postgres must be configured for Kong.")
+
+
+def _configure(runtime_config, head: bool):
+    _export_database_configurations(runtime_config)
+
+
+def _services(runtime_config, head: bool):
+    # We run the database schema init right before the start of the kong service
+    _export_database_configurations(runtime_config)
+
+
+def _get_runtime_endpoints(
+        runtime_config: Dict[str, Any], cluster_head_ip):
+    kong_config = _get_config(runtime_config)
+    service_port = _get_service_port(kong_config)
+    endpoints = {
+        "kong": {
+            "name": "KONG",
+            "url": "http://{}".format(
+                get_address_string(cluster_head_ip, service_port))
+        },
+    }
+    return endpoints
+
+
+def _get_head_service_ports(
+        runtime_config: Dict[str, Any]) -> Dict[str, Any]:
+    kong_config = _get_config(runtime_config)
+    service_port = _get_service_port(kong_config)
+    service_ports = {
+        "kong": {
+            "protocol": "TCP",
+            "port": service_port,
+        },
+    }
+    return service_ports
+
+
+def _get_runtime_services(
+        runtime_config: Dict[str, Any], cluster_name: str) -> Dict[str, Any]:
+    kong_config = _get_config(runtime_config)
+    service_discovery_config = get_service_discovery_config(kong_config)
+    service_name = get_canonical_service_name(
+        service_discovery_config, cluster_name, KONG_SERVICE_NAME)
+    service_port = _get_service_port(kong_config)
+    services = {
+        service_name: define_runtime_service(
+            service_discovery_config, service_port,
+            protocol=SERVICE_DISCOVERY_PROTOCOL_HTTP,
+            features=[SERVICE_DISCOVERY_FEATURE_API_GATEWAY]),
+    }
+    return services
diff --git a/python/cloudtik/runtime/nginx/scripts/configure.py b/python/cloudtik/runtime/nginx/scripts/configure.py
index 0e0181344..c6a857fc8 100644
--- a/python/cloudtik/runtime/nginx/scripts/configure.py
+++ b/python/cloudtik/runtime/nginx/scripts/configure.py
@@ -1,6 +1,6 @@
 import argparse
 
-import os
+from cloudtik.core._private.runtime_utils import get_runtime_bool
 
 from cloudtik.runtime.nginx.utils import configure_backend
 
@@ -11,8 +11,8 @@ def main():
         help='Configuring for head node.')
     args = parser.parse_args()
 
-    high_availability = os.environ.get("NGINX_HIGH_AVAILABILITY")
-    if high_availability == "true" or args.head:
+    high_availability = get_runtime_bool("NGINX_HIGH_AVAILABILITY")
+    if high_availability or args.head:
         configure_backend(args.head)
 
diff --git a/python/cloudtik/runtime/nginx/scripts/services.py b/python/cloudtik/runtime/nginx/scripts/services.py
index 64ece19e5..2bbd0bbce 100644
--- a/python/cloudtik/runtime/nginx/scripts/services.py
+++ b/python/cloudtik/runtime/nginx/scripts/services.py
@@ -1,6 +1,6 @@
 import argparse
 
-from cloudtik.core._private.runtime_utils import get_runtime_value
+from cloudtik.core._private.runtime_utils import get_runtime_value, get_runtime_bool
 from cloudtik.runtime.nginx.utils \
     import start_pull_server, stop_pull_server, NGINX_APP_MODE_API_GATEWAY, \
     NGINX_CONFIG_MODE_DNS, NGINX_APP_MODE_LOAD_BALANCER, NGINX_CONFIG_MODE_DYNAMIC
@@ -46,8 +46,8 @@ def main():
     )
     args = parser.parse_args()
 
-    high_availability = get_runtime_value("NGINX_HIGH_AVAILABILITY")
-    if high_availability == "true" or args.head:
+    high_availability = get_runtime_bool("NGINX_HIGH_AVAILABILITY")
+    if high_availability or args.head:
         if args.command == "start":
             start_service(args.head)
         elif args.command == "stop":
diff --git a/python/cloudtik/runtime/prometheus/scripts/configure.py b/python/cloudtik/runtime/prometheus/scripts/configure.py
index d44b0c448..82b20ad07 100644
--- a/python/cloudtik/runtime/prometheus/scripts/configure.py
+++ b/python/cloudtik/runtime/prometheus/scripts/configure.py
@@ -1,6 +1,6 @@
 import argparse
 
-import os
+from cloudtik.core._private.runtime_utils import get_runtime_bool
 
 from cloudtik.runtime.prometheus.utils import configure_scrape
 
@@ -11,8 +11,8 @@ def main():
         help='Configuring for head node.')
     args = parser.parse_args()
 
-    high_availability = os.environ.get("PROMETHEUS_HIGH_AVAILABILITY")
-    if high_availability == "true" or args.head:
+    high_availability = get_runtime_bool("PROMETHEUS_HIGH_AVAILABILITY")
+    if high_availability or args.head:
         configure_scrape(args.head)
 
diff --git a/python/cloudtik/runtime/prometheus/scripts/services.py b/python/cloudtik/runtime/prometheus/scripts/services.py
index 2f272e00f..10d2cd6d5 100644
--- a/python/cloudtik/runtime/prometheus/scripts/services.py
+++ b/python/cloudtik/runtime/prometheus/scripts/services.py
@@ -1,7 +1,7 @@
 import argparse
 import os
 
-from cloudtik.core._private.runtime_utils import get_runtime_value
+from cloudtik.core._private.runtime_utils import get_runtime_bool
 from cloudtik.runtime.prometheus.utils import start_pull_server, stop_pull_server, _get_home_dir
 
@@ -39,8 +39,8 @@ def main():
     )
     args = parser.parse_args()
 
-    high_availability = get_runtime_value("PROMETHEUS_HIGH_AVAILABILITY")
-    if high_availability == "true" or args.head:
+    high_availability = get_runtime_bool("PROMETHEUS_HIGH_AVAILABILITY")
+    if high_availability or args.head:
         if args.command == "start":
             start_service(args.head)
         elif args.command == "stop":
diff --git a/python/cloudtik/runtime/spark/utils.py b/python/cloudtik/runtime/spark/utils.py
index c574d743f..08266f24b 100644
--- a/python/cloudtik/runtime/spark/utils.py
+++ b/python/cloudtik/runtime/spark/utils.py
@@ -14,7 +14,7 @@ from cloudtik.core._private.utils import \
     round_memory_size_to_gb, load_head_cluster_config, \
     RUNTIME_CONFIG_KEY, load_properties_file, save_properties_file, is_use_managed_cloud_storage, \
-    print_json_formatted, get_config_for_update, get_runtime_config
+    print_json_formatted, get_config_for_update, get_runtime_config, PROVIDER_STORAGE_CONFIG_KEY
 from cloudtik.core.scaling_policy import ScalingPolicy
 from cloudtik.runtime.common.service_discovery.cluster import has_runtime_in_cluster
 from cloudtik.runtime.common.service_discovery.runtime_discovery import \
@@ -332,8 +332,8 @@ def _is_valid_storage_config(config: Dict[str, Any], final=False):
 
     # Check any cloud storage is configured
     provider_config = config["provider"]
-    if ("storage" in provider_config or
-            is_use_managed_cloud_storage(config)):
+    if (PROVIDER_STORAGE_CONFIG_KEY in provider_config or
+            (not final and is_use_managed_cloud_storage(config))):
         return True
 
     # if there is service discovery mechanism, assume we can get from service discovery