Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Integrate kind into local deployment to no longer require minikube for development #1171

Merged
merged 15 commits into from
Aug 9, 2022
Merged
47 changes: 5 additions & 42 deletions .github/workflows/kubernetes_test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -51,63 +51,36 @@ jobs:
run: |
conda install -c anaconda pip
pip install .[dev]
- name: Download and Install Minikube and Kubectl
- name: Download and Install Kind and Kubectl
run: |
mkdir -p bin
pushd bin
curl -L https://github.com/kubernetes/minikube/releases/download/v1.22.0/minikube-linux-amd64 -o minikube
chmod +x minikube

curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubectl
chmod +x kubectl

echo "$PWD" >> $GITHUB_PATH
popd
- name: Start Minikube
- name: Enable docker permissions for user
run: |
sudo docker ps
sudo usermod -aG docker $USER && newgrp docker
minikube start --kubernetes-version=1.19.4 --driver=docker --cpus 4 --memory 12288 --wait=all
- name: Print minikube and kubectl versions
run: |
minikube version
kubectl version
- name: Use minikube docker daemon
run: |
eval $(minikube docker-env)
echo "DOCKER_TLS_VERIFY=$DOCKER_TLS_VERIFY" >> $GITHUB_ENV
echo "DOCKER_HOST=$DOCKER_HOST" >> $GITHUB_ENV
echo "DOCKER_CERT_PATH=$DOCKER_CERT_PATH" >> $GITHUB_ENV
echo "MINIKUBE_ACTIVE_DOCKERD=$MINIKUBE_ACTIVE_DOCKERD" >> $GITHUB_ENV
- name: Print docker connection information
run: |

docker info
docker ps
- name: List docker images in minikube
run: |
docker images
- name: Get routing table for docker pods
run: |
ip route
- name: Configure LoadBalancer IPs
run: |
python tests/scripts/minikube-loadbalancer-ip.py
- name: Add DNS entry to hosts
run: |
sudo echo "192.168.49.100 github-actions.qhub.dev" | sudo tee -a /etc/hosts
- name: Enable Minikube metallb
run: |
minikube addons enable metallb
- name: Basic kubectl checks before deployment
run: |
kubectl get all,cm,secret,ing -A
sudo echo "172.18.1.100 github-actions.qhub.dev" | sudo tee -a /etc/hosts
- name: Initialize QHub Cloud
run: |
mkdir -p local-deployment
cd local-deployment
qhub init local --project=thisisatest --domain github-actions.qhub.dev --auth-provider=password

# Need smaller profiles on Minikube
# Need smaller profiles on Local Kind
sed -i -E 's/(cpu_guarantee):\s+[0-9\.]+/\1: 0.25/g' "qhub-config.yaml"
sed -i -E 's/(mem_guarantee):\s+[A-Za-z0-9\.]+/\1: 0.25G/g' "qhub-config.yaml"

Expand Down Expand Up @@ -194,13 +167,3 @@ jobs:
run: |
cd local-deployment
qhub destroy --config qhub-config.yaml

- name: Basic kubectl checks after cleanup
if: always()
run: |
kubectl get all,cm,secret,ing -A

- name: Delete minikube cluster
if: always()
run: |
minikube delete
1 change: 1 addition & 0 deletions .github/workflows/test-provider.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ jobs:
- do
- gcp
- local
- existing
cicd:
- none
- github-actions
Expand Down
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ repos:
- id: check-yaml
# jinja2 templates for helm charts
exclude: 'qhub/template/stages/07-kubernetes-services/modules/kubernetes/services/(clearml/chart/templates/.*|prefect/chart/templates/.*)'
args: [--allow-multiple-documents]

- repo: https://github.com/codespell-project/codespell
rev: v2.1.0
Expand Down
24 changes: 24 additions & 0 deletions qhub/initialize.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,23 @@
}
}

# Default settings for the "existing" kubernetes-cluster provider.
# Every node group (general/user/worker) is pinned to linux nodes via
# the well-known kubernetes.io/os node label.
EXISTING = {
    "node_selectors": {
        group: {"key": "kubernetes.io/os", "value": "linux"}
        for group in ("general", "user", "worker")
    }
}

DIGITAL_OCEAN = {
"region": "nyc3",
"kubernetes_version": "PLACEHOLDER",
Expand Down Expand Up @@ -390,6 +407,13 @@ def render_config(
set_kubernetes_version(config, kubernetes_version, cloud_provider)
if "AWS_DEFAULT_REGION" in os.environ:
config["amazon_web_services"]["region"] = os.environ["AWS_DEFAULT_REGION"]

elif cloud_provider == "existing":
config["theme"]["jupyterhub"][
"hub_subtitle"
] = "Autoscaling Compute Environment"
config["existing"] = EXISTING.copy()

elif cloud_provider == "local":
config["theme"]["jupyterhub"][
"hub_subtitle"
Expand Down
2 changes: 1 addition & 1 deletion qhub/provider/cicd/github.py
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ def gha_env_vars(config):
env_vars["DIGITALOCEAN_TOKEN"] = "${{ secrets.DIGITALOCEAN_TOKEN }}"
elif config["provider"] == "gcp":
env_vars["GOOGLE_CREDENTIALS"] = "${{ secrets.GOOGLE_CREDENTIALS }}"
elif config["provider"] == "local":
elif config["provider"] in ["local", "existing"]:
# create mechanism to allow for extra env vars?
pass
else:
Expand Down
7 changes: 7 additions & 0 deletions qhub/schema.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ class TerraformStateEnum(str, enum.Enum):

class ProviderEnum(str, enum.Enum):
local = "local"
existing = "existing"
do = "do"
aws = "aws"
gcp = "gcp"
Expand Down Expand Up @@ -311,6 +312,11 @@ class LocalProvider(Base):
node_selectors: typing.Dict[str, KeyValueDict]


class ExistingProvider(Base):
    """Settings for deploying onto an existing, externally-managed kubernetes cluster.

    Mirrors ``LocalProvider``: an optional kubeconfig context plus the
    node selectors used to place each node group.
    """

    # optional kubeconfig context name used to select the target cluster
    kube_context: typing.Optional[str]
    # mapping of node-group name (e.g. "general"/"user"/"worker") -> node selector
    node_selectors: typing.Dict[str, KeyValueDict]


# ================= Theme ==================


Expand Down Expand Up @@ -488,6 +494,7 @@ class Main(Base):
default_images: DefaultImages
storage: typing.Dict[str, str]
local: typing.Optional[LocalProvider]
existing: typing.Optional[ExistingProvider]
google_cloud_platform: typing.Optional[GoogleCloudPlatformProvider]
amazon_web_services: typing.Optional[AmazonWebServicesProvider]
azure: typing.Optional[AzureProvider]
Expand Down
11 changes: 10 additions & 1 deletion qhub/stages/input_vars.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,14 @@ def stage_01_terraform_state(stage_outputs, config):

def stage_02_infrastructure(stage_outputs, config):
if config["provider"] == "local":
return {"kube_context": config["local"].get("kube_context")}
return {
"kubeconfig_filename": os.path.join(
tempfile.gettempdir(), "QHUB_KUBECONFIG"
),
"kube_context": config["local"].get("kube_context"),
}
elif config["provider"] == "existing":
return {"kube_context": config["existing"].get("kube_context")}
elif config["provider"] == "do":
return {
"name": config["project_name"],
Expand Down Expand Up @@ -165,6 +172,8 @@ def _calculate_note_groups(config):
group: {"key": "doks.digitalocean.com/node-pool", "value": group}
for group in ["general", "user", "worker"]
}
elif config["provider"] == "existing":
return config["existing"].get("node_selectors")
else:
return config["local"]["node_selectors"]

Expand Down
10 changes: 10 additions & 0 deletions qhub/stages/tf_objects.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,16 @@ def QHubTerraformState(directory: str, qhub_config: Dict):
container_name=f"{qhub_config['project_name']}-{qhub_config['namespace']}-state",
key=f"terraform/{qhub_config['project_name']}-{qhub_config['namespace']}/{directory}",
)
elif qhub_config["provider"] == "existing":
optional_kwargs = {}
if "kube_context" in qhub_config["existing"]:
optional_kwargs["config_context"] = qhub_config["existing"]["kube_context"]
return TerraformBackend(
"kubernetes",
secret_suffix=f"{qhub_config['project_name']}-{qhub_config['namespace']}-{directory}",
load_config_file=True,
**optional_kwargs,
)
elif qhub_config["provider"] == "local":
optional_kwargs = {}
if "kube_context" in qhub_config["local"]:
Expand Down
17 changes: 17 additions & 0 deletions qhub/template/stages/02-infrastructure/existing/main.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Connection settings for an existing, externally-managed kubernetes
# cluster.  Nothing is provisioned here; this stage only surfaces the
# credentials that later stages use to talk to the cluster.

variable "kube_context" {
  description = "Optional kubernetes context to use to connect to kubernetes cluster"
  type        = string
  # The description calls this variable optional, but without a default
  # Terraform treats it as required.  An explicit null default makes it
  # genuinely optional (null selects the kubeconfig's current context).
  default     = null
}

output "kubernetes_credentials" {
  description = "Parameters needed to connect to kubernetes cluster locally"
  value = {
    # assumes the standard kubeconfig location -- TODO confirm for CI runners
    config_path    = pathexpand("~/.kube/config")
    config_context = var.kube_context
  }
}

output "kubeconfig_filename" {
  description = "filename for qhub kubeconfig"
  value       = pathexpand("~/.kube/config")
}
116 changes: 105 additions & 11 deletions qhub/template/stages/02-infrastructure/local/main.tf
Original file line number Diff line number Diff line change
@@ -1,17 +1,111 @@
variable "kube_context" {
description = "Optional kubernetes context to use to connect to kubernetes cluster"
type = string
# Pin the third-party providers needed to stand up a local kind-based
# cluster: kind (cluster lifecycle), docker (to inspect the network kind
# creates), and kubectl (to apply raw/multi-document manifests such as
# metallb).
terraform {
  required_providers {
    kind = {
      source  = "kyma-incubator/kind"
      version = "0.0.11"
    }
    docker = {
      source  = "kreuzwerker/docker"
      version = "2.16.0"
    }
    kubectl = {
      source  = "gavinbunney/kubectl"
      version = ">= 1.7.0"
    }
  }
}

# Both providers run with their defaults: kind drives the local docker
# daemon, and docker connects over the standard local socket.
provider "kind" {

}

provider "docker" {

}

output "kubernetes_credentials" {
description = "Parameters needed to connect to kubernetes cluster locally"
value = {
config_path = pathexpand("~/.kube/config")
config_context = var.kube_context
provider "kubernetes" {
host = kind_cluster.default.endpoint
cluster_ca_certificate = kind_cluster.default.cluster_ca_certificate
client_key = kind_cluster.default.client_key
client_certificate = kind_cluster.default.client_certificate
}

# Configure kubectl directly from the kind cluster's generated
# credentials rather than reading a kubeconfig file from disk
# (load_config_file = false).
provider "kubectl" {
  load_config_file       = false
  host                   = kind_cluster.default.endpoint
  cluster_ca_certificate = kind_cluster.default.cluster_ca_certificate
  client_key             = kind_cluster.default.client_key
  client_certificate     = kind_cluster.default.client_certificate
}

# The kind cluster itself: a single node running kubernetes v1.21.10.
# wait_for_ready blocks until the node reports Ready, so later stages
# can schedule workloads immediately.
resource "kind_cluster" "default" {
  name           = "test-cluster"
  wait_for_ready = true

  kind_config {
    kind        = "Cluster"
    api_version = "kind.x-k8s.io/v1alpha4"

    node {
      # NOTE(review): kind's documented node roles are control-plane and
      # worker; "general" is passed through here -- confirm the provider
      # and kind accept it as intended.
      role  = "general"
      image = "kindest/node:v1.21.10"
    }
  }
}

output "kubeconfig_filename" {
description = "filename for qhub kubeconfig"
value = pathexpand("~/.kube/config")
# Namespace that the metallb manifests (applied below) install into.
resource "kubernetes_namespace" "metallb" {
  metadata {
    name = "metallb-system"
  }
}

# Split the (multi-document) metallb manifest shipped alongside this
# module into individual YAML documents.
data "kubectl_path_documents" "metallb" {
  pattern = "${path.module}/metallb.yaml"
}

# Apply each metallb document; wait = true blocks until each resource is
# created before dependents (the load-balancer config) are applied.
resource "kubectl_manifest" "metallb" {
  for_each   = toset(data.kubectl_path_documents.metallb.documents)
  yaml_body  = each.value
  wait       = true
  depends_on = [kubernetes_namespace.metallb]
}

# metallb layer2 configuration: hand the load balancer a pool of IPs
# carved out of the kind docker network (see the locals below), so
# LoadBalancer services receive addresses routable from the host.
resource "kubectl_manifest" "load-balancer" {
  yaml_body = yamlencode({
    apiVersion = "v1"
    kind       = "ConfigMap"
    metadata = {
      namespace = kubernetes_namespace.metallb.metadata.0.name
      name      = "config"
    }
    data = {
      # metallb reads its address pools from this nested YAML document.
      config = yamlencode({
        address-pools = [{
          name     = "default"
          protocol = "layer2"
          addresses = [
            "${local.metallb_ip_min}-${local.metallb_ip_max}"
          ]
        }]
      })
    }
  })

  depends_on = [kubectl_manifest.metallb]
}

# Look up the docker network that kind creates for its nodes; it only
# exists once the cluster is up, hence the explicit depends_on.
data "docker_network" "kind" {
  name = "kind"

  depends_on = [kind_cluster.default]
}

locals {
  # Reserve host numbers 356-406 of the kind network's IPv4 subnet for
  # metallb.  Presumably the network is the default 172.18.0.0/16, which
  # yields 172.18.1.100-172.18.1.150 and matches the
  # "172.18.1.100 github-actions.qhub.dev" /etc/hosts entry added in CI
  # -- verify if the docker network configuration changes.
  # The gateway filter picks the IPv4 ipam entry (others have no gateway).
  metallb_ip_min = cidrhost([
    for network in data.docker_network.kind.ipam_config : network if network.gateway != ""
  ][0].subnet, 356)

  metallb_ip_max = cidrhost([
    for network in data.docker_network.kind.ipam_config : network if network.gateway != ""
  ][0].subnet, 406)
}
Loading